diff --git a/.github/workflows/docker_build_and_test.yml b/.github/workflows/docker_build_and_test.yml index 695c08672f..1d30aa85ea 100644 --- a/.github/workflows/docker_build_and_test.yml +++ b/.github/workflows/docker_build_and_test.yml @@ -23,6 +23,7 @@ on: description: Docker image type to build and test options: - "jvm" + - "native" kafka_url: description: Kafka url to be used to build the docker image required: true diff --git a/.github/workflows/docker_official_image_build_and_test.yml b/.github/workflows/docker_official_image_build_and_test.yml new file mode 100644 index 0000000000..a315cd0e0d --- /dev/null +++ b/.github/workflows/docker_official_image_build_and_test.yml @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Docker Official Image Build Test + +on: + workflow_dispatch: + inputs: + image_type: + type: choice + description: Docker image type to build and test + options: + - "jvm" + kafka_version: + description: Kafka version for the docker official image. This should be >=3.7.0 + required: true + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r docker/requirements.txt + - name: Build image and run tests + working-directory: ./docker + run: | + python docker_official_image_build_test.py kafka/test -tag=test -type=${{ github.event.inputs.image_type }} -v=${{ github.event.inputs.kafka_version }} + - name: Run CVE scan + uses: aquasecurity/trivy-action@master + with: + image-ref: 'kafka/test:test' + format: 'table' + severity: 'CRITICAL,HIGH' + output: scan_report_${{ github.event.inputs.image_type }}.txt + exit-code: '1' + - name: Upload test report + if: always() + uses: actions/upload-artifact@v3 + with: + name: report_${{ github.event.inputs.image_type }}.html + path: docker/test/report_${{ github.event.inputs.image_type }}.html + - name: Upload CVE scan report + if: always() + uses: actions/upload-artifact@v3 + with: + name: scan_report_${{ github.event.inputs.image_type }}.txt + path: scan_report_${{ github.event.inputs.image_type }}.txt diff --git a/.github/workflows/docker_promote.yml b/.github/workflows/docker_promote.yml index 3449265877..04872f9d59 100644 --- a/.github/workflows/docker_promote.yml +++ b/.github/workflows/docker_promote.yml @@ -19,10 +19,10 @@ on: workflow_dispatch: inputs: rc_docker_image: - description: RC docker image that needs to be promoted (Example:- apache/kafka:3.6.0-rc0) + description: RC docker image that needs to be promoted (Example:- apache/kafka:3.8.0-rc0 (OR) apache/kafka-native:3.8.0-rc0) required: true promoted_docker_image: - description: Docker image name of the 
promoted image (Example:- apache/kafka:3.6.0) + description: Docker image name of the promoted image (Example:- apache/kafka:3.8.0 (OR) apache/kafka-native:3.8.0) required: true jobs: diff --git a/.github/workflows/docker_rc_release.yml b/.github/workflows/docker_rc_release.yml index c7082dcac9..22dd924b51 100644 --- a/.github/workflows/docker_rc_release.yml +++ b/.github/workflows/docker_rc_release.yml @@ -23,8 +23,9 @@ on: description: Docker image type to be built and pushed options: - "jvm" + - "native" rc_docker_image: - description: RC docker image that needs to be built and pushed to Dockerhub (Example:- apache/kafka:3.6.0-rc0) + description: RC docker image that needs to be built and pushed to Dockerhub (Example:- apache/kafka:3.8.0-rc0 (OR) apache/kafka-native:3.8.0-rc0) required: true kafka_url: description: Kafka url to be used to build the docker image diff --git a/.github/workflows/docker_scan.yml b/.github/workflows/docker_scan.yml index 7d9ecfe619..bdc8eafbe6 100644 --- a/.github/workflows/docker_scan.yml +++ b/.github/workflows/docker_scan.yml @@ -21,6 +21,7 @@ on: workflow_dispatch: jobs: scan_jvm: + if: github.repository == 'apache/kafka' runs-on: ubuntu-latest strategy: matrix: diff --git a/.github/workflows/prepare_docker_official_image_source.yml b/.github/workflows/prepare_docker_official_image_source.yml new file mode 100644 index 0000000000..4549104583 --- /dev/null +++ b/.github/workflows/prepare_docker_official_image_source.yml @@ -0,0 +1,52 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Docker Prepare Docker Official Image Source + +on: + workflow_dispatch: + inputs: + image_type: + type: choice + description: Docker image type to build and test + options: + - "jvm" + kafka_version: + description: Kafka version for the docker official image. 
This should be >=3.7.0 + required: true + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r docker/requirements.txt + - name: Build Docker Official Image Artifact + working-directory: ./docker + run: | + python prepare_docker_official_image_source.py -type=${{ github.event.inputs.image_type }} -v=${{ github.event.inputs.kafka_version }} + - name: Upload Docker Official Image Artifact + if: success() + uses: actions/upload-artifact@v4 + with: + name: ${{ github.event.inputs.kafka_version }} + path: docker/docker_official_images/${{ github.event.inputs.kafka_version }} diff --git a/.gitignore b/.gitignore index 376c781ad3..015df8ead8 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,8 @@ Vagrantfile.local config/server-* config/zookeeper-* +gradle/wrapper/*.jar +gradlew.bat results tests/results @@ -58,4 +60,5 @@ jmh-benchmarks/src/main/generated storage/kafka-tiered-storage/ docker/test/report_*.html +kafka.Kafka __pycache__ diff --git a/Jenkinsfile b/Jenkinsfile index 50b7f6a298..0a795637ff 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -36,6 +36,12 @@ def doTest(env, target = "test") { junit '**/build/test-results/**/TEST-*.xml' } +def runTestOnDevBranch(env) { + if (!isChangeRequest(env)) { + doTest(env) + } +} + def doStreamsArchetype() { echo 'Verify that Kafka Streams archetype compiles' @@ -132,7 +138,7 @@ pipeline { } steps { doValidation() - doTest(env) + runTestOnDevBranch(env) echo 'Skipping Kafka Streams archetype test for Java 11' } } @@ -151,7 +157,7 @@ pipeline { } steps { doValidation() - doTest(env) + runTestOnDevBranch(env) echo 'Skipping Kafka Streams archetype test for Java 17' } } diff --git a/LICENSE-binary b/LICENSE-binary index a395f15e42..01a2b03025 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -60,12 +60,12 @@ reload4j-1.2.25 rocksdbjni-7.9.2 scala-collection-compat_2.12-2.10.0 scala-collection-compat_2.13-2.10.0 -scala-library-2.12.18 -scala-library-2.13.12 +scala-library-2.12.19 +scala-library-2.13.14 scala-logging_2.12-3.9.4 scala-logging_2.13-3.9.4 -scala-reflect-2.12.18 -scala-reflect-2.13.12 +scala-reflect-2.12.19 +scala-reflect-2.13.14 scala-java8-compat_2.12-1.0.2 scala-java8-compat_2.13-1.0.2 snappy-java-1.1.10.5 diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh index 351f1932b7..ef731a0615 100755 --- a/bin/kafka-run-class.sh +++ b/bin/kafka-run-class.sh @@ -49,7 +49,7 @@ should_include_file() { base_dir=$(dirname $0)/.. if [ -z "$SCALA_VERSION" ]; then - SCALA_VERSION=2.13.12 + SCALA_VERSION=2.13.14 if [[ -f "$base_dir/gradle.properties" ]]; then SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2` fi @@ -208,7 +208,7 @@ fi # JMX settings if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " + KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " fi # JMX port to use @@ -353,9 +353,16 @@ CLASSPATH=${CLASSPATH#:} # If Cygwin is detected, classpath is converted to Windows format. 
(( WINDOWS_OS_FORMAT )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}") -# Launch mode -if [ "x$DAEMON_MODE" = "xtrue" ]; then - nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_CMD_OPTS $KAFKA_JDK_COMPATIBILITY_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null & +# If KAFKA_MODE=native, it will bring up Kafka in the native mode. +# It expects the Kafka executable binary to be present at $base_dir/kafka.Kafka. +# This is specifically used to run system tests on native Kafka - by bringing up Kafka in the native mode. +if [[ "x$KAFKA_MODE" == "xnative" ]] && [[ "$*" == *"kafka.Kafka"* ]]; then + exec $base_dir/kafka.Kafka start --config "$2" $KAFKA_LOG4J_CMD_OPTS $KAFKA_JMX_OPTS $KAFKA_OPTS else - exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_CMD_OPTS $KAFKA_JDK_COMPATIBILITY_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" + # Launch mode + if [ "x$DAEMON_MODE" = "xtrue" ]; then + nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_CMD_OPTS $KAFKA_JDK_COMPATIBILITY_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null & + else + exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_CMD_OPTS $KAFKA_JDK_COMPATIBILITY_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" + fi fi diff --git a/bin/windows/kafka-run-class.bat b/bin/windows/kafka-run-class.bat index 616dabd70f..e42245efc0 100755 --- a/bin/windows/kafka-run-class.bat +++ b/bin/windows/kafka-run-class.bat @@ -27,7 +27,7 @@ set BASE_DIR=%CD% popd IF ["%SCALA_VERSION%"] EQU [""] ( - set SCALA_VERSION=2.13.12 + set SCALA_VERSION=2.13.14 ) IF ["%SCALA_BINARY_VERSION%"] EQU [""] ( diff --git a/build.gradle b/build.gradle index 030b4f1f2a..72622a100e 100644 --- a/build.gradle +++ b/build.gradle @@ -114,6 +114,13 @@ ext { repo = file("$rootDir/.git").isDirectory() ? Grgit.open(currentDir: project.getRootDir()) : null commitId = determineCommitId() + + addParametersForTests = { name, options -> + // -parameters generates arguments with parameter names in TestInfo#getDisplayName. 
+ // ref: https://github.com/junit-team/junit5/blob/4c0dddad1b96d4a20e92a2cd583954643ac56ac0/junit-jupiter-params/src/main/java/org/junit/jupiter/params/ParameterizedTest.java#L161-L164 + if (name == "compileTestJava" || name == "compileTestScala") + options.compilerArgs << "-parameters" + } } allprojects { @@ -161,6 +168,21 @@ allprojects { } } task printAllDependencies(type: DependencyReportTask) {} + + tasks.withType(Javadoc) { + options.charSet = 'UTF-8' + options.docEncoding = 'UTF-8' + options.encoding = 'UTF-8' + options.memberLevel = JavadocMemberLevel.PUBLIC // Document only public members/API + // Turn off doclint for now, see https://blog.joda.org/2014/02/turning-off-doclint-in-jdk-8-javadoc.html for rationale + options.addStringOption('Xdoclint:none', '-quiet') + + // The URL structure was changed to include the locale after Java 8 + if (JavaVersion.current().isJava11Compatible()) + options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/" + else + options.links "https://docs.oracle.com/javase/8/docs/api/" + } } def determineCommitId() { @@ -225,7 +247,7 @@ subprojects { options.encoding = 'UTF-8' options.compilerArgs << "-Xlint:all" // temporary exclusions until all the warnings are fixed - if (!project.path.startsWith(":connect")) + if (!project.path.startsWith(":connect") && !project.path.startsWith(":storage")) options.compilerArgs << "-Xlint:-rawtypes" options.compilerArgs << "-Xlint:-serial" options.compilerArgs << "-Xlint:-try" @@ -238,12 +260,16 @@ subprojects { // --source/--target 8 is deprecated in Java 20, suppress warning until Java 8 support is dropped in Kafka 4.0 if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_20)) options.compilerArgs << "-Xlint:-options" + + addParametersForTests(name, options) } - // We should only set this if Java version is < 9 (--release is recommended for >= 9), but the Scala plugin for IntelliJ sets - // `-target` incorrectly if this is unset - sourceCompatibility = minJavaVersion - targetCompatibility = minJavaVersion + java { + // We should only set this if Java version is < 9 (--release is recommended for >= 9), but the Scala plugin for IntelliJ sets + // `-target` incorrectly if this is unset + sourceCompatibility = minJavaVersion + targetCompatibility = minJavaVersion + } if (shouldPublish) { @@ -265,6 +291,24 @@ subprojects { } else { apply plugin: 'com.github.johnrengelman.shadow' project.shadow.component(mavenJava) + + // Fix for avoiding inclusion of runtime dependencies marked as 'shadow' in MANIFEST Class-Path. 
+ // https://github.com/johnrengelman/shadow/issues/324 + afterEvaluate { + pom.withXml { xml -> + if (xml.asNode().get('dependencies') == null) { + xml.asNode().appendNode('dependencies') + } + def dependenciesNode = xml.asNode().get('dependencies').get(0) + project.configurations.shadowed.allDependencies.each { + def dependencyNode = dependenciesNode.appendNode('dependency') + dependencyNode.appendNode('groupId', it.group) + dependencyNode.appendNode('artifactId', it.name) + dependencyNode.appendNode('version', it.version) + dependencyNode.appendNode('scope', 'runtime') + } + } + } } afterEvaluate { @@ -274,7 +318,7 @@ subprojects { artifact task } - artifactId = archivesBaseName + artifactId = base.archivesName.get() pom { name = 'Apache Kafka' url = 'https://kafka.apache.org' @@ -562,21 +606,6 @@ subprojects { task docsJar(dependsOn: javadocJar) - javadoc { - options.charSet = 'UTF-8' - options.docEncoding = 'UTF-8' - options.encoding = 'UTF-8' - options.memberLevel = JavadocMemberLevel.PUBLIC // Document only public members/API - // Turn off doclint for now, see https://blog.joda.org/2014/02/turning-off-doclint-in-jdk-8-javadoc.html for rationale - options.addStringOption('Xdoclint:none', '-quiet') - - // The URL structure was changed to include the locale after Java 8 - if (JavaVersion.current().isJava11Compatible()) - options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/" - else - options.links "https://docs.oracle.com/javase/8/docs/api/" - } - task systemTestLibs(dependsOn: jar) if (!sourceSets.test.allSource.isEmpty()) { @@ -619,7 +648,7 @@ subprojects { scalaCompileOptions.keepAliveMode = userKeepAliveMode scalaCompileOptions.additionalParameters = [ - "-deprecation", + "-deprecation:false", "-unchecked", "-encoding", "utf8", "-Xlog-reflective-calls", @@ -676,6 +705,8 @@ subprojects { if (versions.baseScala == "2.13" || JavaVersion.current().isJava9Compatible()) scalaCompileOptions.additionalParameters += ["-release", String.valueOf(minJavaVersion)] + addParametersForTests(name, options) + configure(scalaCompileOptions.forkOptions) { memoryMaximumSize = defaultMaxHeapSize jvmArgs = defaultJvmArgs @@ -707,8 +738,8 @@ subprojects { } test.dependsOn('spotbugsMain') - tasks.withType(com.github.spotbugs.snom.SpotBugsTask) { - reports { + tasks.withType(com.github.spotbugs.snom.SpotBugsTask).configureEach { + reports.configure { // Continue supporting `xmlFindBugsReport` for compatibility xml.enabled(project.hasProperty('xmlSpotBugsReport') || project.hasProperty('xmlFindBugsReport')) html.enabled(!project.hasProperty('xmlSpotBugsReport') && !project.hasProperty('xmlFindBugsReport')) @@ -743,13 +774,14 @@ subprojects { } if (userEnableTestCoverage) { - def coverageGen = it.path == ':core' ? 'reportScoverage' : 'jacocoTestReport' + def coverageGen = it.path == ':core' ? 
'reportTestScoverage' : 'jacocoTestReport' tasks.register('reportCoverage').configure { dependsOn(coverageGen) } } dependencyCheck { suppressionFile = "$rootDir/gradle/resources/dependencycheck-suppressions.xml" skipProjects = [ ":jmh-benchmarks", ":trogdor" ] + skipConfigurations = [ "zinc" ] } } @@ -815,7 +847,9 @@ tasks.create(name: "jarConnect", dependsOn: connectPkgs.collect { it + ":jar" }) tasks.create(name: "testConnect", dependsOn: connectPkgs.collect { it + ":test" }) {} project(':server') { - archivesBaseName = "kafka-server" + base { + archivesName = "kafka-server" + } dependencies { implementation project(':clients') @@ -824,6 +858,7 @@ project(':server') { implementation project(':transaction-coordinator') implementation project(':raft') implementation libs.metrics + implementation libs.jacksonDatabind implementation libs.slf4jApi @@ -886,7 +921,10 @@ project(':core') { } if (userEnableTestCoverage) apply plugin: "org.scoverage" - archivesBaseName = "kafka_${versions.baseScala}" + + base { + archivesName = "kafka_${versions.baseScala}" + } configurations { generator @@ -1250,7 +1288,9 @@ project(':core') { } project(':metadata') { - archivesBaseName = "kafka-metadata" + base { + archivesName = "kafka-metadata" + } configurations { generator @@ -1327,7 +1367,9 @@ project(':metadata') { } project(':group-coordinator') { - archivesBaseName = "kafka-group-coordinator" + base { + archivesName = "kafka-group-coordinator" + } configurations { generator @@ -1338,6 +1380,8 @@ project(':group-coordinator') { implementation project(':clients') implementation project(':metadata') implementation project(':storage') + implementation libs.jacksonDatabind + implementation libs.jacksonJDK8Datatypes implementation libs.slf4jApi implementation libs.metrics @@ -1368,13 +1412,17 @@ project(':group-coordinator') { enabled = false } + checkstyle { + configProperties = checkstyleConfigProperties("import-control-group-coordinator.xml") + } + task processMessages(type:JavaExec) { mainClass = "org.apache.kafka.message.MessageGenerator" classpath = configurations.generator args = [ "-p", "org.apache.kafka.coordinator.group.generated", "-o", "src/generated/java/org/apache/kafka/coordinator/group/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator" + "-m", "MessageDataGenerator", "JsonConverterGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1388,7 +1436,9 @@ project(':group-coordinator') { } project(':transaction-coordinator') { - archivesBaseName = "kafka-transaction-coordinator" + base { + archivesName = "kafka-transaction-coordinator" + } sourceSets { main { @@ -1409,7 +1459,9 @@ project(':transaction-coordinator') { } project(':examples') { - archivesBaseName = "kafka-examples" + base { + archivesName = "kafka-examples" + } dependencies { implementation project(':clients') @@ -1439,10 +1491,13 @@ project(':generator') { } project(':clients') { - archivesBaseName = "kafka-clients" + base { + archivesName = "kafka-clients" + } configurations { generator + shadowed } dependencies { @@ -1453,10 +1508,10 @@ project(':clients') { implementation libs.opentelemetryProto // libraries which should be added as runtime dependencies in generated pom.xml should be defined here: - shadow libs.zstd - shadow libs.lz4 - shadow libs.snappy - shadow libs.slf4jApi + shadowed libs.zstd + shadowed libs.lz4 + shadowed libs.snappy + shadowed libs.slf4jApi compileOnly libs.jacksonDatabind // for SASL/OAUTHBEARER bearer token parsing compileOnly 
libs.jacksonJDK8Datatypes @@ -1509,10 +1564,9 @@ project(':clients') { // dependencies excluded from the final jar, since they are declared as runtime dependencies dependencies { - exclude(dependency(libs.snappy)) - exclude(dependency(libs.zstd)) - exclude(dependency(libs.lz4)) - exclude(dependency(libs.slf4jApi)) + project.configurations.shadowed.allDependencies.each { + exclude(dependency(it.group + ':' + it.name)) + } // exclude proto files from the jar exclude "**/opentelemetry/proto/**/*.proto" exclude "**/google/protobuf/*.proto" @@ -1614,7 +1668,9 @@ project(':clients') { } project(':raft') { - archivesBaseName = "kafka-raft" + base { + archivesName = "kafka-raft" + } configurations { generator @@ -1710,7 +1766,9 @@ project(':raft') { } project(':server-common') { - archivesBaseName = "kafka-server-common" + base { + archivesName = "kafka-server-common" + } dependencies { api project(':clients') @@ -1770,7 +1828,9 @@ project(':server-common') { } project(':storage:storage-api') { - archivesBaseName = "kafka-storage-api" + base { + archivesName = "kafka-storage-api" + } dependencies { implementation project(':clients') @@ -1838,7 +1898,9 @@ project(':storage:storage-api') { } project(':storage') { - archivesBaseName = "kafka-storage" + base { + archivesName = "kafka-storage" + } configurations { generator @@ -1857,6 +1919,7 @@ project(':storage') { testImplementation project(':clients').sourceSets.test.output testImplementation project(':core') testImplementation project(':core').sourceSets.test.output + testImplementation project(':server') testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output testImplementation libs.hamcrest @@ -2019,7 +2082,9 @@ project(':s3stream') { } project(':tools:tools-api') { - archivesBaseName = "kafka-tools-api" + base { + archivesName = "kafka-tools-api" + } dependencies { implementation project(':clients') @@ -2074,8 +2139,10 @@ project(':tools:tools-api') { } project(':tools') { - archivesBaseName = "kafka-tools" - // TODO: compare the kafka and fix the incorrected exclude + base { + archivesName = "kafka-tools" + } + dependencies { implementation (project(':clients')){ exclude group: 'org.slf4j', module: '*' @@ -2138,6 +2205,7 @@ project(':tools') { testImplementation project(':connect:runtime') testImplementation project(':connect:runtime').sourceSets.test.output testImplementation project(':storage:storage-api').sourceSets.main.output + testImplementation project(':group-coordinator') testImplementation libs.junitJupiter testImplementation libs.mockitoCore testImplementation libs.mockitoJunitJupiter // supports MockitoExtension @@ -2170,7 +2238,9 @@ project(':tools') { } project(':trogdor') { - archivesBaseName = "trogdor" + base { + archivesName = "trogdor" + } dependencies { implementation project(':clients') @@ -2220,7 +2290,9 @@ project(':trogdor') { } project(':shell') { - archivesBaseName = "kafka-shell" + base { + archivesName = "kafka-shell" + } dependencies { implementation libs.argparse4j @@ -2272,7 +2344,10 @@ project(':shell') { } project(':streams') { - archivesBaseName = "kafka-streams" + base { + archivesName = "kafka-streams" + } + ext.buildStreamsVersionFileName = "kafka-streams-version.properties" configurations { @@ -2292,12 +2367,16 @@ project(':streams') { testCompileOnly project(':streams:test-utils') testImplementation project(':clients').sourceSets.test.output + testImplementation project(':server') testImplementation project(':core') testImplementation 
project(':tools') testImplementation project(':core').sourceSets.test.output testImplementation project(':storage') + testImplementation project(':group-coordinator') + testImplementation project(':transaction-coordinator') testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output + testImplementation project(':server') testImplementation libs.log4j testImplementation libs.junitJupiter testImplementation libs.junitVintageEngine @@ -2429,7 +2508,11 @@ project(':streams') { project(':streams:streams-scala') { apply plugin: 'scala' - archivesBaseName = "kafka-streams-scala_${versions.baseScala}" + + base { + archivesName = "kafka-streams-scala_${versions.baseScala}" + } + dependencies { api project(':streams') @@ -2442,6 +2525,7 @@ project(':streams:streams-scala') { // So we make sure to not include it in the dependencies. api libs.scalaCollectionCompat } + testImplementation project(':group-coordinator') testImplementation project(':core') testImplementation project(':core').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output @@ -2490,7 +2574,9 @@ project(':streams:streams-scala') { } project(':streams:test-utils') { - archivesBaseName = "kafka-streams-test-utils" + base { + archivesName = "kafka-streams-test-utils" + } dependencies { api project(':streams') @@ -2525,7 +2611,9 @@ project(':streams:test-utils') { } project(':streams:examples') { - archivesBaseName = "kafka-streams-examples" + base { + archivesName = "kafka-streams-examples" + } dependencies { // this dependency should be removed after we unify data API @@ -2562,7 +2650,9 @@ project(':streams:examples') { } project(':streams:upgrade-system-tests-0100') { - archivesBaseName = "kafka-streams-upgrade-system-tests-0100" + base { + archivesName = "kafka-streams-upgrade-system-tests-0100" + } dependencies { testImplementation(libs.kafkaStreams_0100) { @@ -2578,7 +2668,9 @@ project(':streams:upgrade-system-tests-0100') { } project(':streams:upgrade-system-tests-0101') { - archivesBaseName = "kafka-streams-upgrade-system-tests-0101" + base { + archivesName = "kafka-streams-upgrade-system-tests-0101" + } dependencies { testImplementation(libs.kafkaStreams_0101) { @@ -2594,7 +2686,9 @@ project(':streams:upgrade-system-tests-0101') { } project(':streams:upgrade-system-tests-0102') { - archivesBaseName = "kafka-streams-upgrade-system-tests-0102" + base { + archivesName = "kafka-streams-upgrade-system-tests-0102" + } dependencies { testImplementation libs.kafkaStreams_0102 @@ -2607,7 +2701,9 @@ project(':streams:upgrade-system-tests-0102') { } project(':streams:upgrade-system-tests-0110') { - archivesBaseName = "kafka-streams-upgrade-system-tests-0110" + base{ + archivesName = "kafka-streams-upgrade-system-tests-0110" + } dependencies { testImplementation libs.kafkaStreams_0110 @@ -2620,7 +2716,9 @@ project(':streams:upgrade-system-tests-0110') { } project(':streams:upgrade-system-tests-10') { - archivesBaseName = "kafka-streams-upgrade-system-tests-10" + base { + archivesName = "kafka-streams-upgrade-system-tests-10" + } dependencies { testImplementation libs.kafkaStreams_10 @@ -2633,7 +2731,9 @@ project(':streams:upgrade-system-tests-10') { } project(':streams:upgrade-system-tests-11') { - archivesBaseName = "kafka-streams-upgrade-system-tests-11" + base { + archivesName = "kafka-streams-upgrade-system-tests-11" + } dependencies { testImplementation libs.kafkaStreams_11 @@ -2646,7 +2746,9 @@ project(':streams:upgrade-system-tests-11') { } 
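A minimal, hypothetical JUnit test (not part of this patch) illustrating what the `addParametersForTests` hook defined near the top of this build file buys: compiling test sources with `-parameters` keeps real parameter names in the bytecode, so the default `@ParameterizedTest` display name (and `TestInfo#getDisplayName`) can show them. Class and method names below are assumptions for illustration only.

```java
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class ParameterNamesInDisplayNamesTest {

    @ParameterizedTest
    @ValueSource(strings = {"zk", "kraft"})
    void startsWithQuorum(String quorum) {
        // Compiled with -parameters, the default display name renders as e.g. "[2] quorum=kraft".
        // Without the flag, the parameter name is not retained in the class file, so JUnit
        // cannot include it in the generated display name.
    }
}
```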
project(':streams:upgrade-system-tests-20') { - archivesBaseName = "kafka-streams-upgrade-system-tests-20" + base { + archivesName = "kafka-streams-upgrade-system-tests-20" + } dependencies { testImplementation libs.kafkaStreams_20 @@ -2659,7 +2761,9 @@ project(':streams:upgrade-system-tests-20') { } project(':streams:upgrade-system-tests-21') { - archivesBaseName = "kafka-streams-upgrade-system-tests-21" + base { + archivesName = "kafka-streams-upgrade-system-tests-21" + } dependencies { testImplementation libs.kafkaStreams_21 @@ -2672,7 +2776,9 @@ project(':streams:upgrade-system-tests-21') { } project(':streams:upgrade-system-tests-22') { - archivesBaseName = "kafka-streams-upgrade-system-tests-22" + base { + archivesName = "kafka-streams-upgrade-system-tests-22" + } dependencies { testImplementation libs.kafkaStreams_22 @@ -2685,7 +2791,9 @@ project(':streams:upgrade-system-tests-22') { } project(':streams:upgrade-system-tests-23') { - archivesBaseName = "kafka-streams-upgrade-system-tests-23" + base { + archivesName = "kafka-streams-upgrade-system-tests-23" + } dependencies { testImplementation libs.kafkaStreams_23 @@ -2698,7 +2806,9 @@ project(':streams:upgrade-system-tests-23') { } project(':streams:upgrade-system-tests-24') { - archivesBaseName = "kafka-streams-upgrade-system-tests-24" + base { + archivesName = "kafka-streams-upgrade-system-tests-24" + } dependencies { testImplementation libs.kafkaStreams_24 @@ -2711,7 +2821,9 @@ project(':streams:upgrade-system-tests-24') { } project(':streams:upgrade-system-tests-25') { - archivesBaseName = "kafka-streams-upgrade-system-tests-25" + base { + archivesName = "kafka-streams-upgrade-system-tests-25" + } dependencies { testImplementation libs.kafkaStreams_25 @@ -2724,7 +2836,9 @@ project(':streams:upgrade-system-tests-25') { } project(':streams:upgrade-system-tests-26') { - archivesBaseName = "kafka-streams-upgrade-system-tests-26" + base { + archivesName = "kafka-streams-upgrade-system-tests-26" + } dependencies { testImplementation libs.kafkaStreams_26 @@ -2737,7 +2851,9 @@ project(':streams:upgrade-system-tests-26') { } project(':streams:upgrade-system-tests-27') { - archivesBaseName = "kafka-streams-upgrade-system-tests-27" + base { + archivesName = "kafka-streams-upgrade-system-tests-27" + } dependencies { testImplementation libs.kafkaStreams_27 @@ -2750,7 +2866,9 @@ project(':streams:upgrade-system-tests-27') { } project(':streams:upgrade-system-tests-28') { - archivesBaseName = "kafka-streams-upgrade-system-tests-28" + base { + archivesName = "kafka-streams-upgrade-system-tests-28" + } dependencies { testImplementation libs.kafkaStreams_28 @@ -2763,7 +2881,9 @@ project(':streams:upgrade-system-tests-28') { } project(':streams:upgrade-system-tests-30') { - archivesBaseName = "kafka-streams-upgrade-system-tests-30" + base { + archivesName = "kafka-streams-upgrade-system-tests-30" + } dependencies { testImplementation libs.kafkaStreams_30 @@ -2776,7 +2896,9 @@ project(':streams:upgrade-system-tests-30') { } project(':streams:upgrade-system-tests-31') { - archivesBaseName = "kafka-streams-upgrade-system-tests-31" + base { + archivesName = "kafka-streams-upgrade-system-tests-31" + } dependencies { testImplementation libs.kafkaStreams_31 @@ -2789,7 +2911,9 @@ project(':streams:upgrade-system-tests-31') { } project(':streams:upgrade-system-tests-32') { - archivesBaseName = "kafka-streams-upgrade-system-tests-32" + base { + archivesName = "kafka-streams-upgrade-system-tests-32" + } dependencies { testImplementation libs.kafkaStreams_32 
@@ -2802,7 +2926,9 @@ project(':streams:upgrade-system-tests-32') { } project(':streams:upgrade-system-tests-33') { - archivesBaseName = "kafka-streams-upgrade-system-tests-33" + base { + archivesName = "kafka-streams-upgrade-system-tests-33" + } dependencies { testImplementation libs.kafkaStreams_33 @@ -2815,7 +2941,9 @@ project(':streams:upgrade-system-tests-33') { } project(':streams:upgrade-system-tests-34') { - archivesBaseName = "kafka-streams-upgrade-system-tests-34" + base { + archivesName = "kafka-streams-upgrade-system-tests-34" + } dependencies { testImplementation libs.kafkaStreams_34 @@ -2828,7 +2956,9 @@ project(':streams:upgrade-system-tests-34') { } project(':streams:upgrade-system-tests-35') { - archivesBaseName = "kafka-streams-upgrade-system-tests-35" + base { + archivesName = "kafka-streams-upgrade-system-tests-35" + } dependencies { testImplementation libs.kafkaStreams_35 @@ -2841,7 +2971,9 @@ project(':streams:upgrade-system-tests-35') { } project(':streams:upgrade-system-tests-36') { - archivesBaseName = "kafka-streams-upgrade-system-tests-36" + base { + archivesName = "kafka-streams-upgrade-system-tests-36" + } dependencies { testImplementation libs.kafkaStreams_36 @@ -2854,7 +2986,9 @@ project(':streams:upgrade-system-tests-36') { } project(':streams:upgrade-system-tests-37') { - archivesBaseName = "kafka-streams-upgrade-system-tests-37" + base { + archivesName = "kafka-streams-upgrade-system-tests-37" + } dependencies { testImplementation libs.kafkaStreams_37 @@ -2880,6 +3014,8 @@ project(':jmh-benchmarks') { exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } implementation project(':server-common') + implementation project(':server') + implementation project(':raft') implementation project(':clients') implementation project(':group-coordinator') implementation project(':metadata') @@ -2937,7 +3073,9 @@ project(':jmh-benchmarks') { } project(':log4j-appender') { - archivesBaseName = "kafka-log4j-appender" + base { + archivesName = "kafka-log4j-appender" + } dependencies { implementation project(':clients') @@ -2956,7 +3094,9 @@ project(':log4j-appender') { } project(':connect:api') { - archivesBaseName = "connect-api" + base { + archivesName = "connect-api" + } dependencies { api project(':clients') @@ -2991,7 +3131,9 @@ project(':connect:api') { } project(':connect:transforms') { - archivesBaseName = "connect-transforms" + base { + archivesName = "connect-transforms" + } dependencies { api project(':connect:api') @@ -3027,7 +3169,9 @@ project(':connect:transforms') { } project(':connect:json') { - archivesBaseName = "connect-json" + base { + archivesName = "connect-json" + } dependencies { api project(':connect:api') @@ -3037,7 +3181,7 @@ project(':connect:json') { api libs.jacksonAfterburner implementation libs.slf4jApi - + testImplementation libs.junitJupiter testRuntimeOnly libs.slf4jlog4j @@ -3071,7 +3215,9 @@ project(':connect:runtime') { swagger } - archivesBaseName = "connect-runtime" + base { + archivesName = "connect-runtime" + } dependencies { // connect-runtime is used in tests, use `api` for modules below for backwards compatibility even though @@ -3107,6 +3253,8 @@ project(':connect:runtime') { testImplementation project(':metadata') testImplementation project(':core').sourceSets.test.output testImplementation project(':server-common') + testImplementation project(':server') + testImplementation project(':group-coordinator') testImplementation project(':storage') testImplementation project(':connect:test-plugins') @@ -3209,7 +3357,9 @@ 
project(':connect:runtime') { } project(':connect:file') { - archivesBaseName = "connect-file" + base { + archivesName = "connect-file" + } dependencies { implementation project(':connect:api') @@ -3249,7 +3399,9 @@ project(':connect:file') { } project(':connect:basic-auth-extension') { - archivesBaseName = "connect-basic-auth-extension" + base { + archivesName = "connect-basic-auth-extension" + } dependencies { implementation project(':connect:api') @@ -3289,7 +3441,9 @@ project(':connect:basic-auth-extension') { } project(':connect:mirror') { - archivesBaseName = "connect-mirror" + base { + archivesName = "connect-mirror" + } dependencies { implementation project(':connect:api') @@ -3319,6 +3473,7 @@ project(':connect:mirror') { testImplementation project(':connect:runtime').sourceSets.test.output testImplementation project(':core') testImplementation project(':core').sourceSets.test.output + testImplementation project(':server') testRuntimeOnly project(':connect:runtime') testRuntimeOnly libs.slf4jlog4j @@ -3376,7 +3531,9 @@ project(':connect:mirror') { } project(':connect:mirror-client') { - archivesBaseName = "connect-mirror-client" + base { + archivesName = "connect-mirror-client" + } dependencies { implementation project(':clients') @@ -3411,7 +3568,9 @@ project(':connect:mirror-client') { } project(':connect:test-plugins') { - archivesBaseName = "connect-test-plugins" + base { + archivesName = "connect-test-plugins" + } dependencies { api project(':connect:api') @@ -3428,16 +3587,4 @@ task aggregatedJavadoc(type: Javadoc, dependsOn: compileJava) { classpath = files(projectsWithJavadoc.collect { it.sourceSets.main.compileClasspath }) includes = projectsWithJavadoc.collectMany { it.javadoc.getIncludes() } excludes = projectsWithJavadoc.collectMany { it.javadoc.getExcludes() } - - options.charSet = 'UTF-8' - options.docEncoding = 'UTF-8' - options.encoding = 'UTF-8' - // Turn off doclint for now, see https://blog.joda.org/2014/02/turning-off-doclint-in-jdk-8-javadoc.html for rationale - options.addStringOption('Xdoclint:none', '-quiet') - - // The URL structure was changed to include the locale after Java 8 - if (JavaVersion.current().isJava11Compatible()) - options.links "https://docs.oracle.com/en/java/javase/${JavaVersion.current().majorVersion}/docs/api/" - else - options.links "https://docs.oracle.com/javase/8/docs/api/" } diff --git a/checkstyle/import-control-core.xml b/checkstyle/import-control-core.xml index 782d2fe461..ed6c53a322 100644 --- a/checkstyle/import-control-core.xml +++ b/checkstyle/import-control-core.xml @@ -37,7 +37,7 @@ - + @@ -57,6 +57,7 @@ + @@ -106,6 +107,8 @@ + + @@ -115,4 +118,17 @@ + + + + + + + + + + + + + diff --git a/checkstyle/import-control-group-coordinator.xml b/checkstyle/import-control-group-coordinator.xml new file mode 100644 index 0000000000..51a94efb7f --- /dev/null +++ b/checkstyle/import-control-group-coordinator.xml @@ -0,0 +1,82 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/checkstyle/import-control-jmh-benchmarks.xml b/checkstyle/import-control-jmh-benchmarks.xml index 1160e3f67d..ddbf22e210 100644 --- a/checkstyle/import-control-jmh-benchmarks.xml +++ b/checkstyle/import-control-jmh-benchmarks.xml @@ -47,6 +47,7 @@ + @@ -55,6 +56,7 @@ + diff --git a/checkstyle/import-control-metadata.xml b/checkstyle/import-control-metadata.xml index 897932492b..9e549776af 100644 --- a/checkstyle/import-control-metadata.xml +++ 
b/checkstyle/import-control-metadata.xml @@ -119,6 +119,7 @@ + @@ -172,18 +173,22 @@ - - - + + + + + + + diff --git a/checkstyle/import-control-server-common.xml b/checkstyle/import-control-server-common.xml index 2c5c652e97..24a9cd3440 100644 --- a/checkstyle/import-control-server-common.xml +++ b/checkstyle/import-control-server-common.xml @@ -105,15 +105,19 @@ + - + + + + diff --git a/checkstyle/import-control-server.xml b/checkstyle/import-control-server.xml index 0fad8c1627..02bf92088f 100644 --- a/checkstyle/import-control-server.xml +++ b/checkstyle/import-control-server.xml @@ -46,6 +46,8 @@ + + @@ -82,6 +84,9 @@ + + + diff --git a/checkstyle/import-control-storage.xml b/checkstyle/import-control-storage.xml index d8c5e287d3..623f2c6f45 100644 --- a/checkstyle/import-control-storage.xml +++ b/checkstyle/import-control-storage.xml @@ -53,6 +53,7 @@ + @@ -65,6 +66,7 @@ + @@ -73,9 +75,10 @@ + - + @@ -111,6 +114,7 @@ + diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 8bbf572821..71c8e0dd4b 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -76,8 +76,10 @@ - + + + @@ -138,6 +140,7 @@ + @@ -166,6 +169,7 @@ + @@ -207,6 +211,11 @@ + + + + + @@ -234,35 +243,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -303,6 +283,7 @@ + @@ -323,11 +304,13 @@ - + + + @@ -413,6 +396,9 @@ + + + @@ -451,13 +437,15 @@ + + + - @@ -468,8 +456,9 @@ - + + @@ -483,6 +472,9 @@ + + + @@ -532,6 +524,7 @@ + @@ -595,6 +588,7 @@ + @@ -605,6 +599,7 @@ + @@ -644,6 +639,7 @@ + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index e1b21aa201..5d78a0e577 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -43,11 +43,14 @@ + + + files="(Microbenchmarks|SaslServerAuthenticator).java"/> + files="(Utils|Topic|Lz4BlockOutputStream|AclData|JoinGroupRequest).java"/> @@ -100,7 +103,7 @@ files="(AbstractRequest|AbstractResponse|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest|KafkaAdminClientTest|KafkaRaftClientTest).java"/> + files="(ConsumerCoordinator|BufferPool|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer|AbstractStickyAssignor|KafkaRaftClient|Authorizer|FetchSessionHandler|RecordAccumulator|Shell).java"/> @@ -144,7 +147,7 @@ + files="(DistributedHerder|DistributedConfig|KafkaConfigBackingStore|IncrementalCooperativeAssignor).java"/> + files="(JsonConverter|ConnectHeaders).java"/> + files="(KafkaConfigBackingStore|ConnectMetricsRegistry).java"/> @@ -348,7 +351,7 @@ + files="(GroupMetadataManager|GroupMetadataManagerTest).java"/> partitions) if (!log.isTraceEnabled()) { return String.format("%d partition(s)", partitions.size()); } - return "(" + Utils.join(partitions, ", ") + ")"; + return "(" + partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", ")) + ")"; } private String topicIdPartitionsToLogString(Collection partitions) { if (!log.isTraceEnabled()) { return String.format("%d partition(s)", partitions.size()); } - return "(" + Utils.join(partitions, ", ") + ")"; + return "(" + partitions.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", ")) + ")"; } /** @@ -438,16 +438,16 @@ String verifyFullFetchResponsePartitions(Set topicPartitions, Se extraIds = findMissing(ids, sessionTopicNames.keySet()); } if (!omitted.isEmpty()) { - 
bld.append("omittedPartitions=(").append(Utils.join(omitted, ", ")).append("), "); + bld.append("omittedPartitions=(").append(omitted.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append("), "); } if (!extra.isEmpty()) { - bld.append("extraPartitions=(").append(Utils.join(extra, ", ")).append("), "); + bld.append("extraPartitions=(").append(extra.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append("), "); } if (!extraIds.isEmpty()) { - bld.append("extraIds=(").append(Utils.join(extraIds, ", ")).append("), "); + bld.append("extraIds=(").append(extraIds.stream().map(Uuid::toString).collect(Collectors.joining(", "))).append("), "); } if ((!omitted.isEmpty()) || (!extra.isEmpty()) || (!extraIds.isEmpty())) { - bld.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")"); + bld.append("response=(").append(topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append(")"); return bld.toString(); } return null; @@ -470,11 +470,11 @@ String verifyIncrementalFetchResponsePartitions(Set topicPartiti findMissing(topicPartitions, sessionPartitions.keySet()); StringBuilder bld = new StringBuilder(); if (!extra.isEmpty()) - bld.append("extraPartitions=(").append(Utils.join(extra, ", ")).append("), "); + bld.append("extraPartitions=(").append(extra.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append("), "); if (!extraIds.isEmpty()) - bld.append("extraIds=(").append(Utils.join(extraIds, ", ")).append("), "); + bld.append("extraIds=(").append(extraIds.stream().map(Uuid::toString).collect(Collectors.joining(", "))).append("), "); if ((!extra.isEmpty()) || (!extraIds.isEmpty())) { - bld.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")"); + bld.append("response=(").append(topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append(")"); return bld.toString(); } return null; @@ -499,7 +499,7 @@ private String responseDataToLogString(Set topicPartitions) { } StringBuilder bld = new StringBuilder(); bld.append(" with response=("). - append(Utils.join(topicPartitions, ", ")). + append(topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))). append(")"); String prefix = ", implied=("; String suffix = ""; @@ -599,7 +599,9 @@ public boolean handleResponse(FetchResponse response, short version) { * The client will initiate the session close on next fetch request. 
*/ public void notifyClose() { - log.debug("Set the metadata for next fetch request to close the existing session ID={}", nextMetadata.sessionId()); + if (log.isDebugEnabled()) { + log.debug("Set the metadata for next fetch request to close the existing session ID={}", nextMetadata.sessionId()); + } nextMetadata = nextMetadata.nextCloseExisting(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java index 4235be5c8d..2bd70206fb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java +++ b/clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java @@ -44,11 +44,7 @@ public InFlightRequests(int maxInFlightRequestsPerConnection) { */ public void add(NetworkClient.InFlightRequest request) { String destination = request.destination; - Deque reqs = this.requests.get(destination); - if (reqs == null) { - reqs = new ArrayDeque<>(); - this.requests.put(destination, reqs); - } + Deque reqs = this.requests.computeIfAbsent(destination, k -> new ArrayDeque<>()); reqs.addFirst(request); inFlightRequestCount.incrementAndGet(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java index 18a7eefe20..a03d57b40f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/KafkaClient.java @@ -70,7 +70,7 @@ public interface KafkaClient extends Closeable { /** * Check if the connection of the node has failed, based on the connection state. Such connection failure are - * usually transient and can be resumed in the next {@link #ready(org.apache.kafka.common.Node, long)} } + * usually transient and can be resumed in the next {@link #ready(org.apache.kafka.common.Node, long)} * call, but there are cases where transient failures needs to be caught and re-acted upon. * * @param node the node to check diff --git a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java index e1a4b87905..838718652f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java +++ b/clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java @@ -185,7 +185,7 @@ public String toString(boolean lineBreaks) { bld.append("("); if (lineBreaks) bld.append("\n\t"); - bld.append(Utils.join(apiKeysText.values(), separator)); + bld.append(String.join(separator, apiKeysText.values())); if (lineBreaks) bld.append("\n"); bld.append(")"); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java index dbd0124c00..0beef4edb2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java @@ -764,7 +764,7 @@ default CreateDelegationTokenResult createDelegationToken() { * * * @param options The options to use when creating delegation token. - * @return The DeleteRecordsResult. + * @return The CreateDelegationTokenResult. 
*/ CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClient.java index 75f1c5f10d..989a6fa5d3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClient.java @@ -25,7 +25,7 @@ * * Client code should use the newer {@link Admin} interface in preference to this class. * - * This class may be removed in a later release, but has not be marked as deprecated to avoid unnecessary noise. + * This class may be removed in a later release, but has not been marked as deprecated to avoid unnecessary noise. */ public abstract class AdminClient implements Admin { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java index a9d555cb39..13ec5965ee 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupDescription.java @@ -21,13 +21,13 @@ import org.apache.kafka.common.GroupType; import org.apache.kafka.common.Node; import org.apache.kafka.common.acl.AclOperation; -import org.apache.kafka.common.utils.Utils; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; /** * A detailed description of a single consumer group in the cluster. @@ -161,7 +161,7 @@ public Set authorizedOperations() { public String toString() { return "(groupId=" + groupId + ", isSimpleConsumerGroup=" + isSimpleConsumerGroup + - ", members=" + Utils.join(members, ",") + + ", members=" + members.stream().map(MemberDescription::toString).collect(Collectors.joining(",")) + ", partitionAssignor=" + partitionAssignor + ", type=" + type + ", state=" + state + diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java index 100e996b5d..0d065d7bd5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java @@ -46,7 +46,7 @@ protected CreateTopicsResult(Map> fu */ public Map> values() { return futures.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().thenApply(v -> (Void) null))); + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().thenApply(v -> null))); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java index 653c97d905..b2fc40f403 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java @@ -53,21 +53,18 @@ public Map> values() { */ public KafkaFuture> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). 
- thenApply(new KafkaFuture.BaseFunction>() { - @Override - public Map apply(Void v) { - Map configs = new HashMap<>(futures.size()); - for (Map.Entry> entry : futures.entrySet()) { - try { - configs.put(entry.getKey(), entry.getValue().get()); - } catch (InterruptedException | ExecutionException e) { - // This should be unreachable, because allOf ensured that all the futures - // completed successfully. - throw new RuntimeException(e); - } + thenApply(v -> { + Map configs = new HashMap<>(futures.size()); + for (Map.Entry> entry : futures.entrySet()) { + try { + configs.put(entry.getKey(), entry.getValue().get()); + } catch (InterruptedException | ExecutionException e) { + // This should be unreachable, because allOf ensured that all the futures + // completed successfully. + throw new RuntimeException(e); } - return configs; } + return configs; }); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java index 96a81f08b0..c57867aad7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java @@ -48,7 +48,6 @@ public class DescribeLogDirsResult { * @deprecated Deprecated Since Kafka 2.7. Use {@link #descriptions()}. */ @Deprecated - @SuppressWarnings("deprecation") public Map>> values() { return descriptions().entrySet().stream() .collect(Collectors.toMap( @@ -87,7 +86,6 @@ public Map>> descriptions() * @deprecated Deprecated Since Kafka 2.7. Use {@link #allDescriptions()}. */ @Deprecated - @SuppressWarnings("deprecation") public KafkaFuture>> all() { return allDescriptions().thenApply(map -> map.entrySet().stream().collect(Collectors.toMap( entry -> entry.getKey(), diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java index 299aaea362..e73f12082d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java @@ -30,6 +30,7 @@ public class DescribeTopicsOptions extends AbstractOptions { private boolean includeAuthorizedOperations; + private int partitionSizeLimitPerResponse = 2000; /** * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the @@ -47,8 +48,18 @@ public DescribeTopicsOptions includeAuthorizedOperations(boolean includeAuthoriz return this; } + // Note that, partitionSizeLimitPerResponse will not be effective if it is larger than the config + // max.request.partition.size.limit on the server side. 
+ public DescribeTopicsOptions partitionSizeLimitPerResponse(int partitionSizeLimitPerResponse) { + this.partitionSizeLimitPerResponse = partitionSizeLimitPerResponse; + return this; + } + public boolean includeAuthorizedOperations() { return includeAuthorizedOperations; } + public int partitionSizeLimitPerResponse() { + return partitionSizeLimitPerResponse; + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java index 41593c5298..1c68d8180b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java @@ -128,6 +128,7 @@ public KafkaFuture> allTopicIds() { * Return a future which succeeds only if all the topic descriptions succeed. */ private static KafkaFuture> all(Map> futures) { + if (futures == null) return null; KafkaFuture future = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); return future. thenApply(v -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java index 548c94c960..567e984a45 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java @@ -57,20 +57,17 @@ public KafkaFuture all() { final KafkaFutureImpl result = new KafkaFutureImpl<>(); partitions().whenComplete( - new KafkaFuture.BiConsumer>, Throwable>() { - @Override - public void accept(Map> topicPartitions, Throwable throwable) { - if (throwable != null) { - result.completeExceptionally(throwable); - } else { - for (Optional exception : topicPartitions.values()) { - if (exception.isPresent()) { - result.completeExceptionally(exception.get()); - return; - } + (topicPartitions, throwable) -> { + if (throwable != null) { + result.completeExceptionally(throwable); + } else { + for (Optional exception : topicPartitions.values()) { + if (exception.isPresent()) { + result.completeExceptionally(exception.get()); + return; } - result.complete(null); } + result.complete(null); } }); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/FinalizedVersionRange.java b/clients/src/main/java/org/apache/kafka/clients/admin/FinalizedVersionRange.java index 22ecb8d4d2..189ffe8235 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/FinalizedVersionRange.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/FinalizedVersionRange.java @@ -28,7 +28,7 @@ public class FinalizedVersionRange { /** * Raises an exception unless the following condition is met: - * minVersionLevel >= 1 and maxVersionLevel >= 1 and maxVersionLevel >= minVersionLevel. + * {@code minVersionLevel >= 1} and {@code maxVersionLevel >= 1} and {@code maxVersionLevel >= minVersionLevel}. * * @param minVersionLevel The minimum version level value. * @param maxVersionLevel The maximum version level value. 
diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 630f1000ff..640551b5e3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -35,9 +35,9 @@ import org.apache.kafka.clients.admin.OffsetSpec.TimestampSpec; import org.apache.kafka.clients.admin.internals.AbortTransactionHandler; import org.apache.kafka.clients.admin.internals.AdminApiDriver; -import org.apache.kafka.clients.admin.internals.AdminApiHandler; import org.apache.kafka.clients.admin.internals.AdminApiFuture; import org.apache.kafka.clients.admin.internals.AdminApiFuture.SimpleAdminApiFuture; +import org.apache.kafka.clients.admin.internals.AdminApiHandler; import org.apache.kafka.clients.admin.internals.AdminBootstrapAddresses; import org.apache.kafka.clients.admin.internals.AdminMetadataManager; import org.apache.kafka.clients.admin.internals.AllBrokersStrategy; @@ -138,6 +138,11 @@ import org.apache.kafka.common.message.DescribeLogDirsRequestData.DescribableLogDirTopic; import org.apache.kafka.common.message.DescribeLogDirsResponseData; import org.apache.kafka.common.message.DescribeQuorumResponseData; +import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; +import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData.TopicRequest; +import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; +import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition; +import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopic; import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData; import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData.UserName; import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; @@ -203,11 +208,13 @@ import org.apache.kafka.common.requests.DescribeDelegationTokenResponse; import org.apache.kafka.common.requests.DescribeLogDirsRequest; import org.apache.kafka.common.requests.DescribeLogDirsResponse; -import org.apache.kafka.common.requests.DescribeUserScramCredentialsRequest; -import org.apache.kafka.common.requests.DescribeUserScramCredentialsResponse; import org.apache.kafka.common.requests.DescribeQuorumRequest; import org.apache.kafka.common.requests.DescribeQuorumRequest.Builder; import org.apache.kafka.common.requests.DescribeQuorumResponse; +import org.apache.kafka.common.requests.DescribeTopicPartitionsRequest; +import org.apache.kafka.common.requests.DescribeTopicPartitionsResponse; +import org.apache.kafka.common.requests.DescribeUserScramCredentialsRequest; +import org.apache.kafka.common.requests.DescribeUserScramCredentialsResponse; import org.apache.kafka.common.requests.ElectLeadersRequest; import org.apache.kafka.common.requests.ElectLeadersResponse; import org.apache.kafka.common.requests.ExpireDelegationTokenRequest; @@ -258,6 +265,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -266,6 +274,7 @@ import java.util.OptionalLong; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -1705,7 +1714,7 @@ private static boolean topicNameIsUnrepresentable(String topicName) { } private static boolean topicIdIsUnrepresentable(Uuid topicId) { - return topicId == null || topicId == Uuid.ZERO_UUID; + return topicId == null || topicId.equals(Uuid.ZERO_UUID); } // for testing @@ -2113,27 +2122,18 @@ public DescribeTopicsResult describeTopics(final TopicCollection topics, Describ if (topics instanceof TopicIdCollection) return DescribeTopicsResult.ofTopicIds(handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options)); else if (topics instanceof TopicNameCollection) - return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames(), options)); + return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options)); else throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics."); } - private Map> handleDescribeTopicsByNames(final Collection topicNames, DescribeTopicsOptions options) { - final Map> topicFutures = new HashMap<>(topicNames.size()); - final ArrayList topicNamesList = new ArrayList<>(); - for (String topicName : topicNames) { - if (topicNameIsUnrepresentable(topicName)) { - KafkaFutureImpl future = new KafkaFutureImpl<>(); - future.completeExceptionally(new InvalidTopicException("The given topic name '" + - topicName + "' cannot be represented in a request.")); - topicFutures.put(topicName, future); - } else if (!topicFutures.containsKey(topicName)) { - topicFutures.put(topicName, new KafkaFutureImpl<>()); - topicNamesList.add(topicName); - } - } - final long now = time.milliseconds(); - Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), + Call generateDescribeTopicsCallWithMetadataApi( + List topicNamesList, + Map> topicFutures, + DescribeTopicsOptions options, + long now + ) { + return new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { private boolean supportsDisablingTopicCreation = true; @@ -2188,9 +2188,155 @@ void handleFailure(Throwable throwable) { completeAllExceptionally(topicFutures.values(), throwable); } }; - if (!topicNamesList.isEmpty()) { - runnable.call(call, now); + } + + Call generateDescribeTopicsCallWithDescribeTopicPartitionsApi( + List topicNamesList, + Map> topicFutures, + Map nodes, + DescribeTopicsOptions options, + long now + ) { + final Map topicsRequests = new LinkedHashMap<>(); + topicNamesList.stream().sorted().forEach(topic -> { + topicsRequests.put(topic, new TopicRequest().setName(topic)); + }); + return new Call("describeTopicPartitions", calcDeadlineMs(now, options.timeoutMs()), + new LeastLoadedNodeProvider()) { + TopicDescription partiallyFinishedTopicDescription = null; + + @Override + DescribeTopicPartitionsRequest.Builder createRequest(int timeoutMs) { + DescribeTopicPartitionsRequestData request = new DescribeTopicPartitionsRequestData() + .setTopics(new ArrayList<>(topicsRequests.values())) + .setResponsePartitionLimit(options.partitionSizeLimitPerResponse()); + if (partiallyFinishedTopicDescription != null) { + // If the previous cursor points to partition 0, it will not be set here. Instead, the previous + // cursor topic will be the first topic in the request. 
+ request.setCursor(new DescribeTopicPartitionsRequestData.Cursor() + .setTopicName(partiallyFinishedTopicDescription.name()) + .setPartitionIndex(partiallyFinishedTopicDescription.partitions().size()) + ); + } + return new DescribeTopicPartitionsRequest.Builder(request); + } + + @SuppressWarnings("NPathComplexity") + @Override + void handleResponse(AbstractResponse abstractResponse) { + DescribeTopicPartitionsResponse response = (DescribeTopicPartitionsResponse) abstractResponse; + DescribeTopicPartitionsResponseData.Cursor responseCursor = response.data().nextCursor(); + // The topicDescription for the cursor topic of the current batch. + TopicDescription nextTopicDescription = null; + + for (DescribeTopicPartitionsResponseTopic topic : response.data().topics()) { + String topicName = topic.name(); + Errors error = Errors.forCode(topic.errorCode()); + + KafkaFutureImpl future = topicFutures.get(topicName); + + if (error != Errors.NONE) { + future.completeExceptionally(error.exception()); + topicsRequests.remove(topicName); + if (responseCursor != null && responseCursor.topicName().equals(topicName)) { + responseCursor = null; + } + continue; + } + + TopicDescription currentTopicDescription = getTopicDescriptionFromDescribeTopicsResponseTopic(topic, nodes); + + if (partiallyFinishedTopicDescription != null && partiallyFinishedTopicDescription.name().equals(topicName)) { + // Add the partitions for the cursor topic of the previous batch. + partiallyFinishedTopicDescription.partitions().addAll(currentTopicDescription.partitions()); + continue; + } + + if (responseCursor != null && responseCursor.topicName().equals(topicName)) { + // In the same batch of result, it may need to handle the partitions for the previous cursor + // topic and the current cursor topic. Cache the result in the nextTopicDescription. + nextTopicDescription = currentTopicDescription; + continue; + } + + topicsRequests.remove(topicName); + future.complete(currentTopicDescription); + } + + if (partiallyFinishedTopicDescription != null && + (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) { + // We can't simply check nextTopicDescription != null here to close the partiallyFinishedTopicDescription. + // Because the responseCursor topic may not show in the response. 
+ String topicName = partiallyFinishedTopicDescription.name(); + topicFutures.get(topicName).complete(partiallyFinishedTopicDescription); + topicsRequests.remove(topicName); + partiallyFinishedTopicDescription = null; + } + if (nextTopicDescription != null) { + partiallyFinishedTopicDescription = nextTopicDescription; + } + + if (!topicsRequests.isEmpty()) { + runnable.call(this, time.milliseconds()); + } + } + + @Override + boolean handleUnsupportedVersionException(UnsupportedVersionException exception) { + final long now = time.milliseconds(); + log.warn("The DescribeTopicPartitions API is not supported, using Metadata API to describe topics."); + runnable.call(generateDescribeTopicsCallWithMetadataApi(topicNamesList, topicFutures, options, now), now); + return false; + } + + @Override + void handleFailure(Throwable throwable) { + if (!(throwable instanceof UnsupportedVersionException)) { + completeAllExceptionally(topicFutures.values(), throwable); + } + } + }; + } + + private Map> handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi( + final Collection topicNames, + DescribeTopicsOptions options + ) { + final Map> topicFutures = new HashMap<>(topicNames.size()); + final ArrayList topicNamesList = new ArrayList<>(); + for (String topicName : topicNames) { + if (topicNameIsUnrepresentable(topicName)) { + KafkaFutureImpl future = new KafkaFutureImpl<>(); + future.completeExceptionally(new InvalidTopicException("The given topic name '" + + topicName + "' cannot be represented in a request.")); + topicFutures.put(topicName, future); + } else if (!topicFutures.containsKey(topicName)) { + topicFutures.put(topicName, new KafkaFutureImpl<>()); + topicNamesList.add(topicName); + } + } + + if (topicNamesList.isEmpty()) { + return new HashMap<>(topicFutures); + } + + // First, we need to retrieve the node info. 
+ DescribeClusterResult clusterResult = describeCluster(); + Map nodes; + try { + nodes = clusterResult.nodes().get().stream().collect(Collectors.toMap(Node::id, node -> node)); + } catch (InterruptedException | ExecutionException e) { + completeAllExceptionally(topicFutures.values(), e.getCause()); + return new HashMap<>(topicFutures); } + + final long now = time.milliseconds(); + + runnable.call( + generateDescribeTopicsCallWithDescribeTopicPartitionsApi(topicNamesList, topicFutures, nodes, options, now), + now + ); + return new HashMap<>(topicFutures); } @@ -2259,6 +2405,18 @@ void handleFailure(Throwable throwable) { return new HashMap<>(topicFutures); } + private TopicDescription getTopicDescriptionFromDescribeTopicsResponseTopic( + DescribeTopicPartitionsResponseTopic topic, + Map nodes + ) { + List partitionInfos = topic.partitions(); + List partitions = new ArrayList<>(partitionInfos.size()); + for (DescribeTopicPartitionsResponsePartition partitionInfo : partitionInfos) { + partitions.add(DescribeTopicPartitionsResponse.partitionToTopicPartitionInfo(partitionInfo, nodes)); + } + return new TopicDescription(topic.name(), topic.isInternal(), partitions, validAclOperations(topic.topicAuthorizedOperations()), topic.topicId()); + } + // AutoMQ for Kafka inject start @Override public GetNextNodeIdResult getNextNodeId(GetNextNodeIdOptions options) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java index 2d1c612a20..23add2b385 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java @@ -39,27 +39,24 @@ public class ListConsumerGroupsResult { this.all = new KafkaFutureImpl<>(); this.valid = new KafkaFutureImpl<>(); this.errors = new KafkaFutureImpl<>(); - future.thenApply(new KafkaFuture.BaseFunction, Void>() { - @Override - public Void apply(Collection results) { - ArrayList curErrors = new ArrayList<>(); - ArrayList curValid = new ArrayList<>(); - for (Object resultObject : results) { - if (resultObject instanceof Throwable) { - curErrors.add((Throwable) resultObject); - } else { - curValid.add((ConsumerGroupListing) resultObject); - } - } - if (!curErrors.isEmpty()) { - all.completeExceptionally(curErrors.get(0)); + future.thenApply((KafkaFuture.BaseFunction, Void>) results -> { + ArrayList curErrors = new ArrayList<>(); + ArrayList curValid = new ArrayList<>(); + for (Object resultObject : results) { + if (resultObject instanceof Throwable) { + curErrors.add((Throwable) resultObject); } else { - all.complete(curValid); + curValid.add((ConsumerGroupListing) resultObject); } - valid.complete(curValid); - errors.complete(curErrors); - return null; } + if (!curErrors.isEmpty()) { + all.completeExceptionally(curErrors.get(0)); + } else { + all.complete(curValid); + } + valid.complete(curValid); + errors.complete(curErrors); + return null; }); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java index 5eb00deb06..552b255995 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java @@ -57,20 +57,17 @@ public KafkaFuture partitionResult(final TopicPartition p */ public KafkaFuture> all() { return 
KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) - .thenApply(new KafkaFuture.BaseFunction>() { - @Override - public Map apply(Void v) { - Map offsets = new HashMap<>(futures.size()); - for (Map.Entry> entry : futures.entrySet()) { - try { - offsets.put(entry.getKey(), entry.getValue().get()); - } catch (InterruptedException | ExecutionException e) { - // This should be unreachable, because allOf ensured that all the futures completed successfully. - throw new RuntimeException(e); - } + .thenApply(v -> { + Map offsets = new HashMap<>(futures.size()); + for (Map.Entry> entry : futures.entrySet()) { + try { + offsets.put(entry.getKey(), entry.getValue().get()); + } catch (InterruptedException | ExecutionException e) { + // This should be unreachable, because allOf ensured that all the futures completed successfully. + throw new RuntimeException(e); } - return offsets; } + return offsets; }); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java b/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java index 3305de02c7..495ddb0974 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/MemberAssignment.java @@ -17,12 +17,12 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.Utils; import java.util.Collections; import java.util.HashSet; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; /** * A description of the assignments of a specific group member. @@ -36,7 +36,7 @@ public class MemberAssignment { * @param topicPartitions List of topic partitions */ public MemberAssignment(Set topicPartitions) { - this.topicPartitions = topicPartitions == null ? Collections.emptySet() : + this.topicPartitions = topicPartitions == null ? 
Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(topicPartitions)); } @@ -64,6 +64,6 @@ public Set topicPartitions() { @Override public String toString() { - return "(topicPartitions=" + Utils.join(topicPartitions, ",") + ")"; + return "(topicPartitions=" + topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(",")) + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java index f9a7008db7..02bce5e98c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewPartitionReassignment.java @@ -32,7 +32,7 @@ public class NewPartitionReassignment { * @throws IllegalArgumentException if no replicas are supplied */ public NewPartitionReassignment(List targetReplicas) { - if (targetReplicas == null || targetReplicas.size() == 0) + if (targetReplicas == null || targetReplicas.isEmpty()) throw new IllegalArgumentException("Cannot create a new partition reassignment without any replicas"); this.targetReplicas = Collections.unmodifiableList(new ArrayList<>(targetReplicas)); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java b/clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java index 5c5e371529..f5eac3718a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java @@ -85,7 +85,7 @@ public byte type() { private final byte type; private final String mechanismName; - private ScramMechanism(byte type) { + ScramMechanism(byte type) { this.type = type; this.mechanismName = toString().replace('_', '-'); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/SupportedVersionRange.java b/clients/src/main/java/org/apache/kafka/clients/admin/SupportedVersionRange.java index b85a392a65..3ba6303eac 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/SupportedVersionRange.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/SupportedVersionRange.java @@ -28,7 +28,7 @@ public class SupportedVersionRange { /** * Raises an exception unless the following conditions are met: - * 1 <= minVersion <= maxVersion. + * 0 <= minVersion <= maxVersion. * * @param minVersion The minimum version value. * @param maxVersion The maximum version value. diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/TopicDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/TopicDescription.java index e8700d4d06..c3bbaf318a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/TopicDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/TopicDescription.java @@ -20,12 +20,12 @@ import org.apache.kafka.common.TopicPartitionInfo; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.acl.AclOperation; -import org.apache.kafka.common.utils.Utils; import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; /** * A detailed description of a single topic in the cluster. 
@@ -135,6 +135,6 @@ public Set authorizedOperations() { @Override public String toString() { return "(name=" + name + ", internal=" + internal + ", partitions=" + - Utils.join(partitions, ",") + ", authorizedOperations=" + authorizedOperations + ")"; + partitions.stream().map(TopicPartitionInfo::toString).collect(Collectors.joining(",")) + ", authorizedOperations=" + authorizedOperations + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java index 83bd19460a..9c9d8e286b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java @@ -39,8 +39,8 @@ public interface AdminApiHandler { * Build the requests necessary for the given keys. The set of keys is derived by * {@link AdminApiDriver} during the lookup stage as the set of keys which all map * to the same destination broker. Handlers can choose to issue a single request for - * all of the provided keys (see {@link Batched}, issue one request per key (see - * {@link Unbatched}, or implement their own custom grouping logic if necessary. + * all of the provided keys (see {@link Batched}), issue one request per key (see + * {@link Unbatched}), or implement their own custom grouping logic if necessary. * * @param brokerId the target brokerId for the request * @param keys the set of keys that should be handled by this request diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategy.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategy.java index 02b68527c3..cffec69fd7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategy.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategy.java @@ -136,8 +136,8 @@ private CoordinatorKey requireSingletonAndType(Set keys) { } private void ensureSameType(Set keys) { - if (keys.size() < 1) { - throw new IllegalArgumentException("Unexpected size of key set: expected >= 1, but got " + keys.size()); + if (keys.isEmpty()) { + throw new IllegalArgumentException("Unexpected size of key set: expected >= 1, but got 0"); } if (keys.stream().filter(k -> k.type == type).collect(Collectors.toSet()).size() != keys.size()) { throw new IllegalArgumentException("Unexpected key set: expected all key to be of type " + type + ", but some key were not"); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java index 3ab4f2e7e6..7d99dbeb46 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java @@ -1143,6 +1143,16 @@ protected void close(Timer timer) { } } + protected void handlePollTimeoutExpiry() { + log.warn("consumer poll timeout has expired. This means the time between subsequent calls to poll() " + + "was longer than the configured max.poll.interval.ms, which typically implies that " + + "the poll loop is spending too much time processing messages. 
You can address this " + + "either by increasing max.poll.interval.ms or by reducing the maximum size of batches " + + "returned in poll() with max.poll.records."); + + maybeLeaveGroup("consumer poll timeout has expired."); + } + /** * Sends LeaveGroupRequest and logs the {@code leaveReason}, unless this member is using static membership or is already * not part of the group (ie does not have a valid member id, is in the UNJOINED state, or the coordinator is unknown). @@ -1508,13 +1518,7 @@ public void run() { } else if (heartbeat.pollTimeoutExpired(now)) { // the poll timeout has expired, which means that the foreground thread has stalled // in between calls to poll(). - log.warn("consumer poll timeout has expired. This means the time between subsequent calls to poll() " + - "was longer than the configured max.poll.interval.ms, which typically implies that " + - "the poll loop is spending too much time processing messages. You can address this " + - "either by increasing max.poll.interval.ms or by reducing the maximum size of batches " + - "returned in poll() with max.poll.records."); - - maybeLeaveGroup("consumer poll timeout has expired."); + handlePollTimeoutExpiry(); } else if (!heartbeat.shouldHeartbeat(now)) { // poll again after waiting for the retry backoff in case the heartbeat failed or the // coordinator disconnected. Note that the heartbeat timing takes account of @@ -1627,7 +1631,6 @@ public String toString() { } } - @SuppressWarnings("serial") private static class UnjoinedGroupException extends RetriableException { } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java index 727dac231a..587b05968b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java @@ -211,6 +211,10 @@ public boolean isSticky() { return partitionMovements.isSticky(); } + public Map partitionsTransferringOwnership() { + return partitionsTransferringOwnership; + } + private static class TopicComparator implements Comparator, Serializable { private static final long serialVersionUID = 1L; private final Map> map; @@ -579,7 +583,7 @@ protected List getAllTopicPartitions(List sortedAllTopic private class ConstrainedAssignmentBuilder extends AbstractAssignmentBuilder { private final Set partitionsWithMultiplePreviousOwners; - private final Set allRevokedPartitions; + private final Map maybeRevokedPartitions; // the consumers which may still be assigned one or more partitions to reach expected capacity private final List unfilledMembersWithUnderMinQuotaPartitions; @@ -610,7 +614,7 @@ private class ConstrainedAssignmentBuilder extends AbstractAssignmentBuilder { super(partitionsPerTopic, rackInfo, consumerToOwnedPartitions); this.partitionsWithMultiplePreviousOwners = partitionsWithMultiplePreviousOwners; - allRevokedPartitions = new HashSet<>(); + maybeRevokedPartitions = new HashMap<>(); unfilledMembersWithUnderMinQuotaPartitions = new LinkedList<>(); unfilledMembersWithExactlyMinQuotaPartitions = new LinkedList<>(); @@ -662,7 +666,13 @@ private void assignOwnedPartitions() { for (Map.Entry> consumerEntry : currentAssignment.entrySet()) { String consumer = consumerEntry.getKey(); List ownedPartitions = consumerEntry.getValue().stream() - .filter(tp -> !rackInfo.racksMismatch(consumer, tp)) + .filter(tp -> { + 
boolean mismatch = rackInfo.racksMismatch(consumer, tp); + if (mismatch) { + maybeRevokedPartitions.put(tp, consumer); + } + return !mismatch; + }) .sorted(Comparator.comparing(TopicPartition::partition).thenComparing(TopicPartition::topic)) .collect(Collectors.toList()); @@ -695,14 +705,18 @@ private void assignOwnedPartitions() { List maxQuotaPartitions = ownedPartitions.subList(0, maxQuota); consumerAssignment.addAll(maxQuotaPartitions); assignedPartitions.addAll(maxQuotaPartitions); - allRevokedPartitions.addAll(ownedPartitions.subList(maxQuota, ownedPartitions.size())); + for (TopicPartition topicPartition : ownedPartitions.subList(maxQuota, ownedPartitions.size())) { + maybeRevokedPartitions.put(topicPartition, consumer); + } } else { // consumer owned at least "minQuota" of partitions // so keep "minQuota" of the owned partitions, and revoke the rest of the partitions List minQuotaPartitions = ownedPartitions.subList(0, minQuota); consumerAssignment.addAll(minQuotaPartitions); assignedPartitions.addAll(minQuotaPartitions); - allRevokedPartitions.addAll(ownedPartitions.subList(minQuota, ownedPartitions.size())); + for (TopicPartition topicPartition : ownedPartitions.subList(minQuota, ownedPartitions.size())) { + maybeRevokedPartitions.put(topicPartition, consumer); + } // this consumer is potential maxQuota candidate since we're still under the number of expected members // with more than the minQuota partitions. Note, if the number of expected members with more than // the minQuota partitions is 0, it means minQuota == maxQuota, and there are no potentially unfilled @@ -729,8 +743,12 @@ private void assignRackAwareRoundRobin(List unassignedPartitions int assignmentCount = assignment.get(consumer).size() + 1; if (assignmentCount >= minQuota) { unfilledMembersWithUnderMinQuotaPartitions.remove(consumer); - if (assignmentCount < maxQuota) + // Only add this consumer if the current num members at maxQuota is less than the expected number + // since a consumer at minQuota can only be considered unfilled if it's possible to add another partition, + // which would bump it to maxQuota and exceed the expectedNumMembersWithOverMinQuotaPartitions + if (assignmentCount < maxQuota && (currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions)) { unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); + } } else { nextIndex++; } @@ -739,8 +757,15 @@ private void assignRackAwareRoundRobin(List unassignedPartitions int firstIndex = rackInfo.nextRackConsumer(unassignedPartition, unfilledMembersWithExactlyMinQuotaPartitions, 0); if (firstIndex >= 0) { consumer = unfilledMembersWithExactlyMinQuotaPartitions.get(firstIndex); - if (assignment.get(consumer).size() + 1 == maxQuota) + if (assignment.get(consumer).size() + 1 == maxQuota) { unfilledMembersWithExactlyMinQuotaPartitions.remove(firstIndex); + currentNumMembersWithOverMinQuotaPartitions++; + // Clear this once the current num consumers over minQuota reaches the expected number since this + // means all consumers at minQuota are now considered filled + if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) { + unfilledMembersWithExactlyMinQuotaPartitions.clear(); + } + } } } @@ -803,8 +828,10 @@ private int assignNewPartition(TopicPartition unassignedPartition, String consum // We already assigned all possible ownedPartitions, so we know this must be newly assigned to this consumer // or else the partition was actually claimed by multiple previous owners and had to be 
invalidated from all // members claimed ownedPartitions - if (allRevokedPartitions.contains(unassignedPartition) || partitionsWithMultiplePreviousOwners.contains(unassignedPartition)) + if ((maybeRevokedPartitions.containsKey(unassignedPartition) && !maybeRevokedPartitions.get(unassignedPartition).equals(consumer)) + || partitionsWithMultiplePreviousOwners.contains(unassignedPartition)) { partitionsTransferringOwnership.put(unassignedPartition, consumer); + } return consumerAssignment.size(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index fcd57469c2..6930cd0295 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -47,6 +47,8 @@ import org.apache.kafka.clients.consumer.internals.events.CommitEvent; import org.apache.kafka.clients.consumer.internals.events.CommitOnCloseEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackCompletedEvent; import org.apache.kafka.clients.consumer.internals.events.ConsumerRebalanceListenerCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; @@ -76,6 +78,7 @@ import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; @@ -99,6 +102,7 @@ import java.util.Collections; import java.util.ConcurrentModificationException; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -131,9 +135,9 @@ import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.refreshCommittedOffsets; +import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.apache.kafka.common.utils.Utils.closeQuietly; import static org.apache.kafka.common.utils.Utils.isBlank; -import static org.apache.kafka.common.utils.Utils.join; import static org.apache.kafka.common.utils.Utils.swallow; /** @@ -164,48 +168,14 @@ public class AsyncKafkaConsumer implements ConsumerDelegate { *
*     <li>{@link ConsumerRebalanceListener} callbacks that are to be executed on the application thread</li>
  • * */ - private class BackgroundEventProcessor extends EventProcessor { + private class BackgroundEventProcessor implements EventProcessor { - private final ApplicationEventHandler applicationEventHandler; private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker; - public BackgroundEventProcessor(final LogContext logContext, - final BlockingQueue backgroundEventQueue, - final ApplicationEventHandler applicationEventHandler, - final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker) { - super(logContext, backgroundEventQueue); - this.applicationEventHandler = applicationEventHandler; + public BackgroundEventProcessor(final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker) { this.rebalanceListenerInvoker = rebalanceListenerInvoker; } - /** - * Process the events—if any—that were produced by the {@link ConsumerNetworkThread network thread}. - * It is possible that {@link ErrorEvent an error} - * could occur when processing the events. In such cases, the processor will take a reference to the first - * error, continue to process the remaining events, and then throw the first error that occurred. - */ - @Override - public boolean process() { - AtomicReference firstError = new AtomicReference<>(); - - ProcessHandler processHandler = (event, error) -> { - if (error.isPresent()) { - KafkaException e = error.get(); - - if (!firstError.compareAndSet(null, e)) { - log.warn("An error occurred when processing the event: {}", e.getMessage(), e); - } - } - }; - - boolean hadEvents = process(processHandler); - - if (firstError.get() != null) - throw firstError.get(); - - return hadEvents; - } - @Override public void process(final BackgroundEvent event) { switch (event.type()) { @@ -228,13 +198,16 @@ private void process(final ErrorEvent event) { } private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { - ApplicationEvent invokedEvent = invokeRebalanceCallbacks( + ConsumerRebalanceListenerCallbackCompletedEvent invokedEvent = invokeRebalanceCallbacks( rebalanceListenerInvoker, event.methodName(), event.partitions(), event.future() ); applicationEventHandler.add(invokedEvent); + if (invokedEvent.error().isPresent()) { + throw invokedEvent.error().get(); + } } } @@ -244,7 +217,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private final KafkaConsumerMetrics kafkaConsumerMetrics; private Logger log; private final String clientId; + private final BlockingQueue backgroundEventQueue; private final BackgroundEventProcessor backgroundEventProcessor; + private final CompletableEventReaper backgroundEventReaper; private final Deserializers deserializers; /** @@ -273,6 +248,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { private final WakeupTrigger wakeupTrigger = new WakeupTrigger(); private final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; private final AtomicBoolean asyncCommitFenced; + // Last triggered async commit future. Used to wait until all previous async commits are completed. + // We only need to keep track of the last one, since they are guaranteed to complete in order. 
+ private CompletableFuture lastPendingAsyncCommit = null; // currentThread holds the threadId of the current thread accessing the AsyncKafkaConsumer // and is used to prevent multithreaded access @@ -288,6 +266,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { valueDeserializer, Time.SYSTEM, ApplicationEventHandler::new, + CompletableEventReaper::new, FetchCollector::new, ConsumerMetadata::new, new LinkedBlockingQueue<>() @@ -300,6 +279,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { final Deserializer valueDeserializer, final Time time, final ApplicationEventHandlerFactory applicationEventHandlerFactory, + final CompletableEventReaperFactory backgroundEventReaperFactory, final FetchCollectorFactory fetchCollectorFactory, final ConsumerMetadataFactory metadataFactory, final LinkedBlockingQueue backgroundEventQueue) { @@ -311,6 +291,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); this.autoCommitEnabled = config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); LogContext logContext = createLogContext(config, groupRebalanceConfig); + this.backgroundEventQueue = backgroundEventQueue; this.log = logContext.logger(getClass()); log.debug("Initializing the Kafka consumer"); @@ -339,10 +320,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { ApiVersions apiVersions = new ApiVersions(); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( - logContext, - backgroundEventQueue - ); + final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); // This FetchBuffer is shared between the application and network threads. 
this.fetchBuffer = new FetchBuffer(logContext); @@ -375,12 +353,12 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { ); final Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier(logContext, metadata, - applicationEventQueue, requestManagersSupplier); this.applicationEventHandler = applicationEventHandlerFactory.build( logContext, time, applicationEventQueue, + new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier); @@ -392,11 +370,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { new RebalanceCallbackMetricsManager(metrics) ); this.backgroundEventProcessor = new BackgroundEventProcessor( - logContext, - backgroundEventQueue, - applicationEventHandler, rebalanceListenerInvoker ); + this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); this.assignors = ConsumerPartitionAssignor.getAssignorInstances( config.getList(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)) @@ -441,6 +417,7 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { Time time, ApplicationEventHandler applicationEventHandler, BlockingQueue backgroundEventQueue, + CompletableEventReaper backgroundEventReaper, ConsumerRebalanceListenerInvoker rebalanceListenerInvoker, Metrics metrics, SubscriptionState subscriptions, @@ -458,12 +435,9 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.isolationLevel = IsolationLevel.READ_UNCOMMITTED; this.interceptors = Objects.requireNonNull(interceptors); this.time = time; - this.backgroundEventProcessor = new BackgroundEventProcessor( - logContext, - backgroundEventQueue, - applicationEventHandler, - rebalanceListenerInvoker - ); + this.backgroundEventQueue = backgroundEventQueue; + this.backgroundEventProcessor = new BackgroundEventProcessor(rebalanceListenerInvoker); + this.backgroundEventReaper = backgroundEventReaper; this.metrics = metrics; this.groupMetadata.set(initializeGroupMetadata(groupId, Optional.empty())); this.metadata = metadata; @@ -523,11 +497,8 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - BlockingQueue backgroundEventQueue = new LinkedBlockingQueue<>(); - BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( - logContext, - backgroundEventQueue - ); + this.backgroundEventQueue = new LinkedBlockingQueue<>(); + BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); ConsumerRebalanceListenerInvoker rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, subscriptions, @@ -563,21 +534,17 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier( logContext, metadata, - applicationEventQueue, requestManagersSupplier ); this.applicationEventHandler = new ApplicationEventHandler(logContext, time, applicationEventQueue, + new CompletableEventReaper(logContext), applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier); - this.backgroundEventProcessor = new BackgroundEventProcessor( - logContext, - backgroundEventQueue, - 
applicationEventHandler, - rebalanceListenerInvoker - ); + this.backgroundEventProcessor = new BackgroundEventProcessor(rebalanceListenerInvoker); + this.backgroundEventReaper = new CompletableEventReaper(logContext); } // auxiliary interface for testing @@ -587,6 +554,7 @@ ApplicationEventHandler build( final LogContext logContext, final Time time, final BlockingQueue applicationEventQueue, + final CompletableEventReaper applicationEventReaper, final Supplier applicationEventProcessorSupplier, final Supplier networkClientDelegateSupplier, final Supplier requestManagersSupplier @@ -594,6 +562,13 @@ ApplicationEventHandler build( } + // auxiliary interface for testing + interface CompletableEventReaperFactory { + + CompletableEventReaper build(final LogContext logContext); + + } + // auxiliary interface for testing interface FetchCollectorFactory { @@ -767,8 +742,7 @@ public void commitAsync(Map offsets, OffsetCo acquireAndEnsureOpen(); try { AsyncCommitEvent asyncCommitEvent = new AsyncCommitEvent(offsets); - CompletableFuture future = commit(asyncCommitEvent); - future.whenComplete((r, t) -> { + lastPendingAsyncCommit = commit(asyncCommitEvent).whenComplete((r, t) -> { if (t == null) { offsetCommitCallbackInvoker.enqueueInterceptorInvocation(offsets); @@ -793,9 +767,9 @@ public void commitAsync(Map offsets, OffsetCo } private CompletableFuture commit(final CommitEvent commitEvent) { - maybeThrowFencedInstanceException(); - maybeInvokeCommitCallbacks(); maybeThrowInvalidGroupIdException(); + maybeThrowFencedInstanceException(); + offsetCommitCallbackInvoker.executeCallbacks(); Map offsets = commitEvent.offsets(); log.debug("Committing offsets: {}", offsets); @@ -901,6 +875,8 @@ public long position(TopicPartition partition, Duration timeout) { return position.offset; updateFetchPositions(timer); + timer.update(); + wakeupTrigger.maybeTriggerWakeup(); } while (timer.notExpired()); throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before the position " + @@ -938,14 +914,12 @@ public Map committed(final Set committedOffsets = applicationEventHandler.addAndGet(event, - timer); + final Map committedOffsets = applicationEventHandler.addAndGet(event); committedOffsets.forEach(this::updateLastSeenEpochIfNewer); return committedOffsets; } catch (TimeoutException e) { @@ -991,12 +965,11 @@ public List partitionsFor(String topic, Duration timeout) { throw new TimeoutException(); } - final Timer timer = time.timer(timeout); - final TopicMetadataEvent topicMetadataEvent = new TopicMetadataEvent(topic, timer); + final TopicMetadataEvent topicMetadataEvent = new TopicMetadataEvent(topic, calculateDeadlineMs(time, timeout)); wakeupTrigger.setActiveTask(topicMetadataEvent.future()); try { Map> topicMetadata = - applicationEventHandler.addAndGet(topicMetadataEvent, timer); + applicationEventHandler.addAndGet(topicMetadataEvent); return topicMetadata.getOrDefault(topic, Collections.emptyList()); } finally { @@ -1020,11 +993,10 @@ public Map> listTopics(Duration timeout) { throw new TimeoutException(); } - final Timer timer = time.timer(timeout); - final AllTopicsMetadataEvent topicMetadataEvent = new AllTopicsMetadataEvent(timer); + final AllTopicsMetadataEvent topicMetadataEvent = new AllTopicsMetadataEvent(calculateDeadlineMs(time, timeout)); wakeupTrigger.setActiveTask(topicMetadataEvent.future()); try { - return applicationEventHandler.addAndGet(topicMetadataEvent, timer); + return applicationEventHandler.addAndGet(topicMetadataEvent); } finally { wakeupTrigger.clearTask(); } @@ 
-1092,18 +1064,28 @@ public Map offsetsForTimes(Map entry.getValue().buildOffsetAndTimestamp())); + } catch (TimeoutException e) { + throw new TimeoutException("Failed to get offsets by times in " + timeout.toMillis() + "ms"); + } } finally { release(); } @@ -1141,21 +1123,33 @@ private Map beginningOrEndOffset(Collection timestampToSearch = partitions - .stream() - .collect(Collectors.toMap(Function.identity(), tp -> timestamp)); - Timer timer = time.timer(timeout); + .stream() + .collect(Collectors.toMap(Function.identity(), tp -> timestamp)); ListOffsetsEvent listOffsetsEvent = new ListOffsetsEvent( - timestampToSearch, - false, - timer); - Map offsetAndTimestampMap = applicationEventHandler.addAndGet( - listOffsetsEvent, - timer); - return offsetAndTimestampMap - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())); + timestampToSearch, + calculateDeadlineMs(time, timeout), + false); + + // If timeout is set to zero return empty immediately; otherwise try to get the results + // and throw timeout exception if it cannot complete in time. + if (timeout.isZero()) { + applicationEventHandler.add(listOffsetsEvent); + return listOffsetsEvent.emptyResults(); + } + + Map offsetAndTimestampMap; + try { + offsetAndTimestampMap = applicationEventHandler.addAndGet(listOffsetsEvent); + return offsetAndTimestampMap.entrySet() + .stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().offset())); + } catch (TimeoutException e) { + throw new TimeoutException("Failed to get offsets by times in " + timeout.toMillis() + "ms"); + } } finally { release(); } @@ -1245,11 +1239,17 @@ private void close(Duration timeout, boolean swallowException) { // Prepare shutting down the network thread prepareShutdown(closeTimer, firstException); closeTimer.update(); + swallow(log, Level.ERROR, "Failed invoking asynchronous commit callback.", + () -> awaitPendingAsyncCommitsAndExecuteCommitCallbacks(closeTimer, false), firstException); if (applicationEventHandler != null) closeQuietly(() -> applicationEventHandler.close(Duration.ofMillis(closeTimer.remainingMs())), "Failed shutting down network thread", firstException); - swallow(log, Level.ERROR, "Failed invoking asynchronous commit callback.", this::maybeInvokeCommitCallbacks, - firstException); closeTimer.update(); + + // close() can be called from inside one of the constructors. In that case, it's possible that neither + // the reaper nor the background event queue were constructed, so check them first to avoid NPE. 
+ if (backgroundEventReaper != null && backgroundEventQueue != null) + backgroundEventReaper.reap(backgroundEventQueue); + closeQuietly(interceptors, "consumer interceptors", firstException); closeQuietly(kafkaConsumerMetrics, "kafka consumer metrics", firstException); closeQuietly(metrics, "consumer metrics", firstException); @@ -1276,21 +1276,21 @@ private void close(Duration timeout, boolean swallowException) { void prepareShutdown(final Timer timer, final AtomicReference firstException) { if (!groupMetadata.get().isPresent()) return; - maybeAutoCommitSync(autoCommitEnabled, timer); + + if (autoCommitEnabled) + autoCommitSync(timer); + applicationEventHandler.add(new CommitOnCloseEvent()); completeQuietly( () -> { maybeRevokePartitions(); - applicationEventHandler.addAndGet(new LeaveOnCloseEvent(timer), timer); + applicationEventHandler.addAndGet(new LeaveOnCloseEvent(calculateDeadlineMs(timer))); }, "Failed to send leaveGroup heartbeat with a timeout(ms)=" + timer.timeoutMs(), firstException); } // Visible for testing - void maybeAutoCommitSync(final boolean shouldAutoCommit, - final Timer timer) { - if (!shouldAutoCommit) - return; + void autoCommitSync(final Timer timer) { Map allConsumed = subscriptions.allConsumed(); log.debug("Sending synchronous auto-commit of offsets {} on closing", allConsumed); try { @@ -1357,9 +1357,12 @@ public void commitSync(Map offsets, Duration acquireAndEnsureOpen(); long commitStart = time.nanoseconds(); try { - Timer requestTimer = time.timer(timeout.toMillis()); - SyncCommitEvent syncCommitEvent = new SyncCommitEvent(offsets, requestTimer); + SyncCommitEvent syncCommitEvent = new SyncCommitEvent(offsets, calculateDeadlineMs(time, timeout)); CompletableFuture commitFuture = commit(syncCommitEvent); + + Timer requestTimer = time.timer(timeout.toMillis()); + awaitPendingAsyncCommitsAndExecuteCommitCallbacks(requestTimer, true); + wakeupTrigger.setActiveTask(commitFuture); ConsumerUtils.getResult(commitFuture, requestTimer); interceptors.onCommit(offsets); @@ -1370,6 +1373,31 @@ public void commitSync(Map offsets, Duration } } + private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, boolean enableWakeup) { + if (lastPendingAsyncCommit == null) { + return; + } + + try { + final CompletableFuture futureToAwait = new CompletableFuture<>(); + // We don't want the wake-up trigger to complete our pending async commit future, + // so create new future here. Any errors in the pending async commit will be handled + // by the async commit future / the commit callback - here, we just want to wait for it to complete. + lastPendingAsyncCommit.whenComplete((v, t) -> futureToAwait.complete(null)); + if (enableWakeup) { + wakeupTrigger.setActiveTask(futureToAwait); + } + ConsumerUtils.getResult(futureToAwait, timer); + lastPendingAsyncCommit = null; + } finally { + if (enableWakeup) { + wakeupTrigger.clearTask(); + } + timer.update(); + } + offsetCommitCallbackInvoker.executeCallbacks(); + } + @Override public Uuid clientInstanceId(Duration timeout) { if (!clientTelemetryReporter.isPresent()) { @@ -1440,7 +1468,8 @@ public void assign(Collection partitions) { // See the ApplicationEventProcessor.process() method that handles this event for more detail. 
applicationEventHandler.add(new AssignmentChangeEvent(subscriptions.allConsumed(), time.milliseconds())); - log.info("Assigned to partition(s): {}", join(partitions, ", ")); + log.info("Assigned to partition(s): {}", partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); + if (subscriptions.assignFromUser(new HashSet<>(partitions))) applicationEventHandler.add(new NewTopicsMetadataUpdateRequestEvent()); } finally { @@ -1462,8 +1491,10 @@ private void updatePatternSubscription(Cluster cluster) { final Set topicsToSubscribe = cluster.topics().stream() .filter(subscriptions::matchesSubscribedPattern) .collect(Collectors.toSet()); - if (subscriptions.subscribeFromPattern(topicsToSubscribe)) + if (subscriptions.subscribeFromPattern(topicsToSubscribe)) { + applicationEventHandler.add(new SubscriptionChangeEvent()); metadata.requestUpdateForNewTopics(); + } } @Override @@ -1473,12 +1504,12 @@ public void unsubscribe() { fetchBuffer.retainAll(Collections.emptySet()); if (groupMetadata.get().isPresent()) { Timer timer = time.timer(Long.MAX_VALUE); - UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(timer); + UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(calculateDeadlineMs(timer)); applicationEventHandler.add(unsubscribeEvent); log.info("Unsubscribing all topics or patterns and assigned partitions"); try { - processBackgroundEvents(backgroundEventProcessor, unsubscribeEvent.future(), timer); + processBackgroundEvents(unsubscribeEvent.future(), timer); log.info("Unsubscribed all topics or patterns and assigned partitions"); } catch (TimeoutException e) { log.error("Failed while waiting for the unsubscribe event to complete"); @@ -1544,7 +1575,8 @@ private Fetch pollForFetches(Timer timer) { try { fetchBuffer.awaitNotEmpty(pollTimer); } catch (InterruptException e) { - log.trace("Timeout during fetch", e); + log.trace("Interrupt during fetch", e); + throw e; } finally { timer.update(pollTimer.currentTimeMs()); wakeupTrigger.clearTask(); @@ -1576,7 +1608,7 @@ private Fetch collectFetch() { * Set the fetch position to the committed position (if there is one) * or reset it using the offset reset policy the user has configured. * - * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details + * @throws AuthenticationException If authentication fails. See the exception for more details * @throws NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is * defined * @return true iff the operation completed without timing out @@ -1586,7 +1618,7 @@ private boolean updateFetchPositions(final Timer timer) { // Validate positions using the partition leader end offsets, to detect if any partition // has been truncated due to a leader change. This will trigger an OffsetForLeaderEpoch // request, retrieve the partition end offsets, and validate the current position against it. - applicationEventHandler.addAndGet(new ValidatePositionsEvent(timer), timer); + applicationEventHandler.addAndGet(new ValidatePositionsEvent(calculateDeadlineMs(timer))); cachedSubscriptionHasAllFetchPositions = subscriptions.hasAllFetchPositions(); if (cachedSubscriptionHasAllFetchPositions) return true; @@ -1609,7 +1641,7 @@ private boolean updateFetchPositions(final Timer timer) { // which are awaiting reset. This will trigger a ListOffset request, retrieve the // partition offsets according to the strategy (ex. earliest, latest), and update the // positions. 
- applicationEventHandler.addAndGet(new ResetPositionsEvent(timer), timer); + applicationEventHandler.addAndGet(new ResetPositionsEvent(calculateDeadlineMs(timer))); return true; } catch (TimeoutException e) { return false; @@ -1642,13 +1674,16 @@ private boolean initWithCommittedOffsetsIfNeeded(Timer timer) { final FetchCommittedOffsetsEvent event = new FetchCommittedOffsetsEvent( initializingPartitions, - timer); - final Map offsets = applicationEventHandler.addAndGet(event, timer); + calculateDeadlineMs(timer)); + wakeupTrigger.setActiveTask(event.future()); + final Map offsets = applicationEventHandler.addAndGet(event); refreshCommittedOffsets(offsets, metadata, subscriptions); return true; } catch (TimeoutException e) { log.error("Couldn't refresh committed offsets before timeout expired"); return false; + } finally { + wakeupTrigger.clearTask(); } } @@ -1666,12 +1701,10 @@ private void updateLastSeenEpochIfNewer(TopicPartition topicPartition, OffsetAnd @Override public boolean updateAssignmentMetadataIfNeeded(Timer timer) { maybeThrowFencedInstanceException(); - maybeInvokeCommitCallbacks(); - backgroundEventProcessor.process(); + offsetCommitCallbackInvoker.executeCallbacks(); + maybeUpdateSubscriptionMetadata(); + processBackgroundEvents(); - // Keeping this updateAssignmentMetadataIfNeeded wrapping up the updateFetchPositions as - // in the previous implementation, because it will eventually involve group coordination - // logic return updateFetchPositions(timer); } @@ -1750,8 +1783,8 @@ private void subscribeInternal(Pattern pattern, Optional topics, Optional(topics), listener)) metadata.requestUpdateForNewTopics(); @@ -1796,6 +1829,40 @@ private void subscribeInternal(Collection topics, Optional firstError = new AtomicReference<>(); + + LinkedList events = new LinkedList<>(); + backgroundEventQueue.drainTo(events); + + for (BackgroundEvent event : events) { + try { + if (event instanceof CompletableEvent) + backgroundEventReaper.add((CompletableEvent) event); + + backgroundEventProcessor.process(event); + } catch (Throwable t) { + KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); + + if (!firstError.compareAndSet(null, e)) + log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); + } + } + + backgroundEventReaper.reap(time.milliseconds()); + + if (firstError.get() != null) + throw firstError.get(); + + return !events.isEmpty(); + } + /** * This method can be used by cases where the caller has an event that needs to both block for completion but * also process background events. For some events, in order to fully process the associated logic, the @@ -1818,48 +1885,39 @@ private void subscribeInternal(Collection topics, Optional T processBackgroundEvents(EventProcessor eventProcessor, - Future future, - Timer timer) { - log.trace("Will wait up to {} ms for future {} to complete", timer.remainingMs(), future); - + T processBackgroundEvents(Future future, Timer timer) { do { - boolean hadEvents = eventProcessor.process(); + boolean hadEvents = processBackgroundEvents(); try { if (future.isDone()) { // If the event is done (either successfully or otherwise), go ahead and attempt to return // without waiting. We use the ConsumerUtils.getResult() method here to handle the conversion // of the exception types. 
- T result = ConsumerUtils.getResult(future); - log.trace("Future {} completed successfully", future); - return result; + return ConsumerUtils.getResult(future); } else if (!hadEvents) { // If the above processing yielded no events, then let's sit tight for a bit to allow the - // background thread to either a) finish the task, or b) populate the background event + // background thread to either finish the task, or populate the background event // queue with things to process in our next loop. Timer pollInterval = time.timer(100L); - log.trace("Waiting {} ms for future {} to complete", pollInterval.remainingMs(), future); - T result = ConsumerUtils.getResult(future, pollInterval); - log.trace("Future {} completed successfully", future); - return result; + return ConsumerUtils.getResult(future, pollInterval); } } catch (TimeoutException e) { // Ignore this as we will retry the event until the timeout expires. @@ -1868,7 +1926,6 @@ T processBackgroundEvents(EventProcessor eventProcessor, } } while (timer.notExpired()); - log.trace("Future {} did not complete within timeout", future); throw new TimeoutException("Operation timed out before completion"); } @@ -1934,13 +1991,15 @@ private void maybeThrowFencedInstanceException() { } } - private void maybeInvokeCommitCallbacks() { - offsetCommitCallbackInvoker.executeCallbacks(); - } - // Visible for testing SubscriptionState subscriptions() { return subscriptions; } + private void maybeUpdateSubscriptionMetadata() { + if (subscriptions.hasPatternSubscription()) { + updatePatternSubscription(metadata.fetch()); + } + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 67ea6eb313..577cf7dee6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -139,7 +139,7 @@ public CommitRequestManager( if (config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) { final long autoCommitInterval = Integer.toUnsignedLong(config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG)); - this.autoCommitState = Optional.of(new AutoCommitState(time, autoCommitInterval)); + this.autoCommitState = Optional.of(new AutoCommitState(time, autoCommitInterval, logContext)); } else { this.autoCommitState = Optional.empty(); } @@ -236,6 +236,10 @@ private CompletableFuture> requestAutoCom * If the request completes with a retriable error, this will reset the auto-commit timer with * the exponential backoff. If it fails with a non-retriable error, no action is taken, so * the next commit will be generated when the interval expires. + *
    + * This will not generate a new commit request if a previous one hasn't received a response. + * In that case, the next auto-commit request will be sent on the next call to poll, after a + * response for the in-flight is received. */ public void maybeAutoCommitAsync() { if (autoCommitEnabled() && autoCommitState.get().shouldAutoCommit()) { @@ -274,11 +278,17 @@ private void maybeResetTimerWithBackoff(final CompletableFuture + *
+ * <li>Considers {@link Errors#STALE_MEMBER_EPOCH} as a retriable error, and will retry it
+ * including the latest member ID and epoch received from the broker.</li>
+ *
+ * <li>Considers {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} as a fatal error, and will not
+ * retry it although the error extends RetriableException. The reason is that if a topic
+ * or partition is deleted, revocation would not finish in time since the auto commit would keep retrying.</li>
  • + * + * + * Also note that this will generate a commit request even if there is another one in-flight, + * generated by the auto-commit on the interval logic, to ensure that the latest offsets are + * committed before revoking partitions. * * @return Future that will complete when the offsets are successfully committed. It will * complete exceptionally if the commit fails with a non-retriable error, or if the retry @@ -1068,9 +1078,9 @@ private void onSuccess(final long currentTimeMs, } } - private CompletableFuture> chainFuture( - final CompletableFuture> otherFuture) { - return this.future.whenComplete((r, t) -> { + private void chainFuture( + final CompletableFuture> otherFuture) { + this.future.whenComplete((r, t) -> { if (t != null) { otherFuture.completeExceptionally(t); } else { @@ -1136,7 +1146,7 @@ private CompletableFuture> addOffsetFetch if (dupe.isPresent() || inflight.isPresent()) { log.info("Duplicated OffsetFetchRequest: " + request.requestedPartitions); - dupe.orElseGet(() -> inflight.get()).chainFuture(request.future); + dupe.orElseGet(inflight::get).chainFuture(request.future); } else { this.unsentOffsetFetches.add(request); } @@ -1150,8 +1160,6 @@ private CompletableFuture> addOffsetFetch * backoff on failed attempt. See {@link RequestState}. */ List drain(final long currentTimeMs) { - List unsentRequests = new ArrayList<>(); - // not ready to sent request List unreadyCommitRequests = unsentOffsetCommits.stream() .filter(request -> !request.canSendRequest(currentTimeMs)) @@ -1160,12 +1168,11 @@ List drain(final long currentTimeMs) { failAndRemoveExpiredCommitRequests(currentTimeMs); // Add all unsent offset commit requests to the unsentRequests list - unsentRequests.addAll( - unsentOffsetCommits.stream() - .filter(request -> request.canSendRequest(currentTimeMs)) - .peek(request -> request.onSendAttempt(currentTimeMs)) - .map(OffsetCommitRequestState::toUnsentRequest) - .collect(Collectors.toList())); + List unsentRequests = unsentOffsetCommits.stream() + .filter(request -> request.canSendRequest(currentTimeMs)) + .peek(request -> request.onSendAttempt(currentTimeMs)) + .map(OffsetCommitRequestState::toUnsentRequest) + .collect(Collectors.toCollection(ArrayList::new)); // Partition the unsent offset fetch requests into sendable and non-sendable lists Map> partitionedBySendability = @@ -1213,8 +1220,9 @@ private void clearAll() { } private List drainPendingCommits() { - ArrayList res = new ArrayList<>(); - res.addAll(unsentOffsetCommits.stream().map(OffsetCommitRequestState::toUnsentRequest).collect(Collectors.toList())); + List res = unsentOffsetCommits.stream() + .map(OffsetCommitRequestState::toUnsentRequest) + .collect(Collectors.toCollection(ArrayList::new)); clearAll(); return res; } @@ -1228,16 +1236,27 @@ private static class AutoCommitState { private final long autoCommitInterval; private boolean hasInflightCommit; + private final Logger log; + public AutoCommitState( final Time time, - final long autoCommitInterval) { + final long autoCommitInterval, + final LogContext logContext) { this.autoCommitInterval = autoCommitInterval; this.timer = time.timer(autoCommitInterval); this.hasInflightCommit = false; + this.log = logContext.logger(getClass()); } public boolean shouldAutoCommit() { - return !this.hasInflightCommit && this.timer.isExpired(); + if (!this.timer.isExpired()) { + return false; + } + if (this.hasInflightCommit) { + log.trace("Skipping auto-commit on the interval because a previous one is still in-flight."); + return false; + } + return true; } 
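Note on the auto-commit change above: the interval-based path now refuses to send a new request while a previous auto-commit is still waiting for a response, and simply tries again on a later poll. The snippet below is a minimal stand-alone sketch of that gating logic, not code from this patch; the class name AutoCommitGate and its fields are hypothetical, and plain millisecond longs stand in for Kafka's internal Timer utility.

// Illustrative sketch only, not part of this patch.
final class AutoCommitGate {
    private final long intervalMs;
    private long nextCommitAtMs;
    private boolean hasInflightCommit;

    AutoCommitGate(long intervalMs, long nowMs) {
        this.intervalMs = intervalMs;
        this.nextCommitAtMs = nowMs + intervalMs;
    }

    boolean shouldAutoCommit(long nowMs) {
        if (nowMs < nextCommitAtMs)
            return false;   // interval has not elapsed yet
        if (hasInflightCommit)
            return false;   // previous auto-commit still awaiting a response; try again on the next poll
        return true;
    }

    void onCommitSent(long nowMs) {
        hasInflightCommit = true;
        nextCommitAtMs = nowMs + intervalMs;   // restart the interval
    }

    void onCommitResponse() {
        hasInflightCommit = false;
    }
}

The effect is that a slow commit response delays the next auto-commit rather than stacking additional requests behind it.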
public void resetTimer() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java index 8959345bff..5244af9c82 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.errors.RecordDeserializationException; +import org.apache.kafka.common.errors.RecordDeserializationException.DeserializationExceptionOrigin; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; @@ -311,25 +312,39 @@ ConsumerRecord parseRecord(Deserializers deserializers, Optional leaderEpoch, TimestampType timestampType, Record record) { + ByteBuffer keyBytes = record.key(); + ByteBuffer valueBytes = record.value(); + Headers headers = new RecordHeaders(record.headers()); + K key; + V value; try { - long offset = record.offset(); - long timestamp = record.timestamp(); - Headers headers = new RecordHeaders(record.headers()); - ByteBuffer keyBytes = record.key(); - K key = keyBytes == null ? null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); - ByteBuffer valueBytes = record.value(); - V value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); - return new ConsumerRecord<>(partition.topic(), partition.partition(), offset, - timestamp, timestampType, - keyBytes == null ? ConsumerRecord.NULL_SIZE : keyBytes.remaining(), - valueBytes == null ? ConsumerRecord.NULL_SIZE : valueBytes.remaining(), - key, value, headers, leaderEpoch); + key = keyBytes == null ? null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); } catch (RuntimeException e) { - log.error("Deserializers with error: {}", deserializers); - throw new RecordDeserializationException(partition, record.offset(), - "Error deserializing key/value for partition " + partition + - " at offset " + record.offset() + ". If needed, please seek past the record to continue consumption.", e); + log.error("Key Deserializers with error: {}", deserializers); + throw newRecordDeserializationException(DeserializationExceptionOrigin.KEY, partition, timestampType, record, e, headers); } + try { + value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); + } catch (RuntimeException e) { + log.error("Value Deserializers with error: {}", deserializers); + throw newRecordDeserializationException(DeserializationExceptionOrigin.VALUE, partition, timestampType, record, e, headers); + } + return new ConsumerRecord<>(partition.topic(), partition.partition(), record.offset(), + record.timestamp(), timestampType, + keyBytes == null ? ConsumerRecord.NULL_SIZE : keyBytes.remaining(), + valueBytes == null ? 
ConsumerRecord.NULL_SIZE : valueBytes.remaining(), + key, value, headers, leaderEpoch); + } + + private static RecordDeserializationException newRecordDeserializationException(DeserializationExceptionOrigin origin, + TopicPartition partition, + TimestampType timestampType, + Record record, + RuntimeException e, + Headers headers) { + return new RecordDeserializationException(origin, partition, record.offset(), record.timestamp(), timestampType, record.key(), record.value(), headers, + "Error deserializing " + origin.name() + " for partition " + partition + " at offset " + record.offset() + + ". If needed, please seek past the record to continue consumption.", e); } private Optional maybeLeaderEpoch(int leaderEpoch) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java index 5af8633806..13e4a194ea 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java @@ -1554,24 +1554,23 @@ public String toString() { } private class ConsumerCoordinatorMetrics { - private final String metricGrpName; private final Sensor commitSensor; private ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) { - this.metricGrpName = metricGrpPrefix + COORDINATOR_METRICS_SUFFIX; + String metricGrpName = metricGrpPrefix + COORDINATOR_METRICS_SUFFIX; this.commitSensor = metrics.sensor("commit-latency"); this.commitSensor.add(metrics.metricName("commit-latency-avg", - this.metricGrpName, + metricGrpName, "The average time taken for a commit request"), new Avg()); this.commitSensor.add(metrics.metricName("commit-latency-max", - this.metricGrpName, + metricGrpName, "The max time taken for a commit request"), new Max()); this.commitSensor.add(createMeter(metrics, metricGrpName, "commit", "commit calls")); Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions(); metrics.addMetric(metrics.metricName("assigned-partitions", - this.metricGrpName, + metricGrpName, "The number of partitions currently assigned to this consumer"), numParts); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java index e119153af0..7b552b1a05 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java @@ -33,12 +33,11 @@ public ConsumerMetrics(Set metricsTags, String metricGrpPrefix) { } public ConsumerMetrics(String metricGroupPrefix) { - this(new HashSet(), metricGroupPrefix); + this(new HashSet<>(), metricGroupPrefix); } private List getAllTemplates() { - List l = new ArrayList<>(this.fetcherMetrics.getAllTemplates()); - return l; + return new ArrayList<>(this.fetcherMetrics.getAllTemplates()); } public static void main(String[] args) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java index 81b6d9a1b7..4699f00c15 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java @@ 
-708,12 +708,7 @@ public void clean() { // the lock protects removal from a concurrent put which could otherwise mutate the // queue after it has been removed from the map synchronized (unsent) { - Iterator> iterator = unsent.values().iterator(); - while (iterator.hasNext()) { - ConcurrentLinkedQueue requests = iterator.next(); - if (requests.isEmpty()) - iterator.remove(); - } + unsent.values().removeIf(ConcurrentLinkedQueue::isEmpty); } } @@ -722,13 +717,13 @@ public Collection remove(Node node) { // queue after it has been removed from the map synchronized (unsent) { ConcurrentLinkedQueue requests = unsent.remove(node); - return requests == null ? Collections.emptyList() : requests; + return requests == null ? Collections.emptyList() : requests; } } public Iterator requestIterator(Node node) { ConcurrentLinkedQueue requests = unsent.get(node); - return requests == null ? Collections.emptyIterator() : requests.iterator(); + return requests == null ? Collections.emptyIterator() : requests.iterator(); } public Collection nodes() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index aa352cd68a..adee659460 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -20,6 +20,8 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.common.internals.IdempotentCloser; import org.apache.kafka.common.requests.AbstractRequest; import org.apache.kafka.common.utils.KafkaThread; @@ -31,9 +33,11 @@ import java.io.Closeable; import java.time.Duration; import java.util.Collection; +import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.concurrent.BlockingQueue; import java.util.function.Supplier; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; @@ -50,6 +54,8 @@ public class ConsumerNetworkThread extends KafkaThread implements Closeable { private static final String BACKGROUND_THREAD_NAME = "consumer_background_thread"; private final Time time; private final Logger log; + private final BlockingQueue applicationEventQueue; + private final CompletableEventReaper applicationEventReaper; private final Supplier applicationEventProcessorSupplier; private final Supplier networkClientDelegateSupplier; private final Supplier requestManagersSupplier; @@ -63,12 +69,16 @@ public class ConsumerNetworkThread extends KafkaThread implements Closeable { public ConsumerNetworkThread(LogContext logContext, Time time, + BlockingQueue applicationEventQueue, + CompletableEventReaper applicationEventReaper, Supplier applicationEventProcessorSupplier, Supplier networkClientDelegateSupplier, Supplier requestManagersSupplier) { super(BACKGROUND_THREAD_NAME, true); this.time = time; this.log = logContext.logger(getClass()); + this.applicationEventQueue = applicationEventQueue; + this.applicationEventReaper = applicationEventReaper; 
this.applicationEventProcessorSupplier = applicationEventProcessorSupplier; this.networkClientDelegateSupplier = networkClientDelegateSupplier; this.requestManagersSupplier = requestManagersSupplier; @@ -125,10 +135,7 @@ void initializeResources() { * */ void runOnce() { - // Process the events—if any—that were produced by the application thread. It is possible that when processing - // an event generates an error. In such cases, the processor will log an exception, but we do not want those - // errors to be propagated to the caller. - applicationEventProcessor.process(); + processApplicationEvents(); final long currentTimeMs = time.milliseconds(); final long pollWaitTimeMs = requestManagers.entries().stream() @@ -144,6 +151,36 @@ void runOnce() { .map(Optional::get) .map(rm -> rm.maximumTimeToWait(currentTimeMs)) .reduce(Long.MAX_VALUE, Math::min); + + reapExpiredApplicationEvents(currentTimeMs); + } + + /** + * Process the events—if any—that were produced by the application thread. + */ + private void processApplicationEvents() { + LinkedList events = new LinkedList<>(); + applicationEventQueue.drainTo(events); + + for (ApplicationEvent event : events) { + try { + if (event instanceof CompletableEvent) + applicationEventReaper.add((CompletableEvent) event); + + applicationEventProcessor.process(event); + } catch (Throwable t) { + log.warn("Error processing event {}", t.getMessage(), t); + } + } + } + + /** + * "Complete" any events that have expired. This cleanup step should only be called after the network I/O + * thread has made at least one call to {@link NetworkClientDelegate#poll(long, long) poll} so that each event + * is given least one attempt to satisfy any network requests before checking if a timeout has expired. + */ + private void reapExpiredApplicationEvents(long currentTimeMs) { + applicationEventReaper.reap(currentTimeMs); } /** @@ -273,9 +310,10 @@ void cleanup() { log.error("Unexpected error during shutdown. 
Proceed with closing.", e); } finally { sendUnsentRequests(timer); + applicationEventReaper.reap(applicationEventQueue); + closeQuietly(requestManagers, "request managers"); closeQuietly(networkClientDelegate, "network client delegate"); - closeQuietly(applicationEventProcessor, "application event processor"); log.debug("Closed the consumer network thread"); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java index dcdd303fe9..1c055ba82f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java @@ -24,12 +24,12 @@ import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.util.Optional; import java.util.Set; import java.util.SortedSet; +import java.util.stream.Collectors; /** * This class encapsulates the invocation of the callback methods defined in the {@link ConsumerRebalanceListener} @@ -54,7 +54,7 @@ public class ConsumerRebalanceListenerInvoker { } public Exception invokePartitionsAssigned(final SortedSet assignedPartitions) { - log.info("Adding newly assigned partitions: {}", Utils.join(assignedPartitions, ", ")); + log.info("Adding newly assigned partitions: {}", assignedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = subscriptions.rebalanceListener(); @@ -76,11 +76,11 @@ public Exception invokePartitionsAssigned(final SortedSet assign } public Exception invokePartitionsRevoked(final SortedSet revokedPartitions) { - log.info("Revoke previously assigned partitions {}", Utils.join(revokedPartitions, ", ")); + log.info("Revoke previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Set revokePausedPartitions = subscriptions.pausedPartitions(); revokePausedPartitions.retainAll(revokedPartitions); if (!revokePausedPartitions.isEmpty()) - log.info("The pause flag in partitions [{}] will be removed due to revocation.", Utils.join(revokePausedPartitions, ", ")); + log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = subscriptions.rebalanceListener(); @@ -102,11 +102,11 @@ public Exception invokePartitionsRevoked(final SortedSet revoked } public Exception invokePartitionsLost(final SortedSet lostPartitions) { - log.info("Lost previously assigned partitions {}", Utils.join(lostPartitions, ", ")); + log.info("Lost previously assigned partitions {}", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Set lostPausedPartitions = subscriptions.pausedPartitions(); lostPausedPartitions.retainAll(lostPartitions); if (!lostPausedPartitions.isEmpty()) - log.info("The pause flag in partitions [{}] will be removed due to partition lost.", Utils.join(lostPausedPartitions, ", ")); + log.info("The pause flag in partitions [{}] will be removed due to partition lost.", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = 
subscriptions.rebalanceListener(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java index 54c8e6b4b9..aa499cbf30 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetch.java @@ -61,7 +61,7 @@ private Fetch( /** * Add another {@link Fetch} to this one; all of its records will be added to this fetch's - * {@link #records()} records}, and if the other fetch + * {@link #records() records}, and if the other fetch * {@link #positionAdvanced() advanced the consume position for any topic partition}, * this fetch will be marked as having advanced the consume position as well. * @param fetch the other fetch to add; may not be null diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java index d9e365e09e..a33a650d1f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java @@ -173,15 +173,23 @@ void awaitNotEmpty(Timer timer) { // Update the timer before we head into the loop in case it took a while to get the lock. timer.update(); - if (timer.isExpired()) + if (timer.isExpired()) { + // If the thread was interrupted before we start waiting, it still counts as + // interrupted from the point of view of the KafkaConsumer.poll(Duration) contract. + // We only need to check this when we are not going to wait because waiting + // already checks whether the thread is interrupted. + if (Thread.interrupted()) + throw new InterruptException("Interrupted waiting for results from fetching records"); + break; + } if (!notEmptyCondition.await(timer.remainingMs(), TimeUnit.MILLISECONDS)) { break; } } } catch (InterruptedException e) { - throw new InterruptException("Timeout waiting for results from fetching records", e); + throw new InterruptException("Interrupted waiting for results from fetching records", e); } finally { lock.unlock(); timer.update(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java index 5b54e8a436..a956ef3a93 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/HeartbeatRequestManager.java @@ -210,7 +210,7 @@ public NetworkClientDelegate.PollResult poll(long currentTimeMs) { boolean heartbeatNow = membershipManager.shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight(); if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) { - return new NetworkClientDelegate.PollResult(heartbeatRequestState.nextHeartbeatMs(currentTimeMs)); + return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs)); } NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false); @@ -246,7 +246,7 @@ public long maximumTimeToWait(long currentTimeMs) { ) { return 0L; } - return Math.min(pollTimer.remainingMs() / 2, heartbeatRequestState.nextHeartbeatMs(currentTimeMs)); + return Math.min(pollTimer.remainingMs() / 2, 
heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs)); } /** @@ -255,11 +255,12 @@ public long maximumTimeToWait(long currentTimeMs) { * member to {@link MemberState#JOINING}, so that it rejoins the group. */ public void resetPollTimer(final long pollMs) { + pollTimer.update(pollMs); if (pollTimer.isExpired()) { - logger.debug("Poll timer has been reset after it had expired"); + logger.warn("Time between subsequent calls to poll() was longer than the configured " + + "max.poll.interval.ms, exceeded approximately by {} ms.", pollTimer.isExpiredBy()); membershipManager.maybeRejoinStaleMember(); } - pollTimer.update(pollMs); pollTimer.reset(maxPollIntervalMs); } @@ -269,6 +270,7 @@ private NetworkClientDelegate.UnsentRequest makeHeartbeatRequest(final long curr heartbeatRequestState.onSendAttempt(currentTimeMs); membershipManager.onHeartbeatRequestSent(); metricsManager.recordHeartbeatSentMs(currentTimeMs); + heartbeatRequestState.resetTimer(); return request; } @@ -325,7 +327,6 @@ private void onResponse(final ConsumerGroupHeartbeatResponse response, long curr if (Errors.forCode(response.data().errorCode()) == Errors.NONE) { heartbeatRequestState.updateHeartbeatIntervalMs(response.data().heartbeatIntervalMs()); heartbeatRequestState.onSuccessfulAttempt(currentTimeMs); - heartbeatRequestState.resetTimer(); membershipManager.onHeartbeatSuccess(response.data()); return; } @@ -380,16 +381,24 @@ private void onErrorResponse(final ConsumerGroupHeartbeatResponse response, break; case UNRELEASED_INSTANCE_ID: - logger.error("GroupHeartbeatRequest failed due to the instance id {} was not released: {}", + logger.error("GroupHeartbeatRequest failed due to unreleased instance id {}: {}", membershipManager.groupInstanceId().orElse("null"), errorMessage); - handleFatalFailure(Errors.UNRELEASED_INSTANCE_ID.exception(errorMessage)); + handleFatalFailure(error.exception(errorMessage)); + break; + + case FENCED_INSTANCE_ID: + logger.error("GroupHeartbeatRequest failed due to fenced instance id {}: {}. " + + "This is expected in the case that the member was removed from the group " + + "by an admin client, and another member joined using the same group instance id.", + membershipManager.groupInstanceId().orElse("null"), errorMessage); + handleFatalFailure(error.exception(errorMessage)); break; case INVALID_REQUEST: case GROUP_MAX_SIZE_REACHED: case UNSUPPORTED_ASSIGNOR: case UNSUPPORTED_VERSION: - logger.error("GroupHeartbeatRequest failed due to error: {}", error); + logger.error("GroupHeartbeatRequest failed due to {}: {}", error, errorMessage); handleFatalFailure(error.exception(errorMessage)); break; @@ -413,7 +422,7 @@ private void onErrorResponse(final ConsumerGroupHeartbeatResponse response, default: // If the manager receives an unknown error - there could be a bug in the code or a new error code - logger.error("GroupHeartbeatRequest failed due to unexpected error: {}", error); + logger.error("GroupHeartbeatRequest failed due to unexpected error {}: {}", error, errorMessage); handleFatalFailure(error.exception(errorMessage)); break; } @@ -469,19 +478,33 @@ public void resetTimer() { this.heartbeatTimer.reset(heartbeatIntervalMs); } + /** + * Check if a heartbeat request should be sent on the current time. A heartbeat should be + * sent if the heartbeat timer has expired, backoff has expired, and there is no request + * in-flight. 
+ */ @Override public boolean canSendRequest(final long currentTimeMs) { update(currentTimeMs); return heartbeatTimer.isExpired() && super.canSendRequest(currentTimeMs); } - public long nextHeartbeatMs(final long currentTimeMs) { - if (heartbeatTimer.remainingMs() == 0) { + public long timeToNextHeartbeatMs(final long currentTimeMs) { + if (heartbeatTimer.isExpired()) { return this.remainingBackoffMs(currentTimeMs); } return heartbeatTimer.remainingMs(); } + public void onFailedAttempt(final long currentTimeMs) { + // Reset timer to allow sending HB after a failure without waiting for the interval. + // After a failure, a next HB may be needed with backoff (ex. errors that lead to + // retries, like coordinator load error), or immediately (ex. errors that lead to + // rejoining, like fencing errors). + heartbeatTimer.reset(0); + super.onFailedAttempt(currentTimeMs); + } + private void updateHeartbeatIntervalMs(final long heartbeatIntervalMs) { if (this.heartbeatIntervalMs == heartbeatIntervalMs) { // no need to update the timer if the interval hasn't changed @@ -541,16 +564,11 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { sentFields.rebalanceTimeoutMs = rebalanceTimeoutMs; } - if (!this.subscriptions.hasPatternSubscription()) { - // SubscribedTopicNames - only sent when joining or if it has changed since the last heartbeat - TreeSet subscribedTopicNames = new TreeSet<>(this.subscriptions.subscription()); - if (sendAllFields || !subscribedTopicNames.equals(sentFields.subscribedTopicNames)) { - data.setSubscribedTopicNames(new ArrayList<>(this.subscriptions.subscription())); - sentFields.subscribedTopicNames = subscribedTopicNames; - } - } else { - // SubscribedTopicRegex - only sent if it has changed since the last heartbeat - // - not supported yet + // SubscribedTopicNames - only sent if has changed since the last heartbeat + TreeSet subscribedTopicNames = new TreeSet<>(this.subscriptions.subscription()); + if (sendAllFields || !subscribedTopicNames.equals(sentFields.subscribedTopicNames)) { + data.setSubscribedTopicNames(new ArrayList<>(this.subscriptions.subscription())); + sentFields.subscribedTopicNames = subscribedTopicNames; } // ServerAssignor - sent when joining or if it has changed since the last heartbeat diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer.java index 80be0959d6..641bc81d8a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/LegacyKafkaConsumer.java @@ -77,6 +77,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.CLIENT_RACK_CONFIG; @@ -93,7 +94,6 @@ import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.configuredConsumerInterceptors; import static org.apache.kafka.common.utils.Utils.closeQuietly; import static org.apache.kafka.common.utils.Utils.isBlank; -import static org.apache.kafka.common.utils.Utils.join; import static org.apache.kafka.common.utils.Utils.swallow; /** @@ -473,7 +473,7 @@ private void subscribeInternal(Collection topics, Optional(topics), listener)) 
metadata.requestUpdateForNewTopics(); } @@ -519,7 +519,7 @@ public void subscribe(Pattern pattern) { */ private void subscribeInternal(Pattern pattern, Optional listener) { maybeThrowInvalidGroupIdException(); - if (pattern == null || pattern.toString().equals("")) + if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? "null" : "empty")); @@ -571,7 +571,7 @@ public void assign(Collection partitions) { if (coordinator != null) this.coordinator.maybeAutoCommitOffsetsAsync(time.milliseconds()); - log.info("Assigned to partition(s): {}", join(partitions, ", ")); + log.info("Assigned to partition(s): {}", partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); if (this.subscriptions.assignFromUser(new HashSet<>(partitions))) metadata.requestUpdateForNewTopics(); } @@ -801,7 +801,7 @@ public void seekToBeginning(Collection partitions) { acquireAndEnsureOpen(); try { - Collection parts = partitions.size() == 0 ? this.subscriptions.assignedPartitions() : partitions; + Collection parts = partitions.isEmpty() ? this.subscriptions.assignedPartitions() : partitions; subscriptions.requestOffsetReset(parts, OffsetResetStrategy.EARLIEST); } finally { release(); @@ -815,7 +815,7 @@ public void seekToEnd(Collection partitions) { acquireAndEnsureOpen(); try { - Collection parts = partitions.size() == 0 ? this.subscriptions.assignedPartitions() : partitions; + Collection parts = partitions.isEmpty() ? this.subscriptions.assignedPartitions() : partitions; subscriptions.requestOffsetReset(parts, OffsetResetStrategy.LATEST); } finally { release(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java index 4710746995..76a550ad71 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/MembershipManagerImpl.java @@ -40,7 +40,6 @@ import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.util.ArrayList; @@ -1181,7 +1180,7 @@ private Optional findTopicNameInGlobalOrLocalCache(Uuid topicId) { * Visible for testing */ CompletableFuture revokePartitions(Set revokedPartitions) { - log.info("Revoking previously assigned partitions {}", Utils.join(revokedPartitions, ", ")); + log.info("Revoking previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); logPausedPartitionsBeingRevoked(revokedPartitions); @@ -1338,6 +1337,7 @@ private CompletableFuture enqueueConsumerRebalanceListenerCallback(Consume Set partitions) { SortedSet sortedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); sortedPartitions.addAll(partitions); + CompletableBackgroundEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(methodName, sortedPartitions); backgroundEventHandler.add(event); log.debug("The event to trigger the {} method execution was enqueued successfully", methodName.fullyQualifiedMethodName()); @@ -1377,7 +1377,7 @@ private void logPausedPartitionsBeingRevoked(Set partitionsToRev Set revokePausedPartitions = subscriptions.pausedPartitions(); 
revokePausedPartitions.retainAll(partitionsToRevoke); if (!revokePausedPartitions.isEmpty()) { - log.info("The pause flag in partitions [{}] will be removed due to revocation.", Utils.join(revokePausedPartitions, ", ")); + log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetAndTimestampInternal.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetAndTimestampInternal.java new file mode 100644 index 0000000000..08d451da47 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetAndTimestampInternal.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +import org.apache.kafka.clients.consumer.OffsetAndTimestamp; + +import java.util.Optional; + +/** + * Internal representation of {@link OffsetAndTimestamp} to allow negative timestamps and offset. 
+ */ +public class OffsetAndTimestampInternal { + private final long timestamp; + private final long offset; + private final Optional leaderEpoch; + + public OffsetAndTimestampInternal(long offset, long timestamp, Optional leaderEpoch) { + this.offset = offset; + this.timestamp = timestamp; + this.leaderEpoch = leaderEpoch; + } + + long offset() { + return offset; + } + + long timestamp() { + return timestamp; + } + + Optional leaderEpoch() { + return leaderEpoch; + } + + public OffsetAndTimestamp buildOffsetAndTimestamp() { + return new OffsetAndTimestamp(offset, timestamp, leaderEpoch); + } + + @Override + public int hashCode() { + int result = (int) (timestamp ^ (timestamp >>> 32)); + result = 31 * result + (int) (offset ^ (offset >>> 32)); + result = 31 * result + leaderEpoch.hashCode(); + return result; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof OffsetAndTimestampInternal)) return false; + + OffsetAndTimestampInternal that = (OffsetAndTimestampInternal) o; + + if (timestamp != that.timestamp) return false; + if (offset != that.offset) return false; + return leaderEpoch.equals(that.leaderEpoch); + } + + @Override + public String toString() { + return "OffsetAndTimestampInternal{" + + "timestamp=" + timestamp + + ", offset=" + offset + + ", leaderEpoch=" + leaderEpoch + + '}'; + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetCommitCallbackInvoker.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetCommitCallbackInvoker.java index 47a5df6d1d..3c1ebc6dec 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetCommitCallbackInvoker.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetCommitCallbackInvoker.java @@ -25,7 +25,7 @@ import java.util.concurrent.LinkedBlockingQueue; /** - * Utility class that helps the application thread to invoke user registered {@link OffsetCommitCallback} amd + * Utility class that helps the application thread to invoke user registered {@link OffsetCommitCallback} and * {@link org.apache.kafka.clients.consumer.ConsumerInterceptor}s. This is * achieved by having the background thread register a {@link OffsetCommitCallbackTask} to the invoker upon the * future completion, and execute the callbacks when user polls/commits/closes the consumer. 
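OffsetAndTimestampInternal above keeps its accessors package-private and exposes only buildOffsetAndTimestamp() for crossing back into the public API. The sketch below shows how a caller might map the internal results onto the public offsetsForTimes() shape; the converter class name and its null handling are assumptions for illustration, not code from this patch.

// Illustrative sketch only, not part of this patch.
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.clients.consumer.internals.OffsetAndTimestampInternal;
import org.apache.kafka.common.TopicPartition;

import java.util.HashMap;
import java.util.Map;

final class OffsetResultConverter {
    static Map<TopicPartition, OffsetAndTimestamp> toPublicResult(
            Map<TopicPartition, OffsetAndTimestampInternal> internalResults) {
        Map<TopicPartition, OffsetAndTimestamp> result = new HashMap<>(internalResults.size());
        internalResults.forEach((tp, internal) ->
            // Partitions for which no offset was found stay mapped to null.
            result.put(tp, internal == null ? null : internal.buildOffsetAndTimestamp()));
        return result;
    }
}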
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java index 9239811f7d..ac5d8a4acf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java @@ -50,6 +50,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -240,20 +241,48 @@ Map getOffsetResetTimestamp() { return offsetResetTimestamps; } - static Map buildOffsetsForTimesResult(final Map timestampsToSearch, - final Map fetchedOffsets) { - HashMap offsetsByTimes = new HashMap<>(timestampsToSearch.size()); + static Map buildListOffsetsResult( + final Map timestampsToSearch, + final Map fetchedOffsets, + BiFunction resultMapper) { + + HashMap offsetsResults = new HashMap<>(timestampsToSearch.size()); for (Map.Entry entry : timestampsToSearch.entrySet()) - offsetsByTimes.put(entry.getKey(), null); + offsetsResults.put(entry.getKey(), null); for (Map.Entry entry : fetchedOffsets.entrySet()) { - // 'entry.getValue().timestamp' will not be null since we are guaranteed - // to work with a v1 (or later) ListOffset request ListOffsetData offsetData = entry.getValue(); - offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(offsetData.offset, offsetData.timestamp, offsetData.leaderEpoch)); + offsetsResults.put(entry.getKey(), resultMapper.apply(entry.getKey(), offsetData)); } - return offsetsByTimes; + return offsetsResults; + } + + static Map buildOffsetsForTimesResult( + final Map timestampsToSearch, + final Map fetchedOffsets) { + return buildListOffsetsResult(timestampsToSearch, fetchedOffsets, + (topicPartition, offsetData) -> new OffsetAndTimestamp( + offsetData.offset, + offsetData.timestamp, + offsetData.leaderEpoch)); + } + + static Map buildOffsetsForTimeInternalResult( + final Map timestampsToSearch, + final Map fetchedOffsets) { + HashMap offsetsResults = new HashMap<>(timestampsToSearch.size()); + for (Map.Entry entry : timestampsToSearch.entrySet()) { + offsetsResults.put(entry.getKey(), null); + } + for (Map.Entry entry : fetchedOffsets.entrySet()) { + ListOffsetData offsetData = entry.getValue(); + offsetsResults.put(entry.getKey(), new OffsetAndTimestampInternal( + offsetData.offset, + offsetData.timestamp, + offsetData.leaderEpoch)); + } + return offsetsResults; } private Long offsetResetStrategyTimestamp(final TopicPartition partition) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java index c5156e9e0b..22e56111b4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java @@ -151,14 +151,13 @@ public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { * found .The future will complete when the requests responses are received and * processed, following a call to {@link #poll(long)} */ - public CompletableFuture> fetchOffsets( - final Map timestampsToSearch, - final boolean requireTimestamps) { + public CompletableFuture> fetchOffsets( + Map timestampsToSearch, 
+ boolean requireTimestamps) { if (timestampsToSearch.isEmpty()) { return CompletableFuture.completedFuture(Collections.emptyMap()); } metadata.addTransientTopics(OffsetFetcherUtils.topicsForPartitions(timestampsToSearch.keySet())); - ListOffsetsRequestState listOffsetsRequestState = new ListOffsetsRequestState( timestampsToSearch, requireTimestamps, @@ -175,10 +174,11 @@ public CompletableFuture> fetchOffsets( } }); - fetchOffsetsByTimes(timestampsToSearch, requireTimestamps, listOffsetsRequestState); - - return listOffsetsRequestState.globalResult.thenApply(result -> - OffsetFetcherUtils.buildOffsetsForTimesResult(timestampsToSearch, result.fetchedOffsets)); + prepareFetchOffsetsRequests(timestampsToSearch, requireTimestamps, listOffsetsRequestState); + return listOffsetsRequestState.globalResult.thenApply( + result -> OffsetFetcherUtils.buildOffsetsForTimeInternalResult( + timestampsToSearch, + result.fetchedOffsets)); } /** @@ -235,14 +235,9 @@ public CompletableFuture validatePositionsIfNeeded() { * Generate requests for partitions with known leaders. Update the listOffsetsRequestState by adding * partitions with unknown leader to the listOffsetsRequestState.remainingToSearch */ - private void fetchOffsetsByTimes(final Map timestampsToSearch, - final boolean requireTimestamps, - final ListOffsetsRequestState listOffsetsRequestState) { - if (timestampsToSearch.isEmpty()) { - // Early return if empty map to avoid wrongfully raising StaleMetadataException on - // empty grouping - return; - } + private void prepareFetchOffsetsRequests(final Map timestampsToSearch, + final boolean requireTimestamps, + final ListOffsetsRequestState listOffsetsRequestState) { try { List unsentRequests = buildListOffsetsRequests( timestampsToSearch, requireTimestamps, listOffsetsRequestState); @@ -263,7 +258,7 @@ public void onUpdate(ClusterResource clusterResource) { Map timestampsToSearch = new HashMap<>(requestState.remainingToSearch); requestState.remainingToSearch.clear(); - fetchOffsetsByTimes(timestampsToSearch, requestState.requireTimestamps, requestState); + prepareFetchOffsetsRequests(timestampsToSearch, requestState.requireTimestamps, requestState); }); } @@ -298,7 +293,7 @@ private List buildListOffsetsRequests( offsetFetcherUtils.updateSubscriptionState(multiNodeResult.fetchedOffsets, isolationLevel); - if (listOffsetsRequestState.remainingToSearch.size() == 0) { + if (listOffsetsRequestState.remainingToSearch.isEmpty()) { ListOffsetResult listOffsetResult = new ListOffsetResult(listOffsetsRequestState.fetchedOffsets, listOffsetsRequestState.remainingToSearch.keySet()); @@ -314,7 +309,6 @@ private List buildListOffsetsRequests( for (Map.Entry> entry : timestampsToSearchByNode.entrySet()) { Node node = entry.getKey(); - CompletableFuture partialResult = buildListOffsetRequestToNode( node, entry.getValue(), @@ -364,8 +358,7 @@ private CompletableFuture buildListOffsetRequestToNode( ListOffsetsResponse lor = (ListOffsetsResponse) response.responseBody(); log.trace("Received ListOffsetResponse {} from broker {}", lor, node); try { - ListOffsetResult listOffsetResult = - offsetFetcherUtils.handleListOffsetResponse(lor); + ListOffsetResult listOffsetResult = offsetFetcherUtils.handleListOffsetResponse(lor); result.complete(listOffsetResult); } catch (RuntimeException e) { result.completeExceptionally(e); @@ -423,11 +416,11 @@ private CompletableFuture sendListOffsetsRequestsAndResetPositions( }); }); - if (unsentRequests.size() > 0) { + if (unsentRequests.isEmpty()) { + globalResult.complete(null); + } 
else { expectedResponses.set(unsentRequests.size()); requestsToSend.addAll(unsentRequests); - } else { - globalResult.complete(null); } return globalResult; @@ -503,11 +496,11 @@ private CompletableFuture sendOffsetsForLeaderEpochRequestsAndValidatePosi }); }); - if (unsentRequests.size() > 0) { + if (unsentRequests.isEmpty()) { + globalResult.complete(null); + } else { expectedResponses.set(unsentRequests.size()); requestsToSend.addAll(unsentRequests); - } else { - globalResult.complete(null); } return globalResult; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java index 8a9d970182..9bba53e3d3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java @@ -92,7 +92,7 @@ public boolean failed() { } /** - * Check if the request is retriable (convenience method for checking if + * Check if the request is retriable. This is a convenience method for checking if * the exception is an instance of {@link RetriableException}. * @return true if it is retriable, false otherwise * @throws IllegalStateException if the future is not complete or completed successfully diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManager.java index e37032835d..afbb7a19a5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManager.java @@ -81,7 +81,5 @@ default long maximumTimeToWait(long currentTimeMs) { /** * Signals the request manager that the consumer is closing to prepare for the proper actions to be taken. */ - default void signalClose() { - return; - } + default void signalClose() { } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java index a888e7831a..fa9af73422 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java @@ -31,6 +31,7 @@ class RequestState { protected long lastReceivedMs = -1; protected int numAttempts = 0; protected long backoffMs = 0; + private boolean requestInFlight = false; public RequestState(final LogContext logContext, final String owner, @@ -66,6 +67,7 @@ public RequestState(final LogContext logContext, * and the backoff is restored to its minimal configuration. */ public void reset() { + this.requestInFlight = false; this.lastSentMs = -1; this.lastReceivedMs = -1; this.numAttempts = 0; @@ -73,11 +75,6 @@ public void reset() { } public boolean canSendRequest(final long currentTimeMs) { - if (this.lastSentMs == -1) { - // no request has been sent - return true; - } - if (requestInFlight()) { log.trace("An inflight request already exists for {}", this); return false; @@ -98,10 +95,12 @@ public boolean canSendRequest(final long currentTimeMs) { * is a request in-flight. 
*/ public boolean requestInFlight() { - return this.lastSentMs > -1 && this.lastReceivedMs < this.lastSentMs; + return requestInFlight; } public void onSendAttempt(final long currentTimeMs) { + this.requestInFlight = true; + // Here we update the timer everytime we try to send a request. this.lastSentMs = currentTimeMs; } @@ -114,6 +113,7 @@ public void onSendAttempt(final long currentTimeMs) { * @param currentTimeMs Current time in milliseconds */ public void onSuccessfulAttempt(final long currentTimeMs) { + this.requestInFlight = false; this.lastReceivedMs = currentTimeMs; this.backoffMs = exponentialBackoff.backoff(0); this.numAttempts = 0; @@ -127,6 +127,7 @@ public void onSuccessfulAttempt(final long currentTimeMs) { * @param currentTimeMs Current time in milliseconds */ public void onFailedAttempt(final long currentTimeMs) { + this.requestInFlight = false; this.lastReceivedMs = currentTimeMs; this.backoffMs = exponentialBackoff.backoff(numAttempts); this.numAttempts++; @@ -150,11 +151,12 @@ protected String toStringBase() { ", lastSentMs=" + lastSentMs + ", lastReceivedMs=" + lastReceivedMs + ", numAttempts=" + numAttempts + - ", backoffMs=" + backoffMs; + ", backoffMs=" + backoffMs + + ", requestInFlight=" + requestInFlight; } @Override public String toString() { - return "RequestState{" + toStringBase() + '}'; + return getClass().getSimpleName() + "{" + toStringBase() + '}'; } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java index 87d471d2e1..06183751f8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NodeApiVersions; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetResetStrategy; @@ -30,6 +31,7 @@ import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -672,7 +674,7 @@ public synchronized Optional preferredReadReplica(TopicPartition tp, lo * Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches. * * @param tp The topic partition - * @return the removed preferred read replica if set, None otherwise. + * @return the removed preferred read replica if set, Empty otherwise. 
*/ public synchronized Optional clearPreferredReadReplica(TopicPartition tp) { final TopicPartitionState topicPartitionState = assignedStateOrNull(tp); @@ -746,7 +748,7 @@ public synchronized boolean hasAllFetchPositions() { } public synchronized Set initializingPartitions() { - return collectPartitions(state -> state.fetchState.equals(FetchStates.INITIALIZING) && !state.pendingOnAssignedCallback); + return collectPartitions(TopicPartitionState::shouldInitialize); } private Set collectPartitions(Predicate filter) { @@ -759,11 +761,22 @@ private Set collectPartitions(Predicate fil return result; } - + /** + * Note: this will not attempt to reset partitions that are in the process of being assigned + * and are pending the completion of any {@link ConsumerRebalanceListener#onPartitionsAssigned(Collection)} + * callbacks. + * + *
    + * + * This method only appears to be invoked the by the {@link KafkaConsumer} during its + * {@link KafkaConsumer#poll(Duration)} logic. Direct calls to methods like + * {@link #requestOffsetReset(TopicPartition)}, {@link #requestOffsetResetIfPartitionAssigned(TopicPartition)}, + * etc. do not skip partitions pending assignment. + */ public synchronized void resetInitializingPositions() { final Set partitionsWithNoOffsets = new HashSet<>(); assignment.forEach((tp, partitionState) -> { - if (partitionState.fetchState.equals(FetchStates.INITIALIZING)) { + if (partitionState.shouldInitialize()) { if (defaultResetStrategy == OffsetResetStrategy.NONE) partitionsWithNoOffsets.add(tp); else @@ -1086,6 +1099,17 @@ private void resume() { this.paused = false; } + /** + * True if the partition is in {@link FetchStates#INITIALIZING} state. While in this + * state, a position for the partition can be retrieved (based on committed offsets or + * partitions offsets). + * Note that retrieving a position does not mean that we can start fetching from the + * partition (see {@link #isFetchable()}) + */ + private boolean shouldInitialize() { + return fetchState.equals(FetchStates.INITIALIZING); + } + private boolean isFetchable() { return !paused && !pendingRevocation && !pendingOnAssignedCallback && hasValidPosition(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java index 209d5e41be..4c70797dbf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java @@ -43,8 +43,12 @@ public void wakeup() { return new WakeupFuture(); } else if (task instanceof ActiveFuture) { ActiveFuture active = (ActiveFuture) task; - active.future().completeExceptionally(new WakeupException()); - return null; + boolean wasTriggered = active.future().completeExceptionally(new WakeupException()); + + // If the Future was *already* completed when we invoke completeExceptionally, the WakeupException + // will be ignored. If it was already completed, we then need to return a new WakeupFuture so that the + // next call to setActiveTask will throw the WakeupException. + return wasTriggered ? 
null : new WakeupFuture(); } else if (task instanceof FetchAction) { FetchAction fetchAction = (FetchAction) task; fetchAction.fetchBuffer().wakeup(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AbstractTopicMetadataEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AbstractTopicMetadataEvent.java index 3347002cc6..9621e34ef5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AbstractTopicMetadataEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AbstractTopicMetadataEvent.java @@ -17,14 +17,13 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.common.PartitionInfo; -import org.apache.kafka.common.utils.Timer; import java.util.List; import java.util.Map; public abstract class AbstractTopicMetadataEvent extends CompletableApplicationEvent>> { - protected AbstractTopicMetadataEvent(final Type type, final Timer timer) { - super(type, timer); + protected AbstractTopicMetadataEvent(final Type type, final long deadlineMs) { + super(type, deadlineMs); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AllTopicsMetadataEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AllTopicsMetadataEvent.java index bda18e6421..8fe1702c85 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AllTopicsMetadataEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/AllTopicsMetadataEvent.java @@ -16,11 +16,9 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - public class AllTopicsMetadataEvent extends AbstractTopicMetadataEvent { - public AllTopicsMetadataEvent(final Timer timer) { - super(Type.ALL_TOPICS_METADATA, timer); + public AllTopicsMetadataEvent(final long deadlineMs) { + super(Type.ALL_TOPICS_METADATA, deadlineMs); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java index eac1cc3d62..1e082e1197 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.internals.IdempotentCloser; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; @@ -32,7 +31,6 @@ import java.util.Objects; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; /** @@ -49,6 +47,7 @@ public class ApplicationEventHandler implements Closeable { public ApplicationEventHandler(final LogContext logContext, final Time time, final BlockingQueue applicationEventQueue, + final CompletableEventReaper applicationEventReaper, final Supplier applicationEventProcessorSupplier, final Supplier networkClientDelegateSupplier, final Supplier requestManagersSupplier) { @@ -56,6 +55,8 @@ public ApplicationEventHandler(final LogContext logContext, this.applicationEventQueue = applicationEventQueue; this.networkThread = new 
ConsumerNetworkThread(logContext, time, + applicationEventQueue, + applicationEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier); @@ -71,7 +72,6 @@ public ApplicationEventHandler(final LogContext logContext, public void add(final ApplicationEvent event) { Objects.requireNonNull(event, "ApplicationEvent provided to add must be non-null"); applicationEventQueue.add(event); - log.trace("Enqueued event: {}", event); wakeupNetworkThread(); } @@ -100,17 +100,16 @@ public long maximumTimeToWait() { * *

    * - * See {@link ConsumerUtils#getResult(Future, Timer)} and {@link Future#get(long, TimeUnit)} for more details. + * See {@link ConsumerUtils#getResult(Future)} for more details. * * @param event A {@link CompletableApplicationEvent} created by the polling thread * @return Value that is the result of the event * @param Type of return value of the event */ - public T addAndGet(final CompletableApplicationEvent event, final Timer timer) { + public T addAndGet(final CompletableApplicationEvent event) { Objects.requireNonNull(event, "CompletableApplicationEvent provided to addAndGet must be non-null"); - Objects.requireNonNull(timer, "Timer provided to addAndGet must be non-null"); add(event); - return ConsumerUtils.getResult(event.future(), timer); + return ConsumerUtils.getResult(event.future()); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java index 3382530746..7ee0c09d40 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java @@ -17,12 +17,12 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.internals.CachedSupplier; import org.apache.kafka.clients.consumer.internals.CommitRequestManager; import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread; import org.apache.kafka.clients.consumer.internals.MembershipManager; +import org.apache.kafka.clients.consumer.internals.OffsetAndTimestampInternal; import org.apache.kafka.clients.consumer.internals.RequestManagers; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.PartitionInfo; @@ -33,7 +33,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -42,31 +41,20 @@ * An {@link EventProcessor} that is created and executes in the {@link ConsumerNetworkThread network thread} * which processes {@link ApplicationEvent application events} generated by the application thread. */ -public class ApplicationEventProcessor extends EventProcessor { +public class ApplicationEventProcessor implements EventProcessor { private final Logger log; private final ConsumerMetadata metadata; private final RequestManagers requestManagers; public ApplicationEventProcessor(final LogContext logContext, - final BlockingQueue applicationEventQueue, final RequestManagers requestManagers, final ConsumerMetadata metadata) { - super(logContext, applicationEventQueue); this.log = logContext.logger(ApplicationEventProcessor.class); this.requestManagers = requestManagers; this.metadata = metadata; } - /** - * Process the events—if any—that were produced by the application thread. It is possible that when processing - * an event generates an error. In such cases, the processor will log an exception, but we do not want those - * errors to be propagated to the caller. 
- */ - public boolean process() { - return process((event, error) -> error.ifPresent(e -> log.warn("Error processing event {}", e.getMessage(), e))); - } - @SuppressWarnings({"CyclomaticComplexity"}) @Override public void process(ApplicationEvent event) { @@ -197,10 +185,12 @@ private void process(final AssignmentChangeEvent event) { manager.maybeAutoCommitAsync(); } + /** + * Handles ListOffsetsEvent by fetching the offsets for the given partitions and timestamps. + */ private void process(final ListOffsetsEvent event) { - final CompletableFuture> future = - requestManagers.offsetsRequestManager.fetchOffsets(event.timestampsToSearch(), - event.requireTimestamps()); + final CompletableFuture> future = + requestManagers.offsetsRequestManager.fetchOffsets(event.timestampsToSearch(), event.requireTimestamps()); future.whenComplete(complete(event.future())); } @@ -271,7 +261,7 @@ private void process(final ConsumerRebalanceListenerCallbackCompletedEvent event manager.consumerRebalanceListenerCallbackCompleted(event); } - private void process(final CommitOnCloseEvent event) { + private void process(@SuppressWarnings("unused") final CommitOnCloseEvent event) { if (!requestManagers.commitRequestManager.isPresent()) return; log.debug("Signal CommitRequestManager closing"); @@ -307,7 +297,6 @@ private void process(final LeaveOnCloseEvent event) { */ public static Supplier supplier(final LogContext logContext, final ConsumerMetadata metadata, - final BlockingQueue applicationEventQueue, final Supplier requestManagersSupplier) { return new CachedSupplier() { @Override @@ -315,7 +304,6 @@ protected ApplicationEventProcessor create() { RequestManagers requestManagers = requestManagersSupplier.get(); return new ApplicationEventProcessor( logContext, - applicationEventQueue, requestManagers, metadata ); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java index 48421484f1..f6ded0bf73 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread; -import org.apache.kafka.common.utils.LogContext; -import org.slf4j.Logger; import java.util.Objects; import java.util.Queue; @@ -31,11 +29,9 @@ public class BackgroundEventHandler { - private final Logger log; private final Queue backgroundEventQueue; - public BackgroundEventHandler(final LogContext logContext, final Queue backgroundEventQueue) { - this.log = logContext.logger(BackgroundEventHandler.class); + public BackgroundEventHandler(final Queue backgroundEventQueue) { this.backgroundEventQueue = backgroundEventQueue; } @@ -47,6 +43,5 @@ public BackgroundEventHandler(final LogContext logContext, final Queue { */ private final Map offsets; - protected CommitEvent(final Type type, final Map offsets, final Timer timer) { - super(type, timer); - this.offsets = validate(offsets); - } - protected CommitEvent(final Type type, final Map offsets, final long deadlineMs) { super(type, deadlineMs); this.offsets = validate(offsets); @@ -62,4 +56,4 @@ public Map offsets() { protected String toStringBase() { return super.toStringBase() + ", offsets=" + offsets; } -} +} \ No newline at end of file diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java index dae9e9f101..dffac12902 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableApplicationEvent.java @@ -16,9 +16,6 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - -import java.util.Objects; import java.util.concurrent.CompletableFuture; /** @@ -32,13 +29,9 @@ public abstract class CompletableApplicationEvent extends ApplicationEvent im private final CompletableFuture future; private final long deadlineMs; - protected CompletableApplicationEvent(final Type type, final Timer timer) { - super(type); - this.future = new CompletableFuture<>(); - Objects.requireNonNull(timer); - this.deadlineMs = timer.remainingMs() + timer.currentTimeMs(); - } - + /** + * Note: the {@code deadlineMs} is the future time of expiration, not a timeout. + */ protected CompletableApplicationEvent(final Type type, final long deadlineMs) { super(type); this.future = new CompletableFuture<>(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableBackgroundEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableBackgroundEvent.java index 1a58515a5c..d02010496e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableBackgroundEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableBackgroundEvent.java @@ -27,10 +27,15 @@ public abstract class CompletableBackgroundEvent extends BackgroundEvent implements CompletableEvent { private final CompletableFuture future; + private final long deadlineMs; - protected CompletableBackgroundEvent(final Type type) { + /** + * Note: the {@code deadlineMs} is the future time of expiration, not a timeout. 
+ */ + protected CompletableBackgroundEvent(final Type type, final long deadlineMs) { super(type); this.future = new CompletableFuture<>(); + this.deadlineMs = deadlineMs; } @Override @@ -38,8 +43,13 @@ public CompletableFuture future() { return future; } + @Override + public long deadlineMs() { + return deadlineMs; + } + @Override protected String toStringBase() { - return super.toStringBase() + ", future=" + future; + return super.toStringBase() + ", future=" + future + ", deadlineMs=" + deadlineMs; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java index 97559d8cb9..20231b0f99 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java @@ -16,9 +16,112 @@ */ package org.apache.kafka.clients.consumer.internals.events; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Timer; + +import java.time.Duration; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeoutException; + +import static java.util.Objects.requireNonNull; +/** + * {@code CompletableEvent} is an interface that is used by both {@link CompletableApplicationEvent} and + * {@link CompletableBackgroundEvent} for common processing and logic. A {@code CompletableEvent} is one that + * allows the caller to get the {@link #future() future} related to the event and the event's + * {@link #deadlineMs() expiration timestamp}. + * + * @param Return type for the event when completed + */ public interface CompletableEvent { + /** + * Returns the {@link CompletableFuture future} associated with this event. Any event will have some related + * logic that is executed on its behalf. The event can complete in one of the following ways: + * + *

      + *
+ * <ul> + *     <li>Success: when the logic for the event completes successfully, the data generated by that event + *         (if applicable) is passed to {@link CompletableFuture#complete(Object)}. In the case where the generic + *         bound type is specified as {@link Void}, {@code null} is provided.</li> + *     <li>Error: when the event logic generates an error, the error is passed to + *         {@link CompletableFuture#completeExceptionally(Throwable)}.</li> + *     <li>Timeout: when the time spent executing the event logic exceeds the {@link #deadlineMs() deadline}, an + *         instance of {@link TimeoutException} should be created and passed to + *         {@link CompletableFuture#completeExceptionally(Throwable)}. This also occurs when an event remains + *         incomplete when the consumer closes.</li> + * </ul>
    + * + * @return Future on which the caller may block or query for completion + * + * @see CompletableEventReaper + */ CompletableFuture future(); + + /** + * This is the deadline that represents the absolute wall clock time by which any event-specific execution should + * complete. This is not a timeout value. After this time has passed, + * {@link CompletableFuture#completeExceptionally(Throwable)} will be invoked with an instance of + * {@link TimeoutException}. + * + * @return Absolute time for event to be completed + * + * @see CompletableEventReaper + */ + long deadlineMs(); + + /** + * Calculate the deadline timestamp based on {@link Timer#currentTimeMs()} and {@link Timer#remainingMs()}. + * + * @param timer Timer + * + * @return Absolute time by which event should be completed + */ + static long calculateDeadlineMs(final Timer timer) { + requireNonNull(timer); + return calculateDeadlineMs(timer.currentTimeMs(), timer.remainingMs()); + } + + /** + * Calculate the deadline timestamp based on {@link Timer#currentTimeMs()} and {@link Duration#toMillis()}. + * + * @param time Time + * @param duration Duration + * + * @return Absolute time by which event should be completed + */ + static long calculateDeadlineMs(final Time time, final Duration duration) { + return calculateDeadlineMs(requireNonNull(time).milliseconds(), requireNonNull(duration).toMillis()); + } + + /** + * Calculate the deadline timestamp based on {@link Timer#currentTimeMs()} and timeout. + * + * @param time Time + * @param timeoutMs Timeout, in milliseconds + * + * @return Absolute time by which event should be completed + */ + static long calculateDeadlineMs(final Time time, final long timeoutMs) { + return calculateDeadlineMs(requireNonNull(time).milliseconds(), timeoutMs); + } + + /** + * Calculate the deadline timestamp based on the current time and timeout. + * + * @param currentTimeMs Current time, in milliseconds + * @param timeoutMs Timeout, in milliseconds + * + * @return Absolute time by which event should be completed + */ + static long calculateDeadlineMs(final long currentTimeMs, final long timeoutMs) { + if (currentTimeMs > Long.MAX_VALUE - timeoutMs) + return Long.MAX_VALUE; + else + return currentTimeMs + timeoutMs; + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java new file mode 100644 index 0000000000..545a03df8b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer.internals.events; + +import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.utils.LogContext; +import org.slf4j.Logger; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; + +/** + * {@code CompletableEventReaper} is responsible for tracking {@link CompletableEvent time-bound events} and removing + * any that exceed their {@link CompletableEvent#deadlineMs() deadline} (unless they've already completed). This + * mechanism is used by the {@link AsyncKafkaConsumer} to enforce the timeout provided by the user in its API + * calls (e.g. {@link AsyncKafkaConsumer#commitSync(Duration)}). + */ +public class CompletableEventReaper { + + private final Logger log; + + /** + * List of tracked events that are candidates for expiration. + */ + private final List> tracked; + + public CompletableEventReaper(LogContext logContext) { + this.log = logContext.logger(CompletableEventReaper.class); + this.tracked = new ArrayList<>(); + } + + /** + * Adds a new {@link CompletableEvent event} to track for later completion/expiration. + * + * @param event Event to track + */ + public void add(CompletableEvent event) { + tracked.add(Objects.requireNonNull(event, "Event to track must be non-null")); + } + + /** + * This method performs a two-step process to "complete" {@link CompletableEvent events} that have either expired + * or completed normally: + * + *
      + *
+ * <ol> + *     <li>For each tracked event which has exceeded its {@link CompletableEvent#deadlineMs() deadline}, an + *         instance of {@link TimeoutException} is created and passed to + *         {@link CompletableFuture#completeExceptionally(Throwable)}.</li> + *     <li>For each tracked event whose {@link CompletableEvent#future() future} is already in the + *         {@link CompletableFuture#isDone() done} state, it will be removed from the list of tracked events.</li> + * </ol>
    + * + *

    + * + * This method should be called at regular intervals, based upon the needs of the resource that owns the reaper. + * + * @param currentTimeMs Current time with which to compare against the + * {@link CompletableEvent#deadlineMs() expiration time} + */ + public void reap(long currentTimeMs) { + Consumer> expireEvent = event -> { + long pastDueMs = currentTimeMs - event.deadlineMs(); + TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, event.deadlineMs())); + + if (event.future().completeExceptionally(error)) { + log.debug("Event {} completed exceptionally since its expiration of {} passed {} ms ago", event, event.deadlineMs(), pastDueMs); + } else { + log.trace("Event {} not completed exceptionally since it was previously completed", event); + } + }; + + // First, complete (exceptionally) any events that have passed their deadline AND aren't already complete. + tracked.stream() + .filter(e -> !e.future().isDone()) + .filter(e -> currentTimeMs >= e.deadlineMs()) + .forEach(expireEvent); + // Second, remove any events that are already complete, just to make sure we don't hold references. This will + // include any events that finished successfully as well as any events we just completed exceptionally above. + tracked.removeIf(e -> e.future().isDone()); + } + + /** + * It is possible for the {@link AsyncKafkaConsumer#close() consumer to close} before completing the processing of + * all the events in the queue. In this case, we need to + * {@link CompletableFuture#completeExceptionally(Throwable) expire} any remaining events. + * + *

    + * + * Check each of the {@link #add(CompletableEvent) previously-added} {@link CompletableEvent completable events}, + * and for any that are incomplete, expire them. Also check the core event queue for any incomplete events and + * likewise expire them. + * + *

    + * + * Note: because this is called in the context of {@link AsyncKafkaConsumer#close() closing consumer}, + * don't take the deadline into consideration, just close it regardless. + * + * @param events Events from a queue that have not yet been tracked that also need to be reviewed + */ + public void reap(Collection events) { + Objects.requireNonNull(events, "Event queue to reap must be non-null"); + + Consumer> expireEvent = event -> { + TimeoutException error = new TimeoutException(String.format("%s could not be completed before the consumer closed", event.getClass().getSimpleName())); + + if (event.future().completeExceptionally(error)) { + log.debug("Event {} completed exceptionally since the consumer is closing", event); + } else { + log.trace("Event {} not completed exceptionally since it was completed prior to the consumer closing", event); + } + }; + + tracked.stream() + .filter(e -> !e.future().isDone()) + .forEach(expireEvent); + tracked.clear(); + + events.stream() + .filter(e -> e instanceof CompletableEvent) + .map(e -> (CompletableEvent) e) + .filter(e -> !e.future().isDone()) + .forEach(expireEvent); + events.clear(); + } + + public int size() { + return tracked.size(); + } + + public boolean contains(CompletableEvent event) { + return event != null && tracked.contains(event); + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ConsumerRebalanceListenerCallbackNeededEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ConsumerRebalanceListenerCallbackNeededEvent.java index 6ce833580c..ecb9eedab2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ConsumerRebalanceListenerCallbackNeededEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ConsumerRebalanceListenerCallbackNeededEvent.java @@ -39,7 +39,7 @@ public class ConsumerRebalanceListenerCallbackNeededEvent extends CompletableBac public ConsumerRebalanceListenerCallbackNeededEvent(final ConsumerRebalanceListenerMethodName methodName, final SortedSet partitions) { - super(Type.CONSUMER_REBALANCE_LISTENER_CALLBACK_NEEDED); + super(Type.CONSUMER_REBALANCE_LISTENER_CALLBACK_NEEDED, Long.MAX_VALUE); this.methodName = Objects.requireNonNull(methodName); this.partitions = Collections.unmodifiableSortedSet(partitions); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/EventProcessor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/EventProcessor.java index 79a987e8a7..1c0bb03059 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/EventProcessor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/EventProcessor.java @@ -16,120 +16,26 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.clients.consumer.internals.ConsumerUtils; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.internals.IdempotentCloser; -import org.apache.kafka.common.utils.LogContext; -import org.slf4j.Logger; - -import java.io.Closeable; -import java.util.LinkedList; -import java.util.List; -import java.util.Objects; -import java.util.Optional; import java.util.concurrent.BlockingQueue; /** - * An {@link EventProcessor} is the means by which events produced by thread A are - * processed by thread B. 
By definition, threads A and B run in parallel to - * each other, so a mechanism is needed with which to receive and process the events from the other thread. That - * communication channel is formed around {@link BlockingQueue a shared queue} into which thread A - * enqueues events and thread B reads and processes those events. + * An {@code EventProcessor} is the means by which events are processed, the meaning of which is left + * intentionally loose. This is in large part to keep the {@code EventProcessor} focused on what it means to process + * the events, and not linking itself too closely with the rest of the surrounding application. + * + *

    + * + * The {@code EventProcessor} is envisaged as a stateless service that acts as a conduit, receiving an event and + * dispatching to another block of code to process. The semantic meaning of each event is different, so the + * {@code EventProcessor} will need to interact with other parts of the system that maintain state. The + * implementation should not be concerned with the mechanism by which an event arrived for processing. While the + * events are shuffled around the consumer subsystem by means of {@link BlockingQueue shared queues}, it should + * be considered an anti-pattern to need to know how it arrived or what happens after its is processed. */ -public abstract class EventProcessor implements Closeable { - - private final Logger log; - private final BlockingQueue eventQueue; - private final IdempotentCloser closer; - - protected EventProcessor(final LogContext logContext, final BlockingQueue eventQueue) { - this.log = logContext.logger(EventProcessor.class); - this.eventQueue = eventQueue; - this.closer = new IdempotentCloser(); - } - - public abstract boolean process(); - - protected abstract void process(T event); - - @Override - public void close() { - closer.close(this::closeInternal, () -> log.warn("The event processor was already closed")); - } - - protected interface ProcessHandler { - - void onProcess(T event, Optional error); - } - - /** - * Drains all available events from the queue, and then processes them in order. If any errors are thrown while - * processing the individual events, these are submitted to the given {@link ProcessHandler}. - */ - protected boolean process(ProcessHandler processHandler) { - closer.assertOpen("The processor was previously closed, so no further processing can occur"); - - List events = drain(); - - if (events.isEmpty()) { - log.trace("No events to process"); - return false; - } - - try { - log.trace("Starting processing of {} event{}", events.size(), events.size() == 1 ? "" : "s"); - - for (T event : events) { - try { - Objects.requireNonNull(event, "Attempted to process a null event"); - log.trace("Processing event: {}", event); - process(event); - processHandler.onProcess(event, Optional.empty()); - } catch (Throwable t) { - KafkaException error = ConsumerUtils.maybeWrapAsKafkaException(t); - processHandler.onProcess(event, Optional.of(error)); - } - } - } finally { - log.trace("Completed processing"); - } - - return true; - } - - /** - * It is possible for the consumer to close before complete processing all the events in the queue. In - * this case, we need to throw an exception to notify the user the consumer is closed. - */ - private void closeInternal() { - log.trace("Closing event processor"); - List incompleteEvents = drain(); - - if (incompleteEvents.isEmpty()) - return; - - KafkaException exception = new KafkaException("The consumer is closed"); - - // Check each of the events and if it has a Future that is incomplete, complete it exceptionally. - incompleteEvents - .stream() - .filter(e -> e instanceof CompletableEvent) - .map(e -> ((CompletableEvent) e).future()) - .filter(f -> !f.isDone()) - .forEach(f -> { - log.debug("Completing {} with exception {}", f, exception.getMessage()); - f.completeExceptionally(exception); - }); - - log.debug("Discarding {} events because the consumer is closing", incompleteEvents.size()); - } +public interface EventProcessor { /** - * Moves all the events from the queue to the returned list. + * Process an event that is received. 
*/ - private List drain() { - LinkedList events = new LinkedList<>(); - eventQueue.drainTo(events); - return events; - } + void process(T event); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/FetchCommittedOffsetsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/FetchCommittedOffsetsEvent.java index 980a8f1104..785736791a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/FetchCommittedOffsetsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/FetchCommittedOffsetsEvent.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.Timer; import java.util.Collections; import java.util.Map; @@ -31,8 +30,8 @@ public class FetchCommittedOffsetsEvent extends CompletableApplicationEvent partitions; - public FetchCommittedOffsetsEvent(final Set partitions, final Timer timer) { - super(Type.FETCH_COMMITTED_OFFSETS, timer); + public FetchCommittedOffsetsEvent(final Set partitions, final long deadlineMs) { + super(Type.FETCH_COMMITTED_OFFSETS, deadlineMs); this.partitions = Collections.unmodifiableSet(partitions); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveOnCloseEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveOnCloseEvent.java index e77b4dfb28..647265a150 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveOnCloseEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveOnCloseEvent.java @@ -16,11 +16,9 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - public class LeaveOnCloseEvent extends CompletableApplicationEvent { - public LeaveOnCloseEvent(final Timer timer) { - super(Type.LEAVE_ON_CLOSE, timer); + public LeaveOnCloseEvent(final long deadlineMs) { + super(Type.LEAVE_ON_CLOSE, deadlineMs); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java index e218705846..e87a328d1b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java @@ -17,8 +17,8 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; +import org.apache.kafka.clients.consumer.internals.OffsetAndTimestampInternal; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.Timer; import java.util.Collections; import java.util.HashMap; @@ -32,13 +32,14 @@ * {@link OffsetAndTimestamp} found (offset of the first message whose timestamp is greater than * or equals to the target timestamp) */ -public class ListOffsetsEvent extends CompletableApplicationEvent> { - +public class ListOffsetsEvent extends CompletableApplicationEvent> { private final Map timestampsToSearch; private final boolean requireTimestamps; - public ListOffsetsEvent(final Map timestampToSearch, final boolean requireTimestamps, final Timer timer) { - super(Type.LIST_OFFSETS, timer); + public ListOffsetsEvent(Map timestampToSearch, + long deadlineMs, + boolean requireTimestamps) { + 
super(Type.LIST_OFFSETS, deadlineMs); this.timestampsToSearch = Collections.unmodifiableMap(timestampToSearch); this.requireTimestamps = requireTimestamps; } @@ -49,11 +50,10 @@ public ListOffsetsEvent(final Map timestampToSearch, final * @return Map containing all the partitions the event was trying to get offsets for, and * null {@link OffsetAndTimestamp} as value */ - public Map emptyResult() { - HashMap offsetsByTimes = new HashMap<>(timestampsToSearch.size()); - for (Map.Entry entry : timestampsToSearch.entrySet()) - offsetsByTimes.put(entry.getKey(), null); - return offsetsByTimes; + public Map emptyResults() { + Map result = new HashMap<>(); + timestampsToSearch.keySet().forEach(tp -> result.put(tp, null)); + return result; } public Map timestampsToSearch() { @@ -67,8 +67,7 @@ public boolean requireTimestamps() { @Override public String toStringBase() { return super.toStringBase() + - ", timestampsToSearch=" + timestampsToSearch + - ", requireTimestamps=" + requireTimestamps; + ", timestampsToSearch=" + timestampsToSearch + + ", requireTimestamps=" + requireTimestamps; } - } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetPositionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetPositionsEvent.java index 65893b62ec..86dbb80c0f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetPositionsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ResetPositionsEvent.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - /** * Event for resetting offsets for all assigned partitions that require it. 
This is an * asynchronous event that generates ListOffsets requests, and completes by updating in-memory @@ -26,7 +24,7 @@ */ public class ResetPositionsEvent extends CompletableApplicationEvent { - public ResetPositionsEvent(final Timer timer) { - super(Type.RESET_POSITIONS, timer); + public ResetPositionsEvent(final long deadlineMs) { + super(Type.RESET_POSITIONS, deadlineMs); } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/SyncCommitEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/SyncCommitEvent.java index 87945616ea..7dc7a023a8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/SyncCommitEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/SyncCommitEvent.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.Timer; import java.util.Map; @@ -28,7 +27,7 @@ */ public class SyncCommitEvent extends CommitEvent { - public SyncCommitEvent(final Map offsets, final Timer timer) { - super(Type.COMMIT_SYNC, offsets, timer); + public SyncCommitEvent(final Map offsets, final long deadlineMs) { + super(Type.COMMIT_SYNC, offsets, deadlineMs); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicMetadataEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicMetadataEvent.java index 33e1270ce6..9758ae0efa 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicMetadataEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicMetadataEvent.java @@ -16,16 +16,14 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - import java.util.Objects; public class TopicMetadataEvent extends AbstractTopicMetadataEvent { private final String topic; - public TopicMetadataEvent(final String topic, final Timer timer) { - super(Type.TOPIC_METADATA, timer); + public TopicMetadataEvent(final String topic, final long deadlineMs) { + super(Type.TOPIC_METADATA, deadlineMs); this.topic = Objects.requireNonNull(topic); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/UnsubscribeEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/UnsubscribeEvent.java index 0b98837001..327feaa22f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/UnsubscribeEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/UnsubscribeEvent.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - /** * Application event triggered when a user calls the unsubscribe API. This will make the consumer * release all its assignments and send a heartbeat request to leave the consumer group. 
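To make the deadline-based pattern above concrete, here is a minimal, self-contained sketch. The names DeadlineReaperSketch and DeadlinedTask are hypothetical stand-ins rather than the Kafka classes, and java.util.concurrent.TimeoutException stands in for Kafka's TimeoutException. It derives an absolute deadlineMs from a timeout with overflow clamping, then runs a reap pass that expires overdue, incomplete futures before pruning completed ones.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeoutException;

// Standalone sketch; not the actual Kafka classes.
public class DeadlineReaperSketch {

    // Mirrors the saturating addition in CompletableEvent.calculateDeadlineMs(long, long):
    // a very large timeout must clamp to Long.MAX_VALUE instead of overflowing.
    static long calculateDeadlineMs(long currentTimeMs, long timeoutMs) {
        return currentTimeMs > Long.MAX_VALUE - timeoutMs ? Long.MAX_VALUE : currentTimeMs + timeoutMs;
    }

    // Pairs a future with the absolute wall-clock time by which it must complete.
    static final class DeadlinedTask {
        final CompletableFuture<Void> future = new CompletableFuture<>();
        final long deadlineMs;
        DeadlinedTask(long deadlineMs) { this.deadlineMs = deadlineMs; }
    }

    // Two-step "reap": expire overdue, still-incomplete futures, then drop everything that is done.
    static void reap(List<DeadlinedTask> tracked, long currentTimeMs) {
        for (DeadlinedTask task : tracked) {
            if (!task.future.isDone() && currentTimeMs >= task.deadlineMs) {
                long pastDueMs = currentTimeMs - task.deadlineMs;
                task.future.completeExceptionally(
                        new TimeoutException("Task was " + pastDueMs + " ms past its expiration"));
            }
        }
        tracked.removeIf(t -> t.future.isDone());
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        List<DeadlinedTask> tracked = new ArrayList<>();
        tracked.add(new DeadlinedTask(calculateDeadlineMs(now, 30_000)));         // e.g. a commitSync(Duration) timeout
        tracked.add(new DeadlinedTask(calculateDeadlineMs(now, Long.MAX_VALUE))); // clamps to Long.MAX_VALUE

        reap(tracked, now + 60_000); // first task is expired exceptionally, the second stays tracked
        System.out.println("Still tracked: " + tracked.size()); // prints 1
    }
}

In the patch itself, the events above carry such a deadline (via CompletableEvent.calculateDeadlineMs) and are registered with the CompletableEventReaper, which is expected to be invoked at regular intervals and once more, with any remaining queued events, when the consumer closes.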
@@ -28,8 +26,8 @@ */ public class UnsubscribeEvent extends CompletableApplicationEvent { - public UnsubscribeEvent(final Timer timer) { - super(Type.UNSUBSCRIBE, timer); + public UnsubscribeEvent(final long deadlineMs) { + super(Type.UNSUBSCRIBE, deadlineMs); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ValidatePositionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ValidatePositionsEvent.java index 21e7f3cf6e..a93ff9859a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ValidatePositionsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ValidatePositionsEvent.java @@ -17,8 +17,6 @@ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.common.utils.Timer; - /** * Event for validating offsets for all assigned partitions for which a leader change has been * detected. This is an asynchronous event that generates OffsetForLeaderEpoch requests, and @@ -26,7 +24,7 @@ */ public class ValidatePositionsEvent extends CompletableApplicationEvent { - public ValidatePositionsEvent(final Timer timer) { - super(Type.VALIDATE_POSITIONS, timer); + public ValidatePositionsEvent(final long deadlineMs) { + super(Type.VALIDATE_POSITIONS, deadlineMs); } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index e084233a0d..2147614319 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -41,6 +41,7 @@ import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.AuthenticationException; @@ -250,7 +251,7 @@ public class KafkaProducer implements Producer { private final RecordAccumulator accumulator; private final Sender sender; private final Thread ioThread; - private final CompressionType compressionType; + private final Compression compression; private final Sensor errors; private final Time time; private final Serializer keySerializer; @@ -413,7 +414,7 @@ private void warnIfPartitionerDeprecated() { Arrays.asList(this.keySerializer, this.valueSerializer)); this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); - this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); + this.compression = configureCompression(config); this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); int deliveryTimeoutMs = configureDeliveryTimeout(config, log); @@ -432,7 +433,7 @@ private void warnIfPartitionerDeprecated() { int batchSize = Math.max(1, config.getInt(ProducerConfig.BATCH_SIZE_CONFIG)); this.accumulator = new RecordAccumulator(logContext, batchSize, - this.compressionType, + compression, lingerMs(config), retryBackoffMs, retryBackoffMaxMs, @@ -501,7 +502,7 @@ private void warnIfPartitionerDeprecated() { this.interceptors = interceptors; this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = 
config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); - this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); + this.compression = configureCompression(config); this.maxBlockTimeMs = config.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG); this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG); this.apiVersions = new ApiVersions(); @@ -548,6 +549,29 @@ Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadat apiVersions); } + private static Compression configureCompression(ProducerConfig config) { + CompressionType type = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); + switch (type) { + case GZIP: { + return Compression.gzip() + .level(config.getInt(ProducerConfig.COMPRESSION_GZIP_LEVEL_CONFIG)) + .build(); + } + case LZ4: { + return Compression.lz4() + .level(config.getInt(ProducerConfig.COMPRESSION_LZ4_LEVEL_CONFIG)) + .build(); + } + case ZSTD: { + return Compression.zstd() + .level(config.getInt(ProducerConfig.COMPRESSION_ZSTD_LEVEL_CONFIG)) + .build(); + } + default: + return Compression.of(type).build(); + } + } + private static int lingerMs(ProducerConfig config) { return (int) Math.min(config.getLong(ProducerConfig.LINGER_MS_CONFIG), Integer.MAX_VALUE); } @@ -852,9 +876,12 @@ public Future send(ProducerRecord record) { /** * Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged. *

- * The send is asynchronous and this method will return immediately once the record has been stored in the buffer of - * records waiting to be sent. This allows sending many records in parallel without blocking to wait for the - * response after each one. + * The send is asynchronous and this method will return immediately (except for rare cases described below) + * once the record has been stored in the buffer of records waiting to be sent. + * This allows sending many records in parallel without blocking to wait for the response after each one. + * It can block in the following cases: 1) For the first record being sent to + * the cluster by this client for the given topic. In this case it will block for up to {@code max.block.ms} milliseconds if + * the Kafka cluster is unreachable; 2) Allocating a buffer if the buffer pool doesn't have any free buffers. *
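For reference, a sketch of the producer settings that bound this blocking behaviour, together with the per-codec compression levels added in this patch; the broker address and the chosen values are placeholders, not recommendations.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

// Illustrative configuration only.
public class ProducerBlockingAndCompressionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Caps how long send() may block while fetching metadata for a new topic
        // or waiting for free space in the accumulator's buffer pool.
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60_000);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 32 * 1024 * 1024L);

        // Per-codec compression level, resolved by configureCompression() in this patch.
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
        props.put(ProducerConfig.COMPRESSION_GZIP_LEVEL_CONFIG, 6);

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // send() returns as soon as the record is buffered, blocking only in the
            // two cases described in the javadoc above.
        }
    }
}

configureCompression() then combines compression.type with the matching level setting into a Compression instance via the builder calls shown earlier in this diff.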

    * The result of the send is a {@link RecordMetadata} specifying the partition the record was sent to, the offset * it was assigned and the timestamp of the record. If the producer is configured with acks = 0, the {@link RecordMetadata} @@ -1030,7 +1057,7 @@ private Future doSend(ProducerRecord record, Callback call Header[] headers = record.headers().toArray(); int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), - compressionType, serializedKey, serializedValue, headers); + compression.type(), serializedKey, serializedValue, headers); ensureValidRecordSize(serializedSize); long timestamp = record.timestamp() == null ? nowMs : record.timestamp(); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java index 8f90b6684c..971ea0194c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java @@ -119,7 +119,7 @@ public MockProducer(final Cluster cluster, /** * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers. * - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} + * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(Cluster.empty(), autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} */ @SuppressWarnings("deprecation") public MockProducer(final boolean autoComplete, @@ -131,7 +131,7 @@ public MockProducer(final boolean autoComplete, /** * Create a new mock producer with invented metadata the given autoComplete setting and key\value serializers. * - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(cluster, autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} + * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(cluster, autoComplete, new DefaultPartitioner(), keySerializer, valueSerializer)} */ @SuppressWarnings("deprecation") public MockProducer(final Cluster cluster, @@ -144,7 +144,7 @@ public MockProducer(final Cluster cluster, /** * Create a new mock producer with invented metadata the given autoComplete setting, partitioner and key\value serializers. * - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer)} + * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(Cluster.empty(), autoComplete, partitioner, keySerializer, valueSerializer)} */ public MockProducer(final boolean autoComplete, final Partitioner partitioner, @@ -156,7 +156,7 @@ public MockProducer(final boolean autoComplete, /** * Create a new mock producer with invented metadata. 
* - * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer)} new MockProducer(Cluster.empty(), false, null, null, null)} + * Equivalent to {@link #MockProducer(Cluster, boolean, Partitioner, Serializer, Serializer) new MockProducer(Cluster.empty(), false, null, null, null)} */ public MockProducer() { this(Cluster.empty(), false, null, null, null); @@ -220,7 +220,7 @@ public void sendOffsetsToTransaction(Map offs throw this.sendOffsetsToTransactionException; } - if (offsets.size() == 0) { + if (offsets.isEmpty()) { return; } Map uncommittedOffsets = diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 9471b48aa4..c67d60a180 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -18,6 +18,9 @@ import org.apache.kafka.clients.ClientDnsLookup; import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.compress.GzipCompression; +import org.apache.kafka.common.compress.Lz4Compression; +import org.apache.kafka.common.compress.ZstdCompression; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; @@ -225,6 +228,18 @@ public class ProducerConfig extends AbstractConfig { + " values are none, gzip, snappy, lz4, or zstd. " + "Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression)."; + /** compression.gzip.level */ + public static final String COMPRESSION_GZIP_LEVEL_CONFIG = "compression.gzip.level"; + private static final String COMPRESSION_GZIP_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to gzip."; + + /** compression.lz4.level */ + public static final String COMPRESSION_LZ4_LEVEL_CONFIG = "compression.lz4.level"; + private static final String COMPRESSION_LZ4_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to lz4."; + + /** compression.zstd.level */ + public static final String COMPRESSION_ZSTD_LEVEL_CONFIG = "compression.zstd.level"; + private static final String COMPRESSION_ZSTD_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to zstd."; + /** metrics.sample.window.ms */ public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG; @@ -364,6 +379,9 @@ public class ProducerConfig extends AbstractConfig { Importance.LOW, ACKS_DOC) .define(COMPRESSION_TYPE_CONFIG, Type.STRING, CompressionType.NONE.name, in(Utils.enumOptions(CompressionType.class)), Importance.HIGH, COMPRESSION_TYPE_DOC) + .define(COMPRESSION_GZIP_LEVEL_CONFIG, Type.INT, GzipCompression.DEFAULT_LEVEL, new GzipCompression.LevelValidator(), Importance.MEDIUM, COMPRESSION_GZIP_LEVEL_DOC) + .define(COMPRESSION_LZ4_LEVEL_CONFIG, Type.INT, Lz4Compression.DEFAULT_LEVEL, between(Lz4Compression.MIN_LEVEL, Lz4Compression.MAX_LEVEL), Importance.MEDIUM, COMPRESSION_LZ4_LEVEL_DOC) + .define(COMPRESSION_ZSTD_LEVEL_CONFIG, Type.INT, ZstdCompression.DEFAULT_LEVEL, between(ZstdCompression.MIN_LEVEL, ZstdCompression.MAX_LEVEL), Importance.MEDIUM, COMPRESSION_ZSTD_LEVEL_DOC) .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) 
.define(PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC) .define(PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.LOW, PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC) diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java index a1493d2edd..31129b435d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java @@ -76,7 +76,7 @@ private int nextPartition(Cluster cluster) { // We don't have stats to do adaptive partitioning (or it's disabled), just switch to the next // partition based on uniform distribution. List availablePartitions = cluster.availablePartitionsForTopic(topic); - if (availablePartitions.size() > 0) { + if (!availablePartitions.isEmpty()) { partition = availablePartitions.get(random % availablePartitions.size()).partition(); } else { // We don't have available partitions, just pick one among all partitions. diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java index 391cc1b344..d21c3fa732 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java @@ -99,7 +99,7 @@ public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, lon this.retry = false; this.isSplitBatch = isSplitBatch; float compressionRatioEstimation = CompressionRatioEstimator.estimation(topicPartition.topic(), - recordsBuilder.compressionType()); + recordsBuilder.compression().type()); this.currentLeaderEpoch = OptionalInt.empty(); this.attemptsWhenLeaderLastChanged = 0; recordsBuilder.setEstimatedCompressionRatio(compressionRatioEstimation); @@ -131,9 +131,7 @@ boolean hasLeaderChangedForTheOngoingRetry() { boolean isRetry = attempts >= 1; if (!isRetry) return false; - if (attempts == attemptsWhenLeaderLastChanged) - return true; - return false; + return attempts == attemptsWhenLeaderLastChanged; } @@ -148,7 +146,7 @@ public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, } else { this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), key, value, headers)); + recordsBuilder.compression().type(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, @@ -174,7 +172,7 @@ private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer val // No need to get the CRC. this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), key, value, headers)); + recordsBuilder.compression().type(), key, value, headers)); FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, key == null ? 
-1 : key.remaining(), @@ -379,19 +377,19 @@ public Deque split(int splitBatchSize) { private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) { int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(), - recordsBuilder.compressionType(), record.key(), record.value(), record.headers()), batchSize); + recordsBuilder.compression().type(), record.key(), record.value(), record.headers()), batchSize); ByteBuffer buffer = ByteBuffer.allocate(initialSize); // Note that we intentionally do not set producer state (producerId, epoch, sequence, and isTransactional) // for the newly created batch. This will be set when the batch is dequeued for sending (which is consistent // with how normal batches are handled). - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compressionType(), + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compression(), TimestampType.CREATE_TIME, 0L); return new ProducerBatch(topicPartition, builder, this.createdMs, true); } public boolean isCompressed() { - return recordsBuilder.compressionType() != CompressionType.NONE; + return recordsBuilder.compression().type() != CompressionType.NONE; } /** @@ -493,7 +491,7 @@ public void close() { recordsBuilder.close(); if (!recordsBuilder.isControlBatch()) { CompressionRatioEstimator.updateEstimation(topicPartition.topic(), - recordsBuilder.compressionType(), + recordsBuilder.compression().type(), (float) recordsBuilder.compressionRatio()); } reopened = false; diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerMetrics.java index 030a23299b..ad270f6156 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerMetrics.java @@ -28,16 +28,13 @@ public class ProducerMetrics { public final SenderMetricsRegistry senderMetrics; - private final Metrics metrics; public ProducerMetrics(Metrics metrics) { - this.metrics = metrics; - this.senderMetrics = new SenderMetricsRegistry(this.metrics); + this.senderMetrics = new SenderMetricsRegistry(metrics); } private List getAllTemplates() { - List l = new ArrayList<>(this.senderMetrics.allTemplates()); - return l; + return new ArrayList<>(this.senderMetrics.allTemplates()); } public static void main(String[] args) { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index 013ad32dc7..12e77b0d51 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -36,6 +36,7 @@ import org.apache.kafka.clients.MetadataSnapshot; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.utils.ExponentialBackoff; import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.Cluster; @@ -48,7 +49,6 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.AbstractRecords; import org.apache.kafka.common.record.CompressionRatioEstimator; -import 
org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.MemoryRecordsBuilder; import org.apache.kafka.common.record.Record; @@ -74,7 +74,7 @@ public class RecordAccumulator { private final AtomicInteger flushesInProgress; private final AtomicInteger appendsInProgress; private final int batchSize; - private final CompressionType compression; + private final Compression compression; private final int lingerMs; private final ExponentialBackoff retryBackoff; private final int deliveryTimeoutMs; @@ -116,7 +116,7 @@ public class RecordAccumulator { */ public RecordAccumulator(LogContext logContext, int batchSize, - CompressionType compression, + Compression compression, int lingerMs, long retryBackoffMs, long retryBackoffMaxMs, @@ -176,7 +176,7 @@ public RecordAccumulator(LogContext logContext, */ public RecordAccumulator(LogContext logContext, int batchSize, - CompressionType compression, + Compression compression, int lingerMs, long retryBackoffMs, long retryBackoffMaxMs, @@ -344,7 +344,7 @@ public RecordAppendResult append(String topic, if (buffer == null) { byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); - int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); + int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression.type(), key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, partition, maxTimeToBlock); // This call may block if we exhausted buffer space. buffer = free.allocate(size, maxTimeToBlock); @@ -533,7 +533,7 @@ public int splitAndReenqueue(ProducerBatch bigBatch) { // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure // the split doesn't happen too often. - CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, + CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression.type(), Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java index 99bc1d68b0..c4e2b73e8b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java @@ -270,13 +270,14 @@ public void run() { while (!forceClose && transactionManager != null && transactionManager.hasOngoingTransaction()) { if (!transactionManager.isCompleting()) { log.info("Aborting incomplete transaction due to shutdown"); - try { // It is possible for the transaction manager to throw errors when aborting. Catch these // so as not to interfere with the rest of the shutdown logic. transactionManager.beginAbort(); } catch (Exception e) { - log.error("Error in kafka producer I/O thread while aborting transaction: ", e); + log.error("Error in kafka producer I/O thread while aborting transaction when during closing: ", e); + // Force close in case the transactionManager is in error states. 
+ forceClose = true; } } try { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java index b432009261..f6312fda1b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/StickyPartitionCache.java @@ -51,8 +51,8 @@ public int nextPartition(String topic, Cluster cluster, int prevPartition) { // triggered the new batch matches the sticky partition that needs to be changed. if (oldPart == null || oldPart == prevPartition) { List availablePartitions = cluster.availablePartitionsForTopic(topic); - if (availablePartitions.size() < 1) { - Integer random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); + if (availablePartitions.isEmpty()) { + int random = Utils.toPositive(ThreadLocalRandom.current().nextInt()); newPart = random % partitions.size(); } else if (availablePartitions.size() == 1) { newPart = availablePartitions.get(0).partition(); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java index 4ff54b1759..fab24c5e5e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java @@ -1330,7 +1330,7 @@ public void handleResponse(AbstractResponse response) { // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator, // just treat it the same as PRODUCE_FENCED. fatalError(Errors.PRODUCER_FENCED.exception()); - } else if (error == Errors.ABORTABLE_TRANSACTION) { + } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { fatalError(new KafkaException("Unexpected error in InitProducerIdResponse; " + error.message())); @@ -1401,7 +1401,7 @@ public void handleResponse(AbstractResponse response) { } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { abortableErrorIfPossible(error.exception()); return; - } else if (error == Errors.ABORTABLE_TRANSACTION) { + } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); return; } else { @@ -1507,7 +1507,7 @@ public void handleResponse(AbstractResponse response) { fatalError(error.exception()); } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(key)); - } else if (error == Errors.ABORTABLE_TRANSACTION) { + } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { fatalError(new KafkaException(String.format("Could not find a coordinator with type %s with key %s due to " + @@ -1562,7 +1562,7 @@ public void handleResponse(AbstractResponse response) { fatalError(error.exception()); } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) { abortableErrorIfPossible(error.exception()); - } else if (error == Errors.ABORTABLE_TRANSACTION) { + } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { fatalError(new KafkaException("Unhandled error in EndTxnResponse: " + error.message())); @@ -1622,7 +1622,7 @@ public void handleResponse(AbstractResponse response) { fatalError(error.exception()); } else if (error 
== Errors.GROUP_AUTHORIZATION_FAILED) { abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId())); - } else if (error == Errors.ABORTABLE_TRANSACTION) { + } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { fatalError(new KafkaException("Unexpected error in AddOffsetsToTxnResponse: " + error.message())); @@ -1687,7 +1687,7 @@ public void handleResponse(AbstractResponse response) { abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId())); break; } else if (error == Errors.FENCED_INSTANCE_ID || - error == Errors.ABORTABLE_TRANSACTION) { + error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); break; } else if (error == Errors.UNKNOWN_MEMBER_ID diff --git a/clients/src/main/java/org/apache/kafka/common/Cluster.java b/clients/src/main/java/org/apache/kafka/common/Cluster.java index 84b77ef5f4..820adbdb5f 100644 --- a/clients/src/main/java/org/apache/kafka/common/Cluster.java +++ b/clients/src/main/java/org/apache/kafka/common/Cluster.java @@ -385,12 +385,13 @@ public boolean equals(Object o) { Objects.equals(internalTopics, cluster.internalTopics) && Objects.equals(controller, cluster.controller) && Objects.equals(partitionsByTopicPartition, cluster.partitionsByTopicPartition) && - Objects.equals(clusterResource, cluster.clusterResource); + Objects.equals(clusterResource, cluster.clusterResource) && + Objects.equals(topicIds, cluster.topicIds); } @Override public int hashCode() { return Objects.hash(isBootstrapConfigured, nodes, unauthorizedTopics, invalidTopics, internalTopics, controller, - partitionsByTopicPartition, clusterResource); + partitionsByTopicPartition, clusterResource, topicIds); } } diff --git a/clients/src/main/java/org/apache/kafka/common/PartitionInfo.java b/clients/src/main/java/org/apache/kafka/common/PartitionInfo.java index 29bedff0cf..0577b4200a 100644 --- a/clients/src/main/java/org/apache/kafka/common/PartitionInfo.java +++ b/clients/src/main/java/org/apache/kafka/common/PartitionInfo.java @@ -16,6 +16,9 @@ */ package org.apache.kafka.common; +import java.util.Arrays; +import java.util.Objects; + /** * This is used to describe per-partition state in the MetadataResponse. 
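A minimal sketch (assumed usage, not taken from this changeset) of what the PartitionInfo equals/hashCode added in the hunk below provides: logically identical metadata entries compare equal, which benefits map-based comparisons such as the Cluster equality check above.

    import org.apache.kafka.common.Node;
    import org.apache.kafka.common.PartitionInfo;

    Node broker = new Node(1, "broker-1", 9092);
    PartitionInfo a = new PartitionInfo("orders", 0, broker, new Node[]{broker}, new Node[]{broker});
    PartitionInfo b = new PartitionInfo("orders", 0, broker, new Node[]{broker}, new Node[]{broker});
    boolean sameEntry = a.equals(b) && a.hashCode() == b.hashCode(); // true once the hunk below is applied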
*/ @@ -88,6 +91,29 @@ public Node[] offlineReplicas() { return offlineReplicas; } + @Override + public int hashCode() { + return Objects.hash(topic, partition, leader, Arrays.hashCode(replicas), + Arrays.hashCode(inSyncReplicas), Arrays.hashCode(offlineReplicas)); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + PartitionInfo other = (PartitionInfo) obj; + return Objects.equals(topic, other.topic) && + partition == other.partition && + Objects.equals(leader, other.leader) && + Objects.deepEquals(replicas, other.replicas) && + Objects.deepEquals(inSyncReplicas, other.inSyncReplicas) && + Objects.deepEquals(offlineReplicas, other.offlineReplicas); + } + @Override public String toString() { return String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)", diff --git a/clients/src/main/java/org/apache/kafka/common/TopicPartitionInfo.java b/clients/src/main/java/org/apache/kafka/common/TopicPartitionInfo.java index 60b5d37b04..73d7a27d94 100644 --- a/clients/src/main/java/org/apache/kafka/common/TopicPartitionInfo.java +++ b/clients/src/main/java/org/apache/kafka/common/TopicPartitionInfo.java @@ -17,11 +17,10 @@ package org.apache.kafka.common; -import org.apache.kafka.common.utils.Utils; - import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; /** * A class containing leadership, replicas and ISR information for a topic partition. @@ -31,6 +30,8 @@ public class TopicPartitionInfo { private final Node leader; private final List replicas; private final List isr; + private final List elr; + private final List lastKnownElr; /** * Create an instance of this class with the provided parameters. @@ -40,12 +41,32 @@ public class TopicPartitionInfo { * @param replicas the replicas of the partition in the same order as the replica assignment (the preferred replica * is the head of the list) * @param isr the in-sync replicas + * @param elr the eligible leader replicas + * @param lastKnownElr the last known eligible leader replicas. */ + public TopicPartitionInfo( + int partition, + Node leader, + List replicas, + List isr, + List elr, + List lastKnownElr + ) { + this.partition = partition; + this.leader = leader; + this.replicas = Collections.unmodifiableList(replicas); + this.isr = Collections.unmodifiableList(isr); + this.elr = Collections.unmodifiableList(elr); + this.lastKnownElr = Collections.unmodifiableList(lastKnownElr); + } + public TopicPartitionInfo(int partition, Node leader, List replicas, List isr) { this.partition = partition; this.leader = leader; this.replicas = Collections.unmodifiableList(replicas); this.isr = Collections.unmodifiableList(isr); + this.elr = null; + this.lastKnownElr = null; } /** @@ -79,9 +100,26 @@ public List isr() { return isr; } + /** + * Return the eligible leader replicas of the partition. Note that the ordering of the result is unspecified. + */ + public List elr() { + return elr; + } + + /** + * Return the last known eligible leader replicas of the partition. Note that the ordering of the result is unspecified. + */ + public List lastKnownElr() { + return lastKnownElr; + } + public String toString() { + String elrString = elr != null ? elr.stream().map(Node::toString).collect(Collectors.joining(", ")) : "N/A"; + String lastKnownElrString = lastKnownElr != null ? 
lastKnownElr.stream().map(Node::toString).collect(Collectors.joining(", ")) : "N/A"; return "(partition=" + partition + ", leader=" + leader + ", replicas=" + - Utils.join(replicas, ", ") + ", isr=" + Utils.join(isr, ", ") + ")"; + replicas.stream().map(Node::toString).collect(Collectors.joining(", ")) + ", isr=" + isr.stream().map(Node::toString).collect(Collectors.joining(", ")) + + ", elr=" + elrString + ", lastKnownElr=" + lastKnownElrString + ")"; } @Override @@ -94,7 +132,9 @@ public boolean equals(Object o) { return partition == that.partition && Objects.equals(leader, that.leader) && Objects.equals(replicas, that.replicas) && - Objects.equals(isr, that.isr); + Objects.equals(isr, that.isr) && + Objects.equals(elr, that.elr) && + Objects.equals(lastKnownElr, that.lastKnownElr); } @Override @@ -103,6 +143,8 @@ public int hashCode() { result = 31 * result + (leader != null ? leader.hashCode() : 0); result = 31 * result + (replicas != null ? replicas.hashCode() : 0); result = 31 * result + (isr != null ? isr.hashCode() : 0); + result = 31 * result + (elr != null ? elr.hashCode() : 0); + result = 31 * result + (lastKnownElr != null ? lastKnownElr.hashCode() : 0); return result; } } diff --git a/clients/src/main/java/org/apache/kafka/common/compress/Compression.java b/clients/src/main/java/org/apache/kafka/common/compress/Compression.java new file mode 100644 index 0000000000..866f7c67e1 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/Compression.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.compress; + +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ByteBufferOutputStream; + +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +public interface Compression { + + /** + * The compression type for this compression codec + */ + CompressionType type(); + + /** + * Wrap bufferStream with an OutputStream that will compress data with this Compression. + * + * @param bufferStream The buffer to write the compressed data to + * @param messageVersion The record format version to use. + * Note: Unlike {@link #wrapForInput}, this cannot take {@link ByteBuffer}s directly. + * Currently, MemoryRecordsBuilder writes to the underlying buffer in the given {@link ByteBufferOutputStream} after the compressed data has been written. + * In the event that the buffer needs to be expanded while writing the data, access to the underlying buffer needs to be preserved. 
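A minimal usage sketch (assumed, not part of this changeset) for the TopicPartitionInfo ELR additions above; the Node values are placeholders.

    import java.util.Arrays;
    import java.util.Collections;
    import org.apache.kafka.common.Node;
    import org.apache.kafka.common.TopicPartitionInfo;

    Node leader = new Node(1, "broker-1", 9092);
    Node follower = new Node(2, "broker-2", 9092);
    TopicPartitionInfo info = new TopicPartitionInfo(0, leader,
            Arrays.asList(leader, follower),     // replicas
            Collections.singletonList(leader),   // isr
            Collections.singletonList(follower), // elr
            Collections.emptyList());            // lastKnownElr
    info.elr();          // [broker-2]; the four-argument constructor leaves this null
    info.lastKnownElr(); // []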
+ */ + OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion); + + /** + * Wrap buffer with an InputStream that will decompress data with this Compression. + * + * @param buffer The {@link ByteBuffer} instance holding the data to decompress. + * @param messageVersion The record format version to use. + * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. + * For small record batches, allocating a potentially large buffer (64 KB for LZ4) + * will dominate the cost of decompressing and iterating over the records in the + * batch. As such, a supplier that reuses buffers will have a significant + * performance impact. + */ + InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier); + + /** + * Recommended size of buffer for storing decompressed output. + */ + default int decompressionOutputSize() { + throw new UnsupportedOperationException("Size of decompression buffer is not defined for this compression type=" + type().name); + } + + interface Builder { + T build(); + } + + static Builder of(final String compressionName) { + CompressionType compressionType = CompressionType.forName(compressionName); + return of(compressionType); + } + + static Builder of(final CompressionType compressionType) { + switch (compressionType) { + case NONE: + return none(); + case GZIP: + return gzip(); + case SNAPPY: + return snappy(); + case LZ4: + return lz4(); + case ZSTD: + return zstd(); + default: + throw new IllegalArgumentException("Unknown compression type: " + compressionType.name); + } + } + + NoCompression NONE = none().build(); + + static NoCompression.Builder none() { + return new NoCompression.Builder(); + } + + static GzipCompression.Builder gzip() { + return new GzipCompression.Builder(); + } + + static SnappyCompression.Builder snappy() { + return new SnappyCompression.Builder(); + } + + static Lz4Compression.Builder lz4() { + return new Lz4Compression.Builder(); + } + + static ZstdCompression.Builder zstd() { + return new ZstdCompression.Builder(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/GzipCompression.java b/clients/src/main/java/org/apache/kafka/common/compress/GzipCompression.java new file mode 100644 index 0000000000..52e38700c4 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/GzipCompression.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
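A minimal usage sketch (assumed, not part of this changeset) for the Compression factories defined above; the level shown is just an in-range example.

    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.record.CompressionType;

    Compression gzip = Compression.gzip().level(6).build();          // codec-specific builder
    Compression zstd = Compression.of(CompressionType.ZSTD).build(); // generic factory, default settings
    Compression none = Compression.NONE;                             // shared no-op instance
    CompressionType type = gzip.type();                              // CompressionType.GZIP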
+ */ +package org.apache.kafka.common.compress; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ByteBufferInputStream; +import org.apache.kafka.common.utils.ByteBufferOutputStream; +import org.apache.kafka.common.utils.ChunkedBytesStream; + +import java.io.BufferedOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.zip.Deflater; +import java.util.zip.GZIPInputStream; + +public class GzipCompression implements Compression { + + public static final int MIN_LEVEL = Deflater.BEST_SPEED; + public static final int MAX_LEVEL = Deflater.BEST_COMPRESSION; + public static final int DEFAULT_LEVEL = Deflater.DEFAULT_COMPRESSION; + + private final int level; + + private GzipCompression(int level) { + this.level = level; + } + + @Override + public CompressionType type() { + return CompressionType.GZIP; + } + + @Override + public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { + try { + // Set input buffer (uncompressed) to 16 KB (none by default) and output buffer (compressed) to + // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller passes a small + // number of bytes to write (potentially a single byte) + return new BufferedOutputStream(new GzipOutputStream(buffer, 8 * 1024, level), 16 * 1024); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + @Override + public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { + try { + // Set input buffer (compressed) to 8 KB (GZIPInputStream uses 0.5 KB by default) to ensure reasonable + // performance in cases where the caller reads a small number of bytes (potentially a single byte). + // + // Size of output buffer (uncompressed) is provided by decompressionOutputSize. + // + // ChunkedBytesStream is used to wrap the GZIPInputStream because the default implementation of + // GZIPInputStream does not use an intermediate buffer for decompression in chunks. 
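A rough round-trip sketch (assumed usage, not part of this changeset) of the wrapForOutput/wrapForInput pair above, reusing decompression buffers through a BufferSupplier.

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.record.RecordBatch;
    import org.apache.kafka.common.utils.BufferSupplier;
    import org.apache.kafka.common.utils.ByteBufferOutputStream;

    static void gzipRoundTrip() throws Exception {
        Compression gzip = Compression.gzip().build();
        ByteBufferOutputStream sink = new ByteBufferOutputStream(256);
        try (OutputStream out = gzip.wrapForOutput(sink, RecordBatch.CURRENT_MAGIC_VALUE)) {
            out.write("hello".getBytes(StandardCharsets.UTF_8));
        }
        ByteBuffer compressed = sink.buffer().duplicate();
        compressed.flip(); // read back what was written
        try (InputStream in = gzip.wrapForInput(compressed, RecordBatch.CURRENT_MAGIC_VALUE, BufferSupplier.create())) {
            byte[] plain = new byte[5];
            int read = in.read(plain); // decompresses back to "hello"
        }
    }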
+ return new ChunkedBytesStream(new GZIPInputStream(new ByteBufferInputStream(buffer), 8 * 1024), + decompressionBufferSupplier, + decompressionOutputSize(), + false); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + @Override + public int decompressionOutputSize() { + // 16KB has been chosen based on legacy implementation introduced in https://github.com/apache/kafka/pull/6785 + return 16 * 1024; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GzipCompression that = (GzipCompression) o; + return level == that.level; + } + + @Override + public int hashCode() { + return Objects.hash(level); + } + + public static class Builder implements Compression.Builder { + private int level = DEFAULT_LEVEL; + + public Builder level(int level) { + if ((level < MIN_LEVEL || MAX_LEVEL < level) && level != DEFAULT_LEVEL) { + throw new IllegalArgumentException("gzip doesn't support given compression level: " + level); + } + + this.level = level; + return this; + } + + @Override + public GzipCompression build() { + return new GzipCompression(level); + } + } + + public static class LevelValidator implements ConfigDef.Validator { + + @Override + public void ensureValid(String name, Object o) { + if (o == null) + throw new ConfigException(name, null, "Value must be non-null"); + int level = ((Number) o).intValue(); + if (level > MAX_LEVEL || (level < MIN_LEVEL && level != DEFAULT_LEVEL)) { + throw new ConfigException(name, o, "Value must be between " + MIN_LEVEL + " and " + MAX_LEVEL + " or equal to " + DEFAULT_LEVEL); + } + } + + @Override + public String toString() { + return "[" + MIN_LEVEL + ",...," + MAX_LEVEL + "] or " + DEFAULT_LEVEL; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/GzipOutputStream.java b/clients/src/main/java/org/apache/kafka/common/compress/GzipOutputStream.java new file mode 100644 index 0000000000..f6ac4fde35 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/GzipOutputStream.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.compress; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.zip.GZIPOutputStream; + +/** + * An extension of {@link GZIPOutputStream}, with compression level functionality. + */ +public class GzipOutputStream extends GZIPOutputStream { + /** + * Creates a new {@link OutputStream} with the specified buffer size and compression level. + * + * @param out the output stream + * @param size the output buffer size + * @param level the compression level + * @throws IOException If an I/O error has occurred. 
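For the gzip level handling above, a small sketch (assumed usage, not part of this changeset): the Builder and the LevelValidator both accept Deflater's 1..9 range plus -1 as the JDK default.

    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.compress.GzipCompression;

    Compression best = Compression.gzip().level(GzipCompression.MAX_LEVEL).build();     // 9 = Deflater.BEST_COMPRESSION
    Compression dflt = Compression.gzip().level(GzipCompression.DEFAULT_LEVEL).build(); // -1 = Deflater.DEFAULT_COMPRESSION
    // Compression.gzip().level(10) throws IllegalArgumentException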
+ */ + public GzipOutputStream(OutputStream out, int size, int level) throws IOException { + super(out, size); + setLevel(level); + } + + /** + * Sets the compression level. + * + * @param level the compression level + */ + private void setLevel(int level) { + def.setLevel(level); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockInputStream.java b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java similarity index 95% rename from clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockInputStream.java rename to clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java index 037af8c8dc..e2c9ef8a63 100644 --- a/clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockInputStream.java +++ b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java @@ -23,8 +23,8 @@ import net.jpountz.xxhash.XXHash32; import net.jpountz.xxhash.XXHashFactory; -import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.BD; -import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.FLG; +import org.apache.kafka.common.compress.Lz4BlockOutputStream.BD; +import org.apache.kafka.common.compress.Lz4BlockOutputStream.FLG; import org.apache.kafka.common.utils.BufferSupplier; import java.io.IOException; @@ -32,8 +32,8 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; -import static org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK; -import static org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream.MAGIC; +import static org.apache.kafka.common.compress.Lz4BlockOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK; +import static org.apache.kafka.common.compress.Lz4BlockOutputStream.MAGIC; /** * A partial implementation of the v1.5.1 LZ4 Frame format. @@ -42,7 +42,7 @@ * * This class is not thread-safe. */ -public final class KafkaLZ4BlockInputStream extends InputStream { +public final class Lz4BlockInputStream extends InputStream { public static final String PREMATURE_EOS = "Stream ended prematurely"; public static final String NOT_SUPPORTED = "Stream unsupported (invalid magic bytes)"; @@ -86,7 +86,7 @@ public final class KafkaLZ4BlockInputStream extends InputStream { * @param ignoreFlagDescriptorChecksum for compatibility with old kafka clients, ignore incorrect HC byte * @throws IOException */ - public KafkaLZ4BlockInputStream(ByteBuffer in, BufferSupplier bufferSupplier, boolean ignoreFlagDescriptorChecksum) throws IOException { + public Lz4BlockInputStream(ByteBuffer in, BufferSupplier bufferSupplier, boolean ignoreFlagDescriptorChecksum) throws IOException { if (BROKEN_LZ4_EXCEPTION != null) { throw BROKEN_LZ4_EXCEPTION; } diff --git a/clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockOutputStream.java b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java similarity index 85% rename from clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockOutputStream.java rename to clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java index 5c5aee416f..97e370a383 100644 --- a/clients/src/main/java/org/apache/kafka/common/compress/KafkaLZ4BlockOutputStream.java +++ b/clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java @@ -33,18 +33,14 @@ * * This class is not thread-safe. 
*/ -public final class KafkaLZ4BlockOutputStream extends OutputStream { +public final class Lz4BlockOutputStream extends OutputStream { public static final int MAGIC = 0x184D2204; - public static final int LZ4_MAX_HEADER_LENGTH = 19; public static final int LZ4_FRAME_INCOMPRESSIBLE_MASK = 0x80000000; public static final String CLOSED_STREAM = "The stream is already closed"; public static final int BLOCKSIZE_64KB = 4; - public static final int BLOCKSIZE_256KB = 5; - public static final int BLOCKSIZE_1MB = 6; - public static final int BLOCKSIZE_4MB = 7; private final LZ4Compressor compressor; private final XXHash32 checksum; @@ -64,15 +60,22 @@ public final class KafkaLZ4BlockOutputStream extends OutputStream { * @param out The output stream to compress * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other * values will generate an exception + * @param level The compression level to use * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for * every block of data * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum * compatible with older kafka clients. * @throws IOException */ - public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum) throws IOException { + public Lz4BlockOutputStream(OutputStream out, int blockSize, int level, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum) throws IOException { this.out = out; - compressor = LZ4Factory.fastestInstance().fastCompressor(); + /* + * lz4-java provides two compressors: fastCompressor, which uses less memory and compresses quickly but supports only the default compression level, + * and highCompressor, which uses more memory and is slower but compresses more efficiently and supports the full range of compression levels. + * + * For backward compatibility, Lz4BlockOutputStream uses fastCompressor at the default compression level and highCompressor for any other level. + */ + compressor = level == Lz4Compression.DEFAULT_LEVEL ? LZ4Factory.fastestInstance().fastCompressor() : LZ4Factory.fastestInstance().highCompressor(level); checksum = XXHashFactory.fastestInstance().hash32(); this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum; bd = new BD(blockSize); @@ -89,40 +92,14 @@ public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockC * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm. * * @param out The output stream to compress - * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. 
All other - * values will generate an exception - * @throws IOException - */ - public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize) throws IOException { - this(out, blockSize, false, false); - } - - /** - * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm. - * - * @param out The output stream to compress + * @param level The compression level to use + * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum + * compatible with older kafka clients. * @throws IOException */ - public KafkaLZ4BlockOutputStream(OutputStream out) throws IOException { - this(out, BLOCKSIZE_64KB); - } - public KafkaLZ4BlockOutputStream(OutputStream out, boolean useBrokenHC) throws IOException { - this(out, BLOCKSIZE_64KB, false, useBrokenHC); + public Lz4BlockOutputStream(OutputStream out, int level, boolean useBrokenFlagDescriptorChecksum) throws IOException { + this(out, BLOCKSIZE_64KB, level, false, useBrokenFlagDescriptorChecksum); } /** @@ -292,10 +269,6 @@ public static class FLG { private final int blockIndependence; private final int version; - public FLG() { - this(false); - } - public FLG(boolean blockChecksum) { this(0, 0, 0, blockChecksum ? 1 : 0, 1, VERSION); } @@ -375,10 +348,6 @@ public static class BD { private final int blockSizeValue; private final int reserved3; - public BD() { - this(0, BLOCKSIZE_64KB, 0); - } - public BD(int blockSizeValue) { this(0, blockSizeValue, 0); } diff --git a/clients/src/main/java/org/apache/kafka/common/compress/Lz4Compression.java b/clients/src/main/java/org/apache/kafka/common/compress/Lz4Compression.java new file mode 100644 index 0000000000..42c1a1a141 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/Lz4Compression.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.compress; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ByteBufferOutputStream; +import org.apache.kafka.common.utils.ChunkedBytesStream; + +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class Lz4Compression implements Compression { + + // These values come from net.jpountz.lz4.LZ4Constants + // We may need to update them if the lz4 library changes these values. 
+ public static final int MIN_LEVEL = 1; + public static final int MAX_LEVEL = 17; + public static final int DEFAULT_LEVEL = 9; + + private final int level; + + private Lz4Compression(int level) { + this.level = level; + } + + @Override + public CompressionType type() { + return CompressionType.LZ4; + } + + @Override + public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { + try { + return new Lz4BlockOutputStream(buffer, level, messageVersion == RecordBatch.MAGIC_VALUE_V0); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + @Override + public InputStream wrapForInput(ByteBuffer inputBuffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { + try { + return new ChunkedBytesStream( + new Lz4BlockInputStream(inputBuffer, decompressionBufferSupplier, messageVersion == RecordBatch.MAGIC_VALUE_V0), + decompressionBufferSupplier, decompressionOutputSize(), true); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + @Override + public int decompressionOutputSize() { + // Lz4BlockInputStream uses an internal intermediate buffer to store decompressed data. The size + // of this buffer is based on legacy implementation based on skipArray introduced in + // https://github.com/apache/kafka/pull/6785 + return 2 * 1024; // 2KB + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Lz4Compression that = (Lz4Compression) o; + return level == that.level; + } + + @Override + public int hashCode() { + return Objects.hash(level); + } + + public static class Builder implements Compression.Builder { + private int level = DEFAULT_LEVEL; + + public Builder level(int level) { + if (level < MIN_LEVEL || MAX_LEVEL < level) { + throw new IllegalArgumentException("lz4 doesn't support given compression level: " + level); + } + + this.level = level; + return this; + } + + @Override + public Lz4Compression build() { + return new Lz4Compression(level); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/SnappyFactory.java b/clients/src/main/java/org/apache/kafka/common/compress/NoCompression.java similarity index 57% rename from clients/src/main/java/org/apache/kafka/common/compress/SnappyFactory.java rename to clients/src/main/java/org/apache/kafka/common/compress/NoCompression.java index b56273df8e..770fd114d5 100644 --- a/clients/src/main/java/org/apache/kafka/common/compress/SnappyFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/compress/NoCompression.java @@ -14,37 +14,41 @@ * See the License for the specific language governing permissions and * limitations under the License. 
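A small sketch (assumed usage, not part of this changeset) of the lz4 level range defined above: levels 1..17 with 9 as the default. Per the Lz4BlockOutputStream comment earlier in this patch, the default level keeps lz4-java's fastCompressor while any other level switches to highCompressor.

    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.compress.Lz4Compression;

    Compression lz4Fast = Compression.lz4().build();                                 // DEFAULT_LEVEL (9) -> fastCompressor
    Compression lz4High = Compression.lz4().level(Lz4Compression.MAX_LEVEL).build(); // 17 -> highCompressor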
*/ - package org.apache.kafka.common.compress; -import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferInputStream; import org.apache.kafka.common.utils.ByteBufferOutputStream; -import org.xerial.snappy.SnappyInputStream; -import org.xerial.snappy.SnappyOutputStream; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -public class SnappyFactory { +public class NoCompression implements Compression { - private SnappyFactory() { } + private NoCompression() {} - public static OutputStream wrapForOutput(ByteBufferOutputStream buffer) { - try { - return new SnappyOutputStream(buffer); - } catch (Throwable e) { - throw new KafkaException(e); - } + @Override + public CompressionType type() { + return CompressionType.NONE; } - public static InputStream wrapForInput(ByteBuffer buffer) { - try { - return new SnappyInputStream(new ByteBufferInputStream(buffer)); - } catch (Throwable e) { - throw new KafkaException(e); - } + @Override + public OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion) { + return bufferStream; + } + + @Override + public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { + return new ByteBufferInputStream(buffer); } + public static class Builder implements Compression.Builder { + + @Override + public NoCompression build() { + return new NoCompression(); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/compress/SnappyCompression.java b/clients/src/main/java/org/apache/kafka/common/compress/SnappyCompression.java new file mode 100644 index 0000000000..3ec09f2355 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/SnappyCompression.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
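A small sketch (assumed usage, not part of this changeset) showing that the NONE implementation above is a pass-through: the output wrapper is the caller's ByteBufferOutputStream itself.

    import java.io.OutputStream;
    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.record.RecordBatch;
    import org.apache.kafka.common.utils.ByteBufferOutputStream;

    ByteBufferOutputStream sink = new ByteBufferOutputStream(64);
    OutputStream out = Compression.NONE.wrapForOutput(sink, RecordBatch.CURRENT_MAGIC_VALUE);
    boolean passThrough = out == sink; // true, no wrapping for NONE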
+ */ + +package org.apache.kafka.common.compress; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ByteBufferInputStream; +import org.apache.kafka.common.utils.ByteBufferOutputStream; +import org.apache.kafka.common.utils.ChunkedBytesStream; +import org.xerial.snappy.SnappyInputStream; +import org.xerial.snappy.SnappyOutputStream; + +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +public class SnappyCompression implements Compression { + + private SnappyCompression() {} + + @Override + public CompressionType type() { + return CompressionType.SNAPPY; + } + + @Override + public OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion) { + try { + return new SnappyOutputStream(bufferStream); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + @Override + public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { + // SnappyInputStream uses default implementation of InputStream for skip. Default implementation of + // SnappyInputStream allocates a new skip buffer every time, hence, we prefer our own implementation. + try { + return new ChunkedBytesStream(new SnappyInputStream(new ByteBufferInputStream(buffer)), + decompressionBufferSupplier, + decompressionOutputSize(), + false); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + @Override + public int decompressionOutputSize() { + // SnappyInputStream already uses an intermediate buffer internally. The size + // of this buffer is based on legacy implementation based on skipArray introduced in + // https://github.com/apache/kafka/pull/6785 + return 2 * 1024; // 2KB + } + + @Override + public boolean equals(Object o) { + return o instanceof SnappyCompression; + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + public static class Builder implements Compression.Builder { + + @Override + public SnappyCompression build() { + return new SnappyCompression(); + } + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/ZstdCompression.java b/clients/src/main/java/org/apache/kafka/common/compress/ZstdCompression.java new file mode 100644 index 0000000000..0664fd2dbb --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/compress/ZstdCompression.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
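Snappy exposes no level knob, so its builder is trivial; note that equals above treats every SnappyCompression as interchangeable while hashCode falls back to the identity hash. A small sketch (assumed usage, not part of this changeset):

    import org.apache.kafka.common.compress.Compression;

    Compression a = Compression.snappy().build();
    Compression b = Compression.snappy().build();
    boolean interchangeable = a.equals(b); // true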
+ */ + +package org.apache.kafka.common.compress; + +import com.github.luben.zstd.BufferPool; +import com.github.luben.zstd.RecyclingBufferPool; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdInputStreamNoFinalizer; +import com.github.luben.zstd.ZstdOutputStreamNoFinalizer; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.record.CompressionType; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ByteBufferInputStream; +import org.apache.kafka.common.utils.ByteBufferOutputStream; +import org.apache.kafka.common.utils.ChunkedBytesStream; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class ZstdCompression implements Compression { + + public static final int MIN_LEVEL = Zstd.minCompressionLevel(); + public static final int MAX_LEVEL = Zstd.maxCompressionLevel(); + public static final int DEFAULT_LEVEL = Zstd.defaultCompressionLevel(); + + private final int level; + + private ZstdCompression(int level) { + this.level = level; + } + + @Override + public CompressionType type() { + return CompressionType.ZSTD; + } + + @Override + public OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion) { + try { + // Set input buffer (uncompressed) to 16 KB (none by default) to ensure reasonable performance + // in cases where the caller passes a small number of bytes to write (potentially a single byte). + return new BufferedOutputStream(new ZstdOutputStreamNoFinalizer(bufferStream, RecyclingBufferPool.INSTANCE, level), 16 * 1024); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + @Override + public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { + try { + return new ChunkedBytesStream(wrapForZstdInput(buffer, decompressionBufferSupplier), + decompressionBufferSupplier, + decompressionOutputSize(), + false); + } catch (Throwable e) { + throw new KafkaException(e); + } + } + + // visible for testing + public static ZstdInputStreamNoFinalizer wrapForZstdInput(ByteBuffer buffer, BufferSupplier decompressionBufferSupplier) throws IOException { + // We use our own BufferSupplier instead of com.github.luben.zstd.RecyclingBufferPool since our + // implementation doesn't require locking or soft references. The buffer allocated by this buffer pool is + // used by zstd-jni for 1\ reading compressed data from input stream into a buffer before passing it over JNI + // 2\ implementation of skip inside zstd-jni where buffer is obtained and released with every call + final BufferPool bufferPool = new BufferPool() { + @Override + public ByteBuffer get(int capacity) { + return decompressionBufferSupplier.get(capacity); + } + + @Override + public void release(ByteBuffer buffer) { + decompressionBufferSupplier.release(buffer); + } + }; + // Ideally, data from ZstdInputStreamNoFinalizer should be read in a bulk because every call to + // `ZstdInputStreamNoFinalizer#read()` is a JNI call. The caller is expected to + // balance the tradeoff between reading large amount of data vs. making multiple JNI calls. + return new ZstdInputStreamNoFinalizer(new ByteBufferInputStream(buffer), bufferPool); + } + + /** + * Size of intermediate buffer which contains uncompressed data. 
+ * This size should be <= ZSTD_BLOCKSIZE_MAX + * see: https://github.com/facebook/zstd/blob/189653a9c10c9f4224a5413a6d6a69dd01d7c3bd/lib/zstd.h#L854 + */ + @Override + public int decompressionOutputSize() { + // 16KB has been chosen based on legacy implementation introduced in https://github.com/apache/kafka/pull/6785 + return 16 * 1024; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ZstdCompression that = (ZstdCompression) o; + return level == that.level; + } + + @Override + public int hashCode() { + return Objects.hash(level); + } + + public static class Builder implements Compression.Builder { + private int level = DEFAULT_LEVEL; + + public Builder level(int level) { + if (MAX_LEVEL < level || level < MIN_LEVEL) { + throw new IllegalArgumentException("zstd doesn't support given compression level: " + level); + } + + this.level = level; + return this; + } + + @Override + public ZstdCompression build() { + return new ZstdCompression(level); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/compress/ZstdFactory.java b/clients/src/main/java/org/apache/kafka/common/compress/ZstdFactory.java deleted file mode 100644 index feac017c92..0000000000 --- a/clients/src/main/java/org/apache/kafka/common/compress/ZstdFactory.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.compress; - -import com.github.luben.zstd.BufferPool; -import com.github.luben.zstd.RecyclingBufferPool; -import com.github.luben.zstd.ZstdInputStreamNoFinalizer; -import com.github.luben.zstd.ZstdOutputStreamNoFinalizer; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.utils.BufferSupplier; -import org.apache.kafka.common.utils.ByteBufferInputStream; -import org.apache.kafka.common.utils.ByteBufferOutputStream; - -import java.io.BufferedOutputStream; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; - -public class ZstdFactory { - - private ZstdFactory() { } - - public static OutputStream wrapForOutput(ByteBufferOutputStream buffer) { - try { - // Set input buffer (uncompressed) to 16 KB (none by default) to ensure reasonable performance - // in cases where the caller passes a small number of bytes to write (potentially a single byte). 
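A small sketch (assumed usage, not part of this changeset) of the zstd builder above; the accepted range comes from zstd-jni at runtime (Zstd.minCompressionLevel() .. Zstd.maxCompressionLevel()), with the library default when no level is set.

    import org.apache.kafka.common.compress.Compression;
    import org.apache.kafka.common.compress.ZstdCompression;

    Compression zstdDefault = Compression.zstd().build();                             // ZstdCompression.DEFAULT_LEVEL
    Compression zstdMax = Compression.zstd().level(ZstdCompression.MAX_LEVEL).build();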
- return new BufferedOutputStream(new ZstdOutputStreamNoFinalizer(buffer, RecyclingBufferPool.INSTANCE), 16 * 1024); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - - public static InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - // We use our own BufferSupplier instead of com.github.luben.zstd.RecyclingBufferPool since our - // implementation doesn't require locking or soft references. The buffer allocated by this buffer pool is - // used by zstd-jni for 1\ reading compressed data from input stream into a buffer before passing it over JNI - // 2\ implementation of skip inside zstd-jni where buffer is obtained and released with every call - final BufferPool bufferPool = new BufferPool() { - @Override - public ByteBuffer get(int capacity) { - return decompressionBufferSupplier.get(capacity); - } - - @Override - public void release(ByteBuffer buffer) { - decompressionBufferSupplier.release(buffer); - } - }; - // Ideally, data from ZstdInputStreamNoFinalizer should be read in a bulk because every call to - // `ZstdInputStreamNoFinalizer#read()` is a JNI call. The caller is expected to - // balance the tradeoff between reading large amount of data vs. making multiple JNI calls. - return new ZstdInputStreamNoFinalizer(new ByteBufferInputStream(buffer), bufferPool); - } catch (Throwable e) { - throw new KafkaException(e); - } - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java index 84bae97a03..b69ca06219 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java @@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -33,6 +34,8 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; /** * A convenient base class for configurations to extend. 
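The AbstractConfig hunks that follow gate automatic ConfigProvider instantiation behind the org.apache.kafka.automatic.config.providers system property. A minimal sketch of setting it (assumed usage; the provider class names are Kafka's shipped providers, used here only as an example):

    import org.apache.kafka.common.config.AbstractConfig;

    // JVM-wide, before any AbstractConfig is constructed:
    System.setProperty(AbstractConfig.AUTOMATIC_CONFIG_PROVIDERS_PROPERTY,
            "org.apache.kafka.common.config.provider.EnvVarConfigProvider"
                    + ",org.apache.kafka.common.config.provider.FileConfigProvider");
    // Provider classes referenced via config.providers but absent from this comma-separated
    // allow-list are then filtered out when no explicit configProviderProps are supplied.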
@@ -58,6 +61,8 @@ public class AbstractConfig { private final ConfigDef definition; + public static final String AUTOMATIC_CONFIG_PROVIDERS_PROPERTY = "org.apache.kafka.automatic.config.providers"; + public static final String CONFIG_PROVIDERS_CONFIG = "config.providers"; private static final String CONFIG_PROVIDERS_PARAM = ".param."; @@ -101,14 +106,11 @@ public class AbstractConfig { * the constructor to resolve any variables in {@code originals}; may be null or empty * @param doLog whether the configurations should be logged */ - @SuppressWarnings({"unchecked", "this-escape"}) + @SuppressWarnings({"this-escape"}) public AbstractConfig(ConfigDef definition, Map originals, Map configProviderProps, boolean doLog) { - /* check that all the keys are really strings */ - for (Map.Entry entry : originals.entrySet()) - if (!(entry.getKey() instanceof String)) - throw new ConfigException(entry.getKey().toString(), entry.getValue(), "Key must be a string."); + Map originalMap = Utils.castToStringObjectMap(originals); - this.originals = resolveConfigVariables(configProviderProps, (Map) originals); + this.originals = resolveConfigVariables(configProviderProps, originalMap); this.values = definition.parse(this.originals); Map configUpdates = postProcessParsedConfig(Collections.unmodifiableMap(this.values)); for (Map.Entry update : configUpdates.entrySet()) { @@ -374,9 +376,9 @@ private void logAll() { * Info level log for any unused configurations */ public void logUnused() { - Set unusedkeys = unused(); - if (!unusedkeys.isEmpty()) { - log.info("These configurations '{}' were supplied but are not used yet.", unusedkeys); + Set unusedKeys = unused(); + if (!unusedKeys.isEmpty()) { + log.info("These configurations '{}' were supplied but are not used yet.", unusedKeys); } } @@ -521,6 +523,7 @@ private Map extractPotentialVariables(Map configMap) { private Map resolveConfigVariables(Map configProviderProps, Map originals) { Map providerConfigString; Map configProperties; + Predicate classNameFilter; Map resolvedOriginals = new HashMap<>(); // As variable configs are strings, parse the originals and obtain the potential variable configs. 
Map indirectVariables = extractPotentialVariables(originals); @@ -529,11 +532,13 @@ private Map extractPotentialVariables(Map configMap) { if (configProviderProps == null || configProviderProps.isEmpty()) { providerConfigString = indirectVariables; configProperties = originals; + classNameFilter = automaticConfigProvidersFilter(); } else { providerConfigString = extractPotentialVariables(configProviderProps); configProperties = configProviderProps; + classNameFilter = ignored -> true; } - Map providers = instantiateConfigProviders(providerConfigString, configProperties); + Map providers = instantiateConfigProviders(providerConfigString, configProperties, classNameFilter); if (!providers.isEmpty()) { ConfigTransformer configTransformer = new ConfigTransformer(providers); @@ -547,6 +552,17 @@ private Map extractPotentialVariables(Map configMap) { return new ResolvingMap<>(resolvedOriginals, originals); } + private Predicate automaticConfigProvidersFilter() { + String systemProperty = System.getProperty(AUTOMATIC_CONFIG_PROVIDERS_PROPERTY); + if (systemProperty == null) { + return ignored -> true; + } else { + return Arrays.stream(systemProperty.split(",")) + .map(String::trim) + .collect(Collectors.toSet())::contains; + } + } + private Map configProviderProperties(String configProviderPrefix, Map providerConfigProperties) { Map result = new HashMap<>(); for (Map.Entry entry : providerConfigProperties.entrySet()) { @@ -567,9 +583,14 @@ private Map configProviderProperties(String configProviderPrefix * * @param indirectConfigs The map of potential variable configs * @param providerConfigProperties The map of config provider configs - * @return map map of config provider name and its instance. + * @param classNameFilter Filter for config provider class names + * @return map of config provider name and its instance. */ - private Map instantiateConfigProviders(Map indirectConfigs, Map providerConfigProperties) { + private Map instantiateConfigProviders( + Map indirectConfigs, + Map providerConfigProperties, + Predicate classNameFilter + ) { final String configProviders = indirectConfigs.get(CONFIG_PROVIDERS_CONFIG); if (configProviders == null || configProviders.isEmpty()) { @@ -580,9 +601,15 @@ private Map instantiateConfigProviders(Map configProviderInstances = new HashMap<>(); diff --git a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java index 57df493347..d82d06fa16 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java +++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java @@ -515,7 +515,7 @@ public Map parse(Map props) { // Check all configurations are defined List undefinedConfigKeys = undefinedDependentConfigs(); if (!undefinedConfigKeys.isEmpty()) { - String joined = Utils.join(undefinedConfigKeys, ","); + String joined = undefinedConfigKeys.stream().map(String::toString).collect(Collectors.joining(",")); throw new ConfigException("Some configurations in are referred in the dependents, but not defined: " + joined); } // parse all known keys @@ -806,7 +806,7 @@ public static String convertToString(Object parsedValue, Type type) { return parsedValue.toString(); case LIST: List valueList = (List) parsedValue; - return Utils.join(valueList, ","); + return valueList.stream().map(Object::toString).collect(Collectors.joining(",")); case CLASS: Class clazz = (Class) parsedValue; return clazz.getName(); @@ -1051,13 +1051,13 @@ public static ValidString in(String... 
validStrings) { public void ensureValid(String name, Object o) { String s = (String) o; if (!validStrings.contains(s)) { - throw new ConfigException(name, o, "String must be one of: " + Utils.join(validStrings, ", ")); + throw new ConfigException(name, o, "String must be one of: " + String.join(", ", validStrings)); } } public String toString() { - return "[" + Utils.join(validStrings, ", ") + "]"; + return "[" + String.join(", ", validStrings) + "]"; } } @@ -1079,12 +1079,12 @@ public static CaseInsensitiveValidString in(String... validStrings) { public void ensureValid(String name, Object o) { String s = (String) o; if (s == null || !validStrings.contains(s.toUpperCase(Locale.ROOT))) { - throw new ConfigException(name, o, "String must be one of (case insensitive): " + Utils.join(validStrings, ", ")); + throw new ConfigException(name, o, "String must be one of (case insensitive): " + String.join(", ", validStrings)); } } public String toString() { - return "(case insensitive) [" + Utils.join(validStrings, ", ") + "]"; + return "(case insensitive) [" + String.join(", ", validStrings) + "]"; } } @@ -1205,7 +1205,8 @@ public void ensureValid(String name, Object value) { } if (!foundIllegalCharacters.isEmpty()) { - throw new ConfigException(name, value, "String may not contain control sequences but had the following ASCII chars: " + Utils.join(foundIllegalCharacters, ", ")); + throw new ConfigException(name, value, "String may not contain control sequences but had the following ASCII chars: " + + foundIllegalCharacters.stream().map(Object::toString).collect(Collectors.joining(", "))); } } @@ -1256,7 +1257,17 @@ public static class ConfigKey { public final boolean internalConfig; public final String alternativeString; + // This constructor is present for backward compatibility reasons. 
public ConfigKey(String name, Type type, Object defaultValue, Validator validator, + Importance importance, String documentation, String group, + int orderInGroup, Width width, String displayName, + List dependents, Recommender recommender, + boolean internalConfig) { + this(name, type, defaultValue, validator, importance, documentation, group, orderInGroup, width, displayName, + dependents, recommender, internalConfig, null); + } + + private ConfigKey(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation, String group, int orderInGroup, Width width, String displayName, List dependents, Recommender recommender, diff --git a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java index 5c3bbcdfb9..bdf8bc1da7 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common.config; -import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; import org.apache.kafka.common.utils.Java; import org.apache.kafka.common.utils.Utils; @@ -144,7 +143,7 @@ public static void addClientSslSupport(ConfigDef config) { .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) .define(SslConfigs.SSL_KEY_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEY_PASSWORD_DOC) - .define(SslConfigs.SSL_KEYSTORE_KEY_CONFIG, Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_KEY_DOC) + .define(SslConfigs.SSL_KEYSTORE_KEY_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_KEY_DOC) .define(SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_DOC) .define(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_DOC) .define(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_TRUSTSTORE_TYPE_DOC) diff --git a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java index dcb772c18a..1eddf0ccf2 100755 --- a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java @@ -67,13 +67,17 @@ public class TopicConfig { "(which consists of log segments) can grow to before we will discard old log segments to free up space if we " + "are using the \"delete\" retention policy. By default there is no size limit only a time limit. " + "Since this limit is enforced at the partition level, multiply it by the number of partitions to compute " + - "the topic retention in bytes."; + "the topic retention in bytes. Additionally, retention.bytes configuration " + + "operates independently of \"segment.ms\" and \"segment.bytes\" configurations. 
" + + "Moreover, it triggers the rolling of new segment if the retention.bytes is configured to zero."; public static final String RETENTION_MS_CONFIG = "retention.ms"; public static final String RETENTION_MS_DOC = "This configuration controls the maximum time we will retain a " + "log before we will discard old log segments to free up space if we are using the " + "\"delete\" retention policy. This represents an SLA on how soon consumers must read " + - "their data. If set to -1, no time limit is applied."; + "their data. If set to -1, no time limit is applied. Additionally, retention.ms configuration " + + "operates independently of \"segment.ms\" and \"segment.bytes\" configurations. " + + "Moreover, it triggers the rolling of new segment if the retention.ms condition is satisfied."; public static final String REMOTE_LOG_STORAGE_ENABLE_CONFIG = "remote.storage.enable"; public static final String REMOTE_LOG_STORAGE_ENABLE_DOC = "To enable tiered storage for a topic, set this configuration as true. " + @@ -169,6 +173,14 @@ public class TopicConfig { "accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the " + "original compression codec set by the producer."; + + public static final String COMPRESSION_GZIP_LEVEL_CONFIG = "compression.gzip.level"; + public static final String COMPRESSION_GZIP_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to gzip."; + public static final String COMPRESSION_LZ4_LEVEL_CONFIG = "compression.lz4.level"; + public static final String COMPRESSION_LZ4_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to lz4."; + public static final String COMPRESSION_ZSTD_LEVEL_CONFIG = "compression.zstd.level"; + public static final String COMPRESSION_ZSTD_LEVEL_DOC = "The compression level to use if " + COMPRESSION_TYPE_CONFIG + " is set to zstd."; + public static final String PREALLOCATE_CONFIG = "preallocate"; public static final String PREALLOCATE_DOC = "True if we should preallocate the file on disk when " + "creating a new log segment."; diff --git a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java index b680321ef0..767fa9aca0 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java @@ -17,6 +17,9 @@ package org.apache.kafka.common.config.internals; import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.config.SslClientAuth; +import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder; +import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder; import java.util.Collections; import java.util.List; @@ -30,38 +33,19 @@ public class BrokerSecurityConfigs { public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; - public static final String SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG = "sasl.kerberos.principal.to.local.rules"; - public static final String SSL_CLIENT_AUTH_CONFIG = "ssl.client.auth"; - public static final String SASL_ENABLED_MECHANISMS_CONFIG = "sasl.enabled.mechanisms"; - public static final String SASL_SERVER_CALLBACK_HANDLER_CLASS = "sasl.server.callback.handler.class"; - public static final String SSL_PRINCIPAL_MAPPING_RULES_CONFIG = "ssl.principal.mapping.rules"; - public static final 
String CONNECTIONS_MAX_REAUTH_MS = "connections.max.reauth.ms"; - public static final int DEFAULT_SASL_SERVER_MAX_RECEIVE_SIZE = 524288; - public static final String SASL_SERVER_MAX_RECEIVE_SIZE_CONFIG = "sasl.server.max.receive.size"; - public static final String SSL_ALLOW_DN_CHANGES_CONFIG = "ssl.allow.dn.changes"; - public static final boolean DEFAULT_SSL_ALLOW_DN_CHANGES_VALUE = false; - public static final String SSL_ALLOW_SAN_CHANGES_CONFIG = "ssl.allow.san.changes"; - public static final boolean DEFAULT_SSL_ALLOW_SAN_CHANGES_VALUE = false; - - public static final String PRINCIPAL_BUILDER_CLASS_DOC = "The fully qualified name of a class that implements the " + - "KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during " + - "authorization. If no principal builder is defined, the default behavior depends " + - "on the security protocol in use. For SSL authentication, the principal will be derived using the " + - "rules defined by " + SSL_PRINCIPAL_MAPPING_RULES_CONFIG + " applied on the distinguished " + - "name from the client certificate if one is provided; otherwise, if client authentication is not required, " + - "the principal name will be ANONYMOUS. For SASL authentication, the principal will be derived using the " + - "rules defined by " + SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG + " if GSSAPI is in use, " + - "and the SASL authentication ID for other mechanisms. For PLAINTEXT, the principal will be ANONYMOUS."; + public static final String SSL_PRINCIPAL_MAPPING_RULES_CONFIG = "ssl.principal.mapping.rules"; + public static final String DEFAULT_SSL_PRINCIPAL_MAPPING_RULES = "DEFAULT"; public static final String SSL_PRINCIPAL_MAPPING_RULES_DOC = "A list of rules for mapping from distinguished name" + " from the client certificate to short name. The rules are evaluated in order and the first rule that matches" + " a principal name is used to map it to a short name. Any later rules in the list are ignored. By default," + " distinguished name of the X.500 certificate will be the principal. For more details on the format please" + " see security authorization and acls. Note that this configuration is ignored" + " if an extension of KafkaPrincipalBuilder is provided by the " + PRINCIPAL_BUILDER_CLASS_CONFIG + "" + - " configuration."; - public static final String DEFAULT_SSL_PRINCIPAL_MAPPING_RULES = "DEFAULT"; + " configuration."; + public static final String SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG = "sasl.kerberos.principal.to.local.rules"; + public static final List DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES = Collections.singletonList(DEFAULT_SSL_PRINCIPAL_MAPPING_RULES); public static final String SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal " + "names to short names (typically operating system usernames). The rules are evaluated in order and the " + "first rule that matches a principal name is used to map it to a short name. Any later rules in the list are " + @@ -69,8 +53,20 @@ public class BrokerSecurityConfigs { "to {username}. For more details on the format please see " + "security authorization and acls. 
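The ssl.principal.mapping.rules description above (rules evaluated in order, first match wins, DEFAULT keeps the full distinguished name) is easiest to read next to a concrete value. A sketch assuming a typical CN-extraction rule; the rule string is illustrative and not taken from this patch.

import java.util.Properties;

public class SslPrincipalMappingExample {
    public static void main(String[] args) {
        Properties brokerProps = new Properties();
        // Extract the CN from the client certificate's DN and use it as the principal;
        // fall back to DEFAULT (the full distinguished name) when no rule matches.
        brokerProps.put("ssl.principal.mapping.rules", "RULE:^CN=(.*?),.*$/$1/,DEFAULT");
        // Per the doc above, this setting is ignored if a custom principal.builder.class is configured.
        System.out.println(brokerProps);
    }
}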
Note that this configuration is ignored if an extension of " + "KafkaPrincipalBuilder is provided by the " + PRINCIPAL_BUILDER_CLASS_CONFIG + " configuration."; - public static final List DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES = Collections.singletonList("DEFAULT"); + public static final Class PRINCIPAL_BUILDER_CLASS_DEFAULT = DefaultKafkaPrincipalBuilder.class; + public static final String PRINCIPAL_BUILDER_CLASS_DOC = "The fully qualified name of a class that implements the " + + "KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during " + + "authorization. If no principal builder is defined, the default behavior depends " + + "on the security protocol in use. For SSL authentication, the principal will be derived using the " + + "rules defined by " + SSL_PRINCIPAL_MAPPING_RULES_CONFIG + " applied on the distinguished " + + "name from the client certificate if one is provided; otherwise, if client authentication is not required, " + + "the principal name will be ANONYMOUS. For SASL authentication, the principal will be derived using the " + + "rules defined by " + SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG + " if GSSAPI is in use, " + + "and the SASL authentication ID for other mechanisms. For PLAINTEXT, the principal will be ANONYMOUS."; + + public static final String SSL_CLIENT_AUTH_CONFIG = "ssl.client.auth"; + public static final String SSL_CLIENT_AUTH_DEFAULT = SslClientAuth.NONE.toString(); public static final String SSL_CLIENT_AUTH_DOC = "Configures kafka broker to request client authentication." + " The following settings are common: " + "

      " @@ -80,29 +76,43 @@ public class BrokerSecurityConfigs { + "
    • ssl.client.auth=none This means client authentication is not needed." + "
    "; + public static final String SASL_ENABLED_MECHANISMS_CONFIG = "sasl.enabled.mechanisms"; + public static final List DEFAULT_SASL_ENABLED_MECHANISMS = Collections.singletonList(SaslConfigs.GSSAPI_MECHANISM); public static final String SASL_ENABLED_MECHANISMS_DOC = "The list of SASL mechanisms enabled in the Kafka server. " + "The list may contain any mechanism for which a security provider is available. " + "Only GSSAPI is enabled by default."; - public static final List DEFAULT_SASL_ENABLED_MECHANISMS = Collections.singletonList(SaslConfigs.GSSAPI_MECHANISM); + public static final String SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG = "sasl.server.callback.handler.class"; public static final String SASL_SERVER_CALLBACK_HANDLER_CLASS_DOC = "The fully qualified name of a SASL server callback handler " + "class that implements the AuthenticateCallbackHandler interface. Server callback handlers must be prefixed with " + "listener prefix and SASL mechanism name in lower-case. For example, " + "listener.name.sasl_ssl.plain.sasl.server.callback.handler.class=com.example.CustomPlainCallbackHandler."; + public static final String CONNECTIONS_MAX_REAUTH_MS_CONFIG = "connections.max.reauth.ms"; + public static final long DEFAULT_CONNECTIONS_MAX_REAUTH_MS = 0L; public static final String CONNECTIONS_MAX_REAUTH_MS_DOC = "When explicitly set to a positive number (the default is 0, not a positive number), " + "a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate. " + "The broker will disconnect any such connection that is not re-authenticated within the session lifetime and that is then subsequently " + "used for any purpose other than re-authentication. Configuration names can optionally be prefixed with listener prefix and SASL " + "mechanism name in lower-case. For example, listener.name.sasl_ssl.oauthbearer.connections.max.reauth.ms=3600000"; + public static final String SASL_SERVER_MAX_RECEIVE_SIZE_CONFIG = "sasl.server.max.receive.size"; + public static final int DEFAULT_SASL_SERVER_MAX_RECEIVE_SIZE = 524288; public static final String SASL_SERVER_MAX_RECEIVE_SIZE_DOC = "The maximum receive size allowed before and during initial SASL authentication." + " Default receive size is 512KB. GSSAPI limits requests to 64K, but we allow upto 512KB by default for custom SASL mechanisms. In practice," + " PLAIN, SCRAM and OAUTH mechanisms can use much smaller limits."; + public static final String SSL_ALLOW_DN_CHANGES_CONFIG = "ssl.allow.dn.changes"; + public static final boolean DEFAULT_SSL_ALLOW_DN_CHANGES_VALUE = false; public static final String SSL_ALLOW_DN_CHANGES_DOC = "Indicates whether changes to the certificate distinguished name should be allowed during" + " a dynamic reconfiguration of certificates or not."; + public static final String SSL_ALLOW_SAN_CHANGES_CONFIG = "ssl.allow.san.changes"; + public static final boolean DEFAULT_SSL_ALLOW_SAN_CHANGES_VALUE = false; public static final String SSL_ALLOW_SAN_CHANGES_DOC = "Indicates whether changes to the certificate subject alternative names should be allowed during " + "a dynamic reconfiguration of certificates or not."; + + public final static String SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG = "sasl.mechanism.inter.broker.protocol"; + public final static String SASL_MECHANISM_INTER_BROKER_PROTOCOL_DOC = "SASL mechanism used for inter-broker communication. 
Default is GSSAPI."; + } diff --git a/clients/src/main/java/org/apache/kafka/common/config/internals/QuotaConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/internals/QuotaConfigs.java deleted file mode 100644 index e382cb0c17..0000000000 --- a/clients/src/main/java/org/apache/kafka/common/config/internals/QuotaConfigs.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kafka.common.config.internals; - -import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.security.scram.internals.ScramMechanism; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -/** - * Define the dynamic quota configs. Note that these are not normal configurations that exist in properties files. They - * only exist dynamically in the controller (or ZK, depending on which mode the cluster is running). - */ -public class QuotaConfigs { - public static final String PRODUCER_BYTE_RATE_OVERRIDE_CONFIG = "producer_byte_rate"; - public static final String CONSUMER_BYTE_RATE_OVERRIDE_CONFIG = "consumer_byte_rate"; - public static final String REQUEST_PERCENTAGE_OVERRIDE_CONFIG = "request_percentage"; - public static final String CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG = "controller_mutation_rate"; - public static final String IP_CONNECTION_RATE_OVERRIDE_CONFIG = "connection_creation_rate"; - - public static final String PRODUCER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for producer traffic."; - public static final String CONSUMER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for consumer traffic."; - public static final String REQUEST_PERCENTAGE_DOC = "A percentage representing the upper bound of time spent for processing requests."; - public static final String CONTROLLER_MUTATION_RATE_DOC = "The rate at which mutations are accepted for the create " + - "topics request, the create partitions request and the delete topics request. 
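Since these quota names exist only as dynamic entries rather than in properties files, they are normally written through the Admin client quota API. A sketch assuming that API, with placeholder bootstrap address, user name and byte rates.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class DynamicQuotaExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Target quota entity: the user "alice" (placeholder).
            ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "alice"));
            // producer_byte_rate / consumer_byte_rate are the dynamic config names documented above.
            ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, Arrays.asList(
                    new ClientQuotaAlteration.Op("producer_byte_rate", 1_048_576.0),
                    new ClientQuotaAlteration.Op("consumer_byte_rate", 2_097_152.0)));
            admin.alterClientQuotas(Collections.singletonList(alteration)).all().get();
        }
    }
}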
The rate is accumulated by " + - "the number of partitions created or deleted."; - public static final String IP_CONNECTION_RATE_DOC = "An int representing the upper bound of connections accepted " + - "for the specified IP."; - - public static final int IP_CONNECTION_RATE_DEFAULT = Integer.MAX_VALUE; - - private final static Set USER_AND_CLIENT_QUOTA_NAMES = new HashSet<>(Arrays.asList( - PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, - CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, - REQUEST_PERCENTAGE_OVERRIDE_CONFIG, - CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG - )); - - private static void buildUserClientQuotaConfigDef(ConfigDef configDef) { - configDef.define(PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, - ConfigDef.Importance.MEDIUM, PRODUCER_BYTE_RATE_DOC); - - configDef.define(CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, - ConfigDef.Importance.MEDIUM, CONSUMER_BYTE_RATE_DOC); - - configDef.define(REQUEST_PERCENTAGE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, - Integer.valueOf(Integer.MAX_VALUE).doubleValue(), - ConfigDef.Importance.MEDIUM, REQUEST_PERCENTAGE_DOC); - - configDef.define(CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, - Integer.valueOf(Integer.MAX_VALUE).doubleValue(), - ConfigDef.Importance.MEDIUM, CONTROLLER_MUTATION_RATE_DOC); - } - - public static boolean isClientOrUserConfig(String name) { - return USER_AND_CLIENT_QUOTA_NAMES.contains(name); - } - - public static ConfigDef userAndClientQuotaConfigs() { - ConfigDef configDef = new ConfigDef(); - buildUserClientQuotaConfigDef(configDef); - return configDef; - } - - public static ConfigDef scramMechanismsPlusUserAndClientQuotaConfigs() { - ConfigDef configDef = new ConfigDef(); - ScramMechanism.mechanismNames().forEach(mechanismName -> { - configDef.define(mechanismName, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, - "User credentials for SCRAM mechanism " + mechanismName); - }); - buildUserClientQuotaConfigDef(configDef); - return configDef; - } - - public static ConfigDef ipConfigs() { - ConfigDef configDef = new ConfigDef(); - configDef.define(IP_CONNECTION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.INT, Integer.MAX_VALUE, - ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, IP_CONNECTION_RATE_DOC); - return configDef; - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/config/provider/ConfigProvider.java b/clients/src/main/java/org/apache/kafka/common/config/provider/ConfigProvider.java index a0ef837b2e..81f0aac0d7 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/provider/ConfigProvider.java +++ b/clients/src/main/java/org/apache/kafka/common/config/provider/ConfigProvider.java @@ -55,7 +55,7 @@ public interface ConfigProvider extends Configurable, Closeable { * @param path the path where the data resides * @param keys the keys whose values will be retrieved * @param callback the callback to invoke upon change - * @throws {@link UnsupportedOperationException} if the subscribe operation is not supported + * @throws UnsupportedOperationException if the subscribe operation is not supported */ default void subscribe(String path, Set keys, ConfigChangeCallback callback) { throw new UnsupportedOperationException(); @@ -67,7 +67,7 @@ default void subscribe(String path, Set keys, ConfigChangeCallback callb * @param path the path where the data resides * @param keys the keys whose values will be retrieved * @param callback the callback to be unsubscribed from changes - * @throws {@link UnsupportedOperationException} if the 
unsubscribe operation is not supported + * @throws UnsupportedOperationException if the unsubscribe operation is not supported */ default void unsubscribe(String path, Set keys, ConfigChangeCallback callback) { throw new UnsupportedOperationException(); @@ -76,7 +76,7 @@ default void unsubscribe(String path, Set keys, ConfigChangeCallback cal /** * Clears all subscribers (optional operation). * - * @throws {@link UnsupportedOperationException} if the unsubscribeAll operation is not supported + * @throws UnsupportedOperationException if the unsubscribeAll operation is not supported */ default void unsubscribeAll() { throw new UnsupportedOperationException(); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java b/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java index a15df6c7ff..aee57c47d2 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/RecordDeserializationException.java @@ -16,7 +16,12 @@ */ package org.apache.kafka.common.errors; +import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.header.Headers; +import org.apache.kafka.common.record.TimestampType; + +import java.nio.ByteBuffer; /** * This exception is raised for any error that occurs while deserializing records received by the consumer using @@ -24,14 +29,61 @@ */ public class RecordDeserializationException extends SerializationException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 2L; + + public enum DeserializationExceptionOrigin { + KEY, + VALUE + } + + private final DeserializationExceptionOrigin origin; private final TopicPartition partition; private final long offset; + private final TimestampType timestampType; + private final long timestamp; + private final ByteBuffer keyBuffer; + private final ByteBuffer valueBuffer; + private final Headers headers; - public RecordDeserializationException(TopicPartition partition, long offset, String message, Throwable cause) { + @Deprecated + public RecordDeserializationException(TopicPartition partition, + long offset, + String message, + Throwable cause) { super(message, cause); + this.origin = null; this.partition = partition; this.offset = offset; + this.timestampType = TimestampType.NO_TIMESTAMP_TYPE; + this.timestamp = ConsumerRecord.NO_TIMESTAMP; + this.keyBuffer = null; + this.valueBuffer = null; + this.headers = null; + } + + public RecordDeserializationException(DeserializationExceptionOrigin origin, + TopicPartition partition, + long offset, + long timestamp, + TimestampType timestampType, + ByteBuffer keyBuffer, + ByteBuffer valueBuffer, + Headers headers, + String message, + Throwable cause) { + super(message, cause); + this.origin = origin; + this.offset = offset; + this.timestampType = timestampType; + this.timestamp = timestamp; + this.partition = partition; + this.keyBuffer = keyBuffer; + this.valueBuffer = valueBuffer; + this.headers = headers; + } + + public DeserializationExceptionOrigin origin() { + return origin; } public TopicPartition topicPartition() { @@ -41,4 +93,24 @@ public TopicPartition topicPartition() { public long offset() { return offset; } + + public TimestampType timestampType() { + return timestampType; + } + + public long timestamp() { + return timestamp; + } + + public ByteBuffer keyBuffer() { + return keyBuffer; + } + 
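With the extra fields carried by the enriched RecordDeserializationException, a consumer can inspect the raw bytes of a record that failed to deserialize and skip past it instead of getting stuck. A hedged poll-loop sketch; the seek-past-the-bad-offset strategy is one possible policy, not something this patch prescribes.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.errors.RecordDeserializationException;

import java.nio.ByteBuffer;
import java.time.Duration;

public class PoisonPillSkipper {
    // Polls once and skips over any record that cannot be deserialized.
    static void pollSkippingBadRecords(Consumer<String, String> consumer) {
        try {
            consumer.poll(Duration.ofMillis(500)).forEach(r -> System.out.println(r.value()));
        } catch (RecordDeserializationException e) {
            ByteBuffer raw = e.origin() == RecordDeserializationException.DeserializationExceptionOrigin.KEY
                    ? e.keyBuffer() : e.valueBuffer();
            System.err.printf("Skipping bad record at %s offset %d (origin=%s, %d bytes)%n",
                    e.topicPartition(), e.offset(), e.origin(), raw == null ? 0 : raw.remaining());
            // Seek past the poison pill so the next poll() makes progress.
            consumer.seek(e.topicPartition(), e.offset() + 1);
        }
    }
}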
+ public ByteBuffer valueBuffer() { + return valueBuffer; + } + + public Headers headers() { + return headers; + } } diff --git a/transaction-coordinator/src/main/java/org/apache/kafka/coordinator/transaction/TransactionLogConfig.java b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java similarity index 64% rename from transaction-coordinator/src/main/java/org/apache/kafka/coordinator/transaction/TransactionLogConfig.java rename to clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java index 00b7c9ef03..aa592d552b 100644 --- a/transaction-coordinator/src/main/java/org/apache/kafka/coordinator/transaction/TransactionLogConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java @@ -14,13 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.kafka.coordinator.transaction; +package org.apache.kafka.common.errors; -public class TransactionLogConfig { - // Log-level config default values - public static final int DEFAULT_NUM_PARTITIONS = 50; - public static final int DEFAULT_SEGMENT_BYTES = 100 * 1024 * 1024; - public static final short DEFAULT_REPLICATION_FACTOR = 3; - public static final int DEFAULT_MIN_IN_SYNC_REPLICAS = 2; - public static final int DEFAULT_LOAD_BUFFER_SIZE = 5 * 1024 * 1024; +public class TransactionAbortableException extends ApiException { + public TransactionAbortableException(String message) { + super(message); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java b/clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java index 2d6ce702e2..8c31e64cc5 100644 --- a/clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java +++ b/clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java @@ -26,7 +26,7 @@ /** * Represents an immutable basic version range using 2 attributes: min and max, each of type short. * The min and max attributes need to satisfy 2 rules: - * - they are each expected to be >= 1, as we only consider positive version values to be valid. + * - they are each expected to be >= 0, as we only consider non-negative version values to be valid. * - max should be >= min. * * The class also provides API to convert the version range to a map. @@ -48,7 +48,7 @@ class BaseVersionRange { /** * Raises an exception unless the following condition is met: - * minValue >= 1 and maxValue >= 1 and maxValue >= minValue. + * minValue >= 0 and maxValue >= 0 and maxValue >= minValue. * * @param minKeyLabel Label for the min version key, that's used only to convert to/from a map. * @param minValue The minimum version value. @@ -56,14 +56,14 @@ class BaseVersionRange { * @param maxValue The maximum version value. * * @throws IllegalArgumentException If any of the following conditions are true: - * - (minValue < 1) OR (maxValue < 1) OR (maxValue < minValue). + * - (minValue < 0) OR (maxValue < 0) OR (maxValue < minValue). * - minKeyLabel is empty, OR, minKeyLabel is empty. 
*/ protected BaseVersionRange(String minKeyLabel, short minValue, String maxKeyLabel, short maxValue) { - if (minValue < 1 || maxValue < 1 || maxValue < minValue) { + if (minValue < 0 || maxValue < 0 || maxValue < minValue) { throw new IllegalArgumentException( String.format( - "Expected minValue >= 1, maxValue >= 1 and maxValue >= minValue, but received" + + "Expected minValue >= 0, maxValue >= 0 and maxValue >= minValue, but received" + " minValue: %d, maxValue: %d", minValue, maxValue)); } if (minKeyLabel.isEmpty()) { @@ -86,6 +86,7 @@ public short max() { return maxValue; } + @Override public String toString() { return String.format( "%s[%s]", diff --git a/clients/src/main/java/org/apache/kafka/common/feature/SupportedVersionRange.java b/clients/src/main/java/org/apache/kafka/common/feature/SupportedVersionRange.java index a864a91762..b062635490 100644 --- a/clients/src/main/java/org/apache/kafka/common/feature/SupportedVersionRange.java +++ b/clients/src/main/java/org/apache/kafka/common/feature/SupportedVersionRange.java @@ -33,7 +33,7 @@ public SupportedVersionRange(short minVersion, short maxVersion) { } public SupportedVersionRange(short maxVersion) { - this((short) 1, maxVersion); + this((short) 0, maxVersion); } public static SupportedVersionRange fromMap(Map versionRangeMap) { diff --git a/clients/src/main/java/org/apache/kafka/common/header/Headers.java b/clients/src/main/java/org/apache/kafka/common/header/Headers.java index 2353249ceb..b736cbcabc 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/Headers.java +++ b/clients/src/main/java/org/apache/kafka/common/header/Headers.java @@ -50,7 +50,7 @@ public interface Headers extends Iterable
    { * Returns just one (the very last) header for the given key, if present. * * @param key to get the last header for. - * @return this last header matching the given key, returns none if not present. + * @return this last header matching the given key, returns null if not present. */ Header lastHeader(String key); diff --git a/clients/src/main/java/org/apache/kafka/common/memory/GarbageCollectedMemoryPool.java b/clients/src/main/java/org/apache/kafka/common/memory/GarbageCollectedMemoryPool.java index 18f8ffe91c..df3b1c155d 100644 --- a/clients/src/main/java/org/apache/kafka/common/memory/GarbageCollectedMemoryPool.java +++ b/clients/src/main/java/org/apache/kafka/common/memory/GarbageCollectedMemoryPool.java @@ -37,13 +37,12 @@ public class GarbageCollectedMemoryPool extends SimpleMemoryPool implements Auto //serves 2 purposes - 1st it maintains the ref objects reachable (which is a requirement for them //to ever be enqueued), 2nd keeps some (small) metadata for every buffer allocated private final Map buffersInFlight = new ConcurrentHashMap<>(); - private final GarbageCollectionListener gcListener = new GarbageCollectionListener(); private final Thread gcListenerThread; private volatile boolean alive = true; public GarbageCollectedMemoryPool(long sizeBytes, int maxSingleAllocationSize, boolean strict, Sensor oomPeriodSensor) { super(sizeBytes, maxSingleAllocationSize, strict, oomPeriodSensor); - this.alive = true; + GarbageCollectionListener gcListener = new GarbageCollectionListener(); this.gcListenerThread = new Thread(gcListener, "memory pool GC listener"); this.gcListenerThread.setDaemon(true); //so we dont need to worry about shutdown this.gcListenerThread.start(); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java b/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java index 3867091db6..f8d5ad185f 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java @@ -194,7 +194,7 @@ static String getMBeanName(String prefix, MetricName metricName) { mBeanName.append(":type="); mBeanName.append(metricName.group()); for (Map.Entry entry : metricName.tags().entrySet()) { - if (entry.getKey().length() <= 0 || entry.getValue().length() <= 0) + if (entry.getKey().isEmpty() || entry.getValue().isEmpty()) continue; mBeanName.append(","); mBeanName.append(entry.getKey()); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java index c8e53ffc6c..1d31855db5 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/KafkaMetric.java @@ -75,7 +75,7 @@ public MetricName metricName() { public Object metricValue() { long now = time.milliseconds(); synchronized (this.lock) { - if (this.metricValueProvider instanceof Measurable) + if (isMeasurable()) return ((Measurable) metricValueProvider).measure(config, now); else if (this.metricValueProvider instanceof Gauge) return ((Gauge) metricValueProvider).value(config, now); @@ -84,13 +84,22 @@ else if (this.metricValueProvider instanceof Gauge) } } + /** + * The method determines if the metric value provider is of type Measurable. + * + * @return true if the metric value provider is of type Measurable, false otherwise. 
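The new isMeasurable() helper centralizes the instanceof check that metricValue() and measurable() were repeating. From application code the values still arrive through the public Metric interface; a small sketch assuming a KafkaProducer instance.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

import java.util.Map;

public class MetricDump {
    // Prints every metric the client currently exposes; metricValue() internally
    // dispatches on Measurable vs Gauge, which is the check isMeasurable() now wraps.
    static void dumpMetrics(KafkaProducer<String, String> producer) {
        Map<MetricName, ? extends Metric> metrics = producer.metrics();
        metrics.forEach((name, metric) ->
                System.out.printf("%s.%s = %s%n", name.group(), name.name(), metric.metricValue()));
    }
}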
+ */ + public boolean isMeasurable() { + return this.metricValueProvider instanceof Measurable; + } + /** * Get the underlying metric provider, which should be a {@link Measurable} * @return Return the metric provider * @throws IllegalStateException if the underlying metric is not a {@link Measurable}. */ public Measurable measurable() { - if (this.metricValueProvider instanceof Measurable) + if (isMeasurable()) return (Measurable) metricValueProvider; else throw new IllegalStateException("Not a measurable: " + this.metricValueProvider.getClass()); @@ -103,7 +112,7 @@ public Measurable measurable() { */ double measurableValue(long timeMs) { synchronized (this.lock) { - if (this.metricValueProvider instanceof Measurable) + if (isMeasurable()) return ((Measurable) metricValueProvider).measure(config, timeMs); else return 0; diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/MetricConfig.java b/clients/src/main/java/org/apache/kafka/common/metrics/MetricConfig.java index 7367e966c0..a77cc9309b 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/MetricConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/MetricConfig.java @@ -25,6 +25,8 @@ */ public class MetricConfig { + public static final int DEFAULT_NUM_SAMPLES = 2; + private Quota quota; private int samples; private long eventWindow; @@ -34,7 +36,7 @@ public class MetricConfig { public MetricConfig() { this.quota = null; - this.samples = 2; + this.samples = DEFAULT_NUM_SAMPLES; this.eventWindow = Long.MAX_VALUE; this.timeWindowMs = TimeUnit.MILLISECONDS.convert(30, TimeUnit.SECONDS); this.tags = new LinkedHashMap<>(); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java index 36daea6ca7..b802d3190a 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.CompoundStat; -import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.stats.Histogram.BinScheme; import org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme; @@ -35,7 +34,7 @@ * one metric to capture the percentage of operations that failed another to capture the percentage of operations * that succeeded. *

    - * This can be accomplish by created a {@link org.apache.kafka.common.metrics.Sensor Sensor} to record the values, + * This can be accomplished by creating a {@link org.apache.kafka.common.metrics.Sensor Sensor} to record the values, * with 0.0 for false and 1.0 for true. Then, create a single {@link Frequencies} object that has two * {@link Frequency} metrics: one centered around 0.0 and another centered around 1.0. The {@link Frequencies} * object is a {@link CompoundStat}, and so it can be {@link org.apache.kafka.common.metrics.Sensor#add(CompoundStat) @@ -111,11 +110,7 @@ public List stats() { List ms = new ArrayList<>(frequencies.length); for (Frequency frequency : frequencies) { final double center = frequency.centerValue(); - ms.add(new NamedMeasurable(frequency.name(), new Measurable() { - public double measure(MetricConfig config, long now) { - return frequency(config, now, center); - } - })); + ms.add(new NamedMeasurable(frequency.name(), (config, now) -> frequency(config, now, center))); } return ms; } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Rate.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Rate.java index 09b7c05c8f..4f15bb9607 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/Rate.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/Rate.java @@ -74,15 +74,18 @@ public long windowSize(MetricConfig config, long now) { /* * Here we check the total amount of time elapsed since the oldest non-obsolete window. * This give the total windowSize of the batch which is the time used for Rate computation. - * However, there is an issue if we do not have sufficient data for e.g. if only 1 second has elapsed in a 30 second + * However, there is an issue if we do not have sufficient data for e.g. if only 1 second has elapsed in a 30-second * window, the measured rate will be very high. - * Hence we assume that the elapsed time is always N-1 complete windows plus whatever fraction of the final window is complete. + * Hence, we assume that the elapsed time is always N-1 complete windows plus whatever fraction of the final window is complete. * * Note that we could simply count the amount of time elapsed in the current window and add n-1 windows to get the total time, * but this approach does not account for sleeps. SampledStat only creates samples whenever record is called, * if no record is called for a period of time that time is not accounted for in windowSize and produces incorrect results. + * + * Note also, that totalElapsedTimeMs can be larger than the monitored window size, + * if the oldest sample started before the window while overlapping it. 
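The window-size comment above boils down to a small piece of arithmetic: if fewer than samples - 1 full windows have elapsed, the missing windows are counted as if they were complete, so a short burst is not divided by a tiny elapsed time. A standalone sketch of that computation with arbitrary constants.

public class RateWindowSizeSketch {
    // Mirrors the idea described above: pad the elapsed time up to (samples - 1) full windows.
    static long effectiveWindowMs(long elapsedMs, int samples, long timeWindowMs) {
        int numFullWindows = (int) (elapsedMs / timeWindowMs);
        int minFullWindows = samples - 1;
        if (numFullWindows < minFullWindows) {
            elapsedMs += (minFullWindows - numFullWindows) * timeWindowMs;
        }
        return elapsedMs;
    }

    public static void main(String[] args) {
        // Only 1s of a 30s window has elapsed with samples=2: the window is padded to 31s,
        // so a burst of records does not produce an absurdly high rate.
        System.out.println(effectiveWindowMs(1_000L, 2, 30_000L));  // 31000
        System.out.println(effectiveWindowMs(70_000L, 2, 30_000L)); // 70000, already >= 1 full window
    }
}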
*/ - long totalElapsedTimeMs = now - stat.oldest(now).lastWindowMs; + long totalElapsedTimeMs = now - stat.oldest(now).startTimeMs; // Check how many full windows of data we have currently retained int numFullWindows = (int) (totalElapsedTimeMs / config.timeWindowMs()); int minFullWindows = config.samples() - 1; diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java index 66843e4e0e..f76fccc853 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SampledStat.java @@ -40,7 +40,8 @@ public abstract class SampledStat implements MeasurableStat { public SampledStat(double initialValue) { this.initialValue = initialValue; - this.samples = new ArrayList<>(2); + // keep one extra placeholder for "overlapping sample" (see purgeObsoleteSamples() logic) + this.samples = new ArrayList<>(MetricConfig.DEFAULT_NUM_SAMPLES + 1); } @Override @@ -50,10 +51,13 @@ public void record(MetricConfig config, double value, long timeMs) { sample = advance(config, timeMs); update(sample, config, value, timeMs); sample.eventCount += 1; + sample.lastEventMs = timeMs; } private Sample advance(MetricConfig config, long timeMs) { - this.current = (this.current + 1) % config.samples(); + // keep one extra placeholder for "overlapping sample" (see purgeObsoleteSamples() logic) + int maxSamples = config.samples() + 1; + this.current = (this.current + 1) % maxSamples; if (this.current >= samples.size()) { Sample sample = newSample(timeMs); this.samples.add(sample); @@ -76,18 +80,18 @@ public double measure(MetricConfig config, long now) { } public Sample current(long timeMs) { - if (samples.size() == 0) + if (samples.isEmpty()) this.samples.add(newSample(timeMs)); return this.samples.get(this.current); } public Sample oldest(long now) { - if (samples.size() == 0) + if (samples.isEmpty()) this.samples.add(newSample(now)); Sample oldest = this.samples.get(0); for (int i = 1; i < this.samples.size(); i++) { Sample curr = this.samples.get(i); - if (curr.lastWindowMs < oldest.lastWindowMs) + if (curr.startTimeMs < oldest.startTimeMs) oldest = curr; } return oldest; @@ -106,36 +110,42 @@ public String toString() { public abstract double combine(List samples, MetricConfig config, long now); - /* Timeout any windows that have expired in the absence of any events */ + // purge any samples that lack observed events within the monitored window protected void purgeObsoleteSamples(MetricConfig config, long now) { long expireAge = config.samples() * config.timeWindowMs(); for (Sample sample : samples) { - if (now - sample.lastWindowMs >= expireAge) + // samples overlapping the monitored window are kept, + // even if they started before it + if (now - sample.lastEventMs >= expireAge) { sample.reset(now); + } } } protected static class Sample { public double initialValue; public long eventCount; - public long lastWindowMs; + public long startTimeMs; + public long lastEventMs; public double value; public Sample(double initialValue, long now) { this.initialValue = initialValue; this.eventCount = 0; - this.lastWindowMs = now; + this.startTimeMs = now; + this.lastEventMs = now; this.value = initialValue; } public void reset(long now) { this.eventCount = 0; - this.lastWindowMs = now; + this.startTimeMs = now; + this.lastEventMs = now; this.value = initialValue; } public boolean isComplete(long timeMs, MetricConfig config) { - return 
timeMs - lastWindowMs >= config.timeWindowMs() || eventCount >= config.eventWindow(); + return timeMs - startTimeMs >= config.timeWindowMs() || eventCount >= config.eventWindow(); } @Override @@ -143,7 +153,8 @@ public String toString() { return "Sample(" + "value=" + value + ", eventCount=" + eventCount + - ", lastWindowMs=" + lastWindowMs + + ", startTimeMs=" + startTimeMs + + ", lastEventMs=" + lastEventMs + ", initialValue=" + initialValue + ')'; } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SimpleRate.java b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SimpleRate.java index 931bd9c35e..a632f0254d 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/stats/SimpleRate.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/stats/SimpleRate.java @@ -33,7 +33,7 @@ public class SimpleRate extends Rate { @Override public long windowSize(MetricConfig config, long now) { stat.purgeObsoleteSamples(config, now); - long elapsed = now - stat.oldest(now).lastWindowMs; + long elapsed = now - stat.oldest(now).startTimeMs; return Math.max(elapsed, config.timeWindowMs()); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelMetadataRegistry.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelMetadataRegistry.java index a3453d881c..afaf120944 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelMetadataRegistry.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelMetadataRegistry.java @@ -36,7 +36,7 @@ public interface ChannelMetadataRegistry extends Closeable { CipherInformation cipherInformation(); /** - * Register information about the client client we are using. + * Register information about the client we are using. * Depending on the clients, the ApiVersionsRequest could be received * multiple times or not at all. Re-registering the information will * overwrite the previous one. diff --git a/clients/src/main/java/org/apache/kafka/common/network/InvalidReceiveException.java b/clients/src/main/java/org/apache/kafka/common/network/InvalidReceiveException.java index a56353a420..3a9913f96f 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/InvalidReceiveException.java +++ b/clients/src/main/java/org/apache/kafka/common/network/InvalidReceiveException.java @@ -24,7 +24,4 @@ public InvalidReceiveException(String message) { super(message); } - public InvalidReceiveException(String message, Throwable cause) { - super(message, cause); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index 5f35a7b5e0..ef30ca2bbc 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -353,13 +353,13 @@ public boolean isMuted() { } public boolean isInMutableState() { - //some requests do not require memory, so if we do not know what the current (or future) request is - //(receive == null) we dont mute. we also dont mute if whatever memory required has already been - //successfully allocated (if none is required for the currently-being-read request - //receive.memoryAllocated() is expected to return true) + // Some requests do not require memory, so if we do not know what the current (or future) request is + // (receive == null) we don't mute. 
We also don't mute if whatever memory required has already been + // successfully allocated (if none is required for the currently-being-read request + // receive.memoryAllocated() is expected to return true) if (receive == null || receive.memoryAllocated()) return false; - //also cannot mute if underlying transport is not in the ready state + // also cannot mute if underlying transport is not in the ready state return transportLayer.ready(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java index b1c71abd69..f6d08cf154 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java @@ -92,13 +92,13 @@ public long readFrom(ScatteringByteChannel channel) throws IOException { throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + ")"); if (maxSize != UNLIMITED && receiveSize > maxSize) throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + " larger than " + maxSize + ")"); - requestedBufferSize = receiveSize; //may be 0 for some payloads (SASL) + requestedBufferSize = receiveSize; // may be 0 for some payloads (SASL) if (receiveSize == 0) { buffer = EMPTY_BUFFER; } } } - if (buffer == null && requestedBufferSize != -1) { //we know the size we want but havent been able to allocate it yet + if (buffer == null && requestedBufferSize != -1) { // we know the size we want but haven't been able to allocate it yet buffer = memoryPool.tryAllocate(requestedBufferSize); if (buffer == null) log.trace("Broker low on memory - could not allocate buffer of size {} for source {}", requestedBufferSize, source); diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java index 845b1474f4..5af796af18 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java @@ -35,7 +35,7 @@ public class PlaintextTransportLayer implements TransportLayer { private final SocketChannel socketChannel; private final Principal principal = KafkaPrincipal.ANONYMOUS; - public PlaintextTransportLayer(SelectionKey key) throws IOException { + public PlaintextTransportLayer(SelectionKey key) { this.key = key; this.socketChannel = (SocketChannel) key.channel(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java index 6f1db2f9cc..c5f2968065 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SaslChannelBuilder.java @@ -320,8 +320,7 @@ private void createServerCallbackHandlers(Map configs) { AuthenticateCallbackHandler callbackHandler; String prefix = ListenerName.saslMechanismPrefix(mechanism); @SuppressWarnings("unchecked") - Class clazz = - (Class) configs.get(prefix + BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS); + Class clazz = (Class) configs.get(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS); // AutoMQ inject start if (clazz != null) { if (Utils.hasConstructor(clazz, SaslChannelBuilder.class)) { @@ -345,9 +344,9 @@ else if (mechanism.equals(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)) private void 
createConnectionsMaxReauthMsMap(Map configs) { for (String mechanism : jaasContexts.keySet()) { String prefix = ListenerName.saslMechanismPrefix(mechanism); - Long connectionsMaxReauthMs = (Long) configs.get(prefix + BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS); + Long connectionsMaxReauthMs = (Long) configs.get(prefix + BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG); if (connectionsMaxReauthMs == null) - connectionsMaxReauthMs = (Long) configs.get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS); + connectionsMaxReauthMs = (Long) configs.get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG); if (connectionsMaxReauthMs != null) connectionsMaxReauthMsByMechanism.put(mechanism, connectionsMaxReauthMs); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java index 425464f7e2..1bcd39e5ea 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java @@ -82,7 +82,7 @@ public interface Selectable { * The collection of receives that completed on the last {@link #poll(long) poll()} call. * * Note that the caller of this method assumes responsibility to close the NetworkReceive resources which may be - * backed by a {@link MemoryPool}. In such scenarios (when NetworkReceive uses a {@link MemoryPool}, it is necessary + * backed by a {@link MemoryPool}. In such scenarios (when NetworkReceive uses a {@link MemoryPool}), it is necessary * to close the {@link NetworkReceive} to prevent any memory leaks. */ Collection completedReceives(); diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index b54e2c3158..c0236fb54c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -181,7 +181,7 @@ public Selector(int maxReceiveSize, this.memoryPool = memoryPool; this.lowMemThreshold = (long) (0.1 * this.memoryPool.size()); this.failedAuthenticationDelayMs = failedAuthenticationDelayMs; - this.delayedClosingChannels = (failedAuthenticationDelayMs > NO_FAILED_AUTHENTICATION_DELAY) ? new LinkedHashMap() : null; + this.delayedClosingChannels = (failedAuthenticationDelayMs > NO_FAILED_AUTHENTICATION_DELAY) ? 
new LinkedHashMap<>() : null; } public Selector(int maxReceiveSize, @@ -229,7 +229,7 @@ public Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String met } public Selector(long connectionMaxIdleMS, int failedAuthenticationDelayMs, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder, LogContext logContext) { - this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, failedAuthenticationDelayMs, metrics, time, metricGrpPrefix, Collections.emptyMap(), true, channelBuilder, logContext); + this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, failedAuthenticationDelayMs, metrics, time, metricGrpPrefix, Collections.emptyMap(), true, channelBuilder, logContext); } /** @@ -1282,14 +1282,16 @@ private Meter createMeter(Metrics metrics, String groupName, Map metricTags, String baseName, String action) { + // this name remains relevant, non-deprecated descendant method uses the same MetricName rateMetricName = metrics.metricName(baseName + "-ratio", groupName, - String.format("*Deprecated* The fraction of time the I/O thread spent %s", action), metricTags); + String.format("The fraction of time the I/O thread spent %s", action), metricTags); + // this name is deprecated MetricName totalMetricName = metrics.metricName(baseName + "time-total", groupName, String.format("*Deprecated* The total time the I/O thread spent %s", action), metricTags); return new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java index 7d55e4c877..f5002289f1 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.security.ssl.SslPrincipalMapper; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Utils; -import org.slf4j.Logger; import java.io.Closeable; import java.io.IOException; @@ -44,7 +43,6 @@ public class SslChannelBuilder implements ChannelBuilder, ListenerReconfigurable private final ListenerName listenerName; private final boolean isInterBrokerListener; private final Mode mode; - private final Logger log; private SslFactory sslFactory; private Map configs; private SslPrincipalMapper sslPrincipalMapper; @@ -60,7 +58,6 @@ public SslChannelBuilder(Mode mode, this.mode = mode; this.listenerName = listenerName; this.isInterBrokerListener = isInterBrokerListener; - this.log = logContext.logger(getClass()); } public void configure(Map configs) throws KafkaException { @@ -171,7 +168,7 @@ public Optional principalSerde() { } @Override - public void close() throws IOException { + public void close() { if (principalBuilder instanceof Closeable) Utils.closeQuietly((Closeable) principalBuilder, "principal builder"); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java index da80e363a9..ef7f137e36 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java @@ -199,6 +199,14 @@ public void close() throws IOException { } catch (IOException ie) { log.debug("Failed to send SSL Close message", ie); } finally { + try { + sslEngine.closeInbound(); + } catch (SSLException e) { + // This log is for 
debugging purposes as an exception might occur frequently + // at this point due to peers not following the TLS specs and failing to send a close_notify alert. + // Even if they do, currently, we do not read data from the socket after invoking close(). + log.debug("SSLEngine.closeInBound() raised an exception.", e); + } socketChannel.socket().close(); socketChannel.close(); netReadBuffer = null; @@ -674,7 +682,7 @@ public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { int totalRead = 0; int i = offset; - while (i < length) { + while (i < offset + length) { if (dsts[i].hasRemaining()) { int read = read(dsts[i]); if (read > 0) @@ -747,7 +755,7 @@ public long write(ByteBuffer[] srcs, int offset, int length) throws IOException throw new IndexOutOfBoundsException(); int totalWritten = 0; int i = offset; - while (i < length) { + while (i < offset + length) { if (srcs[i].hasRemaining() || hasPendingWrites()) { int written = write(srcs[i]); if (written > 0) { @@ -878,7 +886,7 @@ protected ByteBuffer appReadBuffer() { * retries and report the failure. If `flush` is true, exceptions are propagated after * any pending outgoing bytes are flushed to ensure that the peer is notified of the failure. */ - private void handshakeFailure(SSLException sslException, boolean flush) throws IOException { + private void handshakeFailure(SSLException sslException, boolean flush) { //Release all resources such as internal buffers that SSLEngine is managing log.debug("SSL Handshake failed", sslException); sslEngine.closeOutbound(); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index e0ece7b2a3..4d36616ef9 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -143,7 +143,7 @@ import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.errors.UnsupportedSaslMechanismException; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.errors.AbortableTransactionException; +import org.apache.kafka.common.errors.TransactionAbortableException; // AutoMQ inject start import org.apache.kafka.common.errors.s3.CompactedObjectsNotFoundException; @@ -411,7 +411,7 @@ public enum Errors { UNKNOWN_SUBSCRIPTION_ID(117, "Client sent a push telemetry request with an invalid or outdated subscription ID.", UnknownSubscriptionIdException::new), TELEMETRY_TOO_LARGE(118, "Client sent a push telemetry request larger than the maximum size the broker will accept.", TelemetryTooLargeException::new), INVALID_REGISTRATION(119, "The controller has considered the broker registration to be invalid.", InvalidRegistrationException::new), - ABORTABLE_TRANSACTION(120, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID.", AbortableTransactionException::new), + TRANSACTION_ABORTABLE(120, "The server encountered an error with the transaction. 
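The renamed error code and exception below describe a recoverable condition: the transaction failed, but the transactional id remains usable once the client aborts. A hedged producer-side sketch of that recovery path; the topic name and the exact call at which the exception surfaces are assumptions.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.TransactionAbortableException;

public class AbortableTxnExample {
    static void sendInTransaction(KafkaProducer<String, String> producer) {
        try {
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // placeholder topic
            producer.commitTransaction();
        } catch (TransactionAbortableException e) {
            // Error 120: the transaction itself failed, but the transactional id stays usable
            // after aborting, so the producer does not need to be recreated.
            producer.abortTransaction();
        }
    }
}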
The client can abort the transaction to continue using this transactional ID.", TransactionAbortableException::new), // AutoMQ for Kafka inject start STREAM_EXIST(501, "The stream already exists.", StreamExistException::new), diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Message.java b/clients/src/main/java/org/apache/kafka/common/protocol/Message.java index e379f01100..13961869f8 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Message.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Message.java @@ -42,7 +42,7 @@ public interface Message { * @param cache The serialization size cache to populate. * @param version The version to use. * - * @throws {@see org.apache.kafka.common.errors.UnsupportedVersionException} + * @throws org.apache.kafka.common.errors.UnsupportedVersionException * If the specified version is too new to be supported * by this software. */ @@ -69,7 +69,7 @@ default int size(ObjectSerializationCache cache, short version) { * previously populated the size cache using #{Message#size()}. * @param version The version to use. * - * @throws {@see org.apache.kafka.common.errors.UnsupportedVersionException} + * @throws org.apache.kafka.common.errors.UnsupportedVersionException * If the specified version is too new to be supported * by this software. */ @@ -82,7 +82,7 @@ default int size(ObjectSerializationCache cache, short version) { * @param readable The source readable. * @param version The version to use. * - * @throws {@see org.apache.kafka.common.errors.UnsupportedVersionException} + * @throws org.apache.kafka.common.errors.UnsupportedVersionException * If the specified version is too new to be supported * by this software. */ diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java b/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java index 39a3cb880d..6d8c83b777 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java @@ -40,7 +40,6 @@ public class TaggedFields extends DocumentedType { * by associated Field objects. * @return The new {@link TaggedFields} */ - @SuppressWarnings("unchecked") public static TaggedFields of(Object... fields) { if (fields.length % 2 != 0) { throw new RuntimeException("TaggedFields#of takes an even " + @@ -79,7 +78,6 @@ public void write(ByteBuffer buffer, Object o) { } } - @SuppressWarnings("unchecked") @Override public NavigableMap read(ByteBuffer buffer) { int numTaggedFields = ByteUtils.readUnsignedVarint(buffer); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java b/clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java index bb1d006d3e..c5dd2e2c10 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java @@ -231,7 +231,7 @@ public void write(ByteBuffer buffer, Object o) { @Override public Object read(ByteBuffer buffer) { short value = buffer.getShort(); - return Integer.valueOf(Short.toUnsignedInt(value)); + return Short.toUnsignedInt(value); } @Override @@ -1071,7 +1071,7 @@ public int sizeOf(Object o) { public String documentation() { return "Represents an integer between -231 and 231-1 inclusive. 
" + "Encoding follows the variable-length zig-zag encoding from " + - " Google Protocol Buffers."; + " Google Protocol Buffers."; } }; @@ -1106,7 +1106,7 @@ public int sizeOf(Object o) { public String documentation() { return "Represents an integer between -263 and 263-1 inclusive. " + "Encoding follows the variable-length zig-zag encoding from " + - " Google Protocol Buffers."; + " Google Protocol Buffers."; } }; diff --git a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java index 499dc50289..0f29581623 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.utils.AbstractIterator; @@ -332,7 +333,7 @@ private DeepRecordsIterator(AbstractLegacyRecordBatch wrapperEntry, throw new InvalidRecordException("Found invalid compressed record set with null value (magic = " + wrapperMagic + ")"); - InputStream stream = compressionType.wrapForInput(wrapperValue, wrapperRecord.magic(), bufferSupplier); + InputStream stream = Compression.of(compressionType).build().wrapForInput(wrapperValue, wrapperRecord.magic(), bufferSupplier); LogInputStream logStream = new DataLogInputStream(stream, maxMessageSize); long lastOffsetFromWrapper = wrapperEntry.lastOffset(); diff --git a/clients/src/main/java/org/apache/kafka/common/record/CompressionType.java b/clients/src/main/java/org/apache/kafka/common/record/CompressionType.java index a4ebf1648e..12efafc8b5 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/CompressionType.java +++ b/clients/src/main/java/org/apache/kafka/common/record/CompressionType.java @@ -16,158 +16,23 @@ */ package org.apache.kafka.common.record; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.compress.KafkaLZ4BlockInputStream; -import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream; -import org.apache.kafka.common.compress.SnappyFactory; -import org.apache.kafka.common.compress.ZstdFactory; -import org.apache.kafka.common.utils.BufferSupplier; -import org.apache.kafka.common.utils.ByteBufferInputStream; -import org.apache.kafka.common.utils.ByteBufferOutputStream; -import org.apache.kafka.common.utils.ChunkedBytesStream; - -import java.io.BufferedOutputStream; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.zip.GZIPInputStream; -import java.util.zip.GZIPOutputStream; - /** * The compression type to use */ public enum CompressionType { - NONE((byte) 0, "none", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - return buffer; - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - return new ByteBufferInputStream(buffer); - } - }, + NONE((byte) 0, "none", 1.0f), // Shipped with the JDK - GZIP((byte) 1, "gzip", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - try { - // Set input buffer (uncompressed) to 16 KB (none by 
default) and output buffer (compressed) to - // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller passes a small - // number of bytes to write (potentially a single byte) - return new BufferedOutputStream(new GZIPOutputStream(buffer, 8 * 1024), 16 * 1024); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - // Set input buffer (compressed) to 8 KB (GZIPInputStream uses 0.5 KB by default) to ensure reasonable - // performance in cases where the caller reads a small number of bytes (potentially a single byte). - // - // Size of output buffer (uncompressed) is provided by decompressionOutputSize. - // - // ChunkedBytesStream is used to wrap the GZIPInputStream because the default implementation of - // GZIPInputStream does not use an intermediate buffer for decompression in chunks. - return new ChunkedBytesStream(new GZIPInputStream(new ByteBufferInputStream(buffer), 8 * 1024), decompressionBufferSupplier, decompressionOutputSize(), false); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - @Override - public int decompressionOutputSize() { - // 16KB has been chosen based on legacy implementation introduced in https://github.com/apache/kafka/pull/6785 - return 16 * 1024; - } - }, + GZIP((byte) 1, "gzip", 1.0f), // We should only load classes from a given compression library when we actually use said compression library. This // is because compression libraries include native code for a set of platforms and we want to avoid errors // in case the platform is not supported and the compression library is not actually used. // To ensure this, we only reference compression library code from classes that are only invoked when actual usage // happens. - - SNAPPY((byte) 2, "snappy", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - return SnappyFactory.wrapForOutput(buffer); - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - // SnappyInputStream uses default implementation of InputStream for skip. Default implementation of - // SnappyInputStream allocates a new skip buffer every time, hence, we prefer our own implementation. - return new ChunkedBytesStream(SnappyFactory.wrapForInput(buffer), decompressionBufferSupplier, decompressionOutputSize(), false); - } - - @Override - public int decompressionOutputSize() { - // SnappyInputStream already uses an intermediate buffer internally. 
The size - // of this buffer is based on legacy implementation based on skipArray introduced in - // https://github.com/apache/kafka/pull/6785 - return 2 * 1024; // 2KB - } - }, - - LZ4((byte) 3, "lz4", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - try { - return new KafkaLZ4BlockOutputStream(buffer, messageVersion == RecordBatch.MAGIC_VALUE_V0); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - - @Override - public InputStream wrapForInput(ByteBuffer inputBuffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - try { - return new ChunkedBytesStream( - new KafkaLZ4BlockInputStream(inputBuffer, decompressionBufferSupplier, messageVersion == RecordBatch.MAGIC_VALUE_V0), - decompressionBufferSupplier, decompressionOutputSize(), true); - } catch (Throwable e) { - throw new KafkaException(e); - } - } - - @Override - public int decompressionOutputSize() { - // KafkaLZ4BlockInputStream uses an internal intermediate buffer to store decompressed data. The size - // of this buffer is based on legacy implementation based on skipArray introduced in - // https://github.com/apache/kafka/pull/6785 - return 2 * 1024; // 2KB - } - }, - - ZSTD((byte) 4, "zstd", 1.0f) { - @Override - public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { - return ZstdFactory.wrapForOutput(buffer); - } - - @Override - public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) { - return new ChunkedBytesStream(ZstdFactory.wrapForInput(buffer, messageVersion, decompressionBufferSupplier), decompressionBufferSupplier, decompressionOutputSize(), false); - } - - /** - * Size of intermediate buffer which contains uncompressed data. - * This size should be <= ZSTD_BLOCKSIZE_MAX - * see: https://github.com/facebook/zstd/blob/189653a9c10c9f4224a5413a6d6a69dd01d7c3bd/lib/zstd.h#L854 - */ - @Override - public int decompressionOutputSize() { - // 16KB has been chosen based on legacy implementation introduced in https://github.com/apache/kafka/pull/6785 - return 16 * 1024; - } - - - }; + SNAPPY((byte) 2, "snappy", 1.0f), + LZ4((byte) 3, "lz4", 1.0f), + ZSTD((byte) 4, "zstd", 1.0f); // compression type is represented by two bits in the attributes field of the record batch header, so `byte` is // large enough @@ -181,34 +46,6 @@ public int decompressionOutputSize() { this.rate = rate; } - /** - * Wrap bufferStream with an OutputStream that will compress data with this CompressionType. - *
    - * Note: Unlike {@link #wrapForInput}, {@link #wrapForOutput} cannot take {@link ByteBuffer}s directly. - * Currently, {@link MemoryRecordsBuilder#writeDefaultBatchHeader()} and {@link MemoryRecordsBuilder#writeLegacyCompressedWrapperHeader()} - * write to the underlying buffer in the given {@link ByteBufferOutputStream} after the compressed data has been written. - * In the event that the buffer needs to be expanded while writing the data, access to the underlying buffer needs to be preserved. - */ - public abstract OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion); - - /** - * Wrap buffer with an InputStream that will decompress data with this CompressionType. - * - * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. - * For small record batches, allocating a potentially large buffer (64 KB for LZ4) - * will dominate the cost of decompressing and iterating over the records in the - * batch. As such, a supplier that reuses buffers will have a significant - * performance impact. - */ - public abstract InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier); - - /** - * Recommended size of buffer for storing decompressed output. - */ - public int decompressionOutputSize() { - throw new UnsupportedOperationException("Size of decompression buffer is not defined for this compression type=" + this.name); - } - public static CompressionType forId(int id) { switch (id) { case 0: diff --git a/clients/src/main/java/org/apache/kafka/common/record/ControlRecordType.java b/clients/src/main/java/org/apache/kafka/common/record/ControlRecordType.java index 39268044db..8d0bbdba9e 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/ControlRecordType.java +++ b/clients/src/main/java/org/apache/kafka/common/record/ControlRecordType.java @@ -44,11 +44,15 @@ public enum ControlRecordType { ABORT((short) 0), COMMIT((short) 1), - // Raft quorum related control messages. 
+ // KRaft quorum related control messages LEADER_CHANGE((short) 2), SNAPSHOT_HEADER((short) 3), SNAPSHOT_FOOTER((short) 4), + // KRaft membership changes messages + KRAFT_VERSION((short) 5), + KRAFT_VOTERS((short) 6), + // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored UNKNOWN((short) -1); @@ -108,6 +112,10 @@ public static ControlRecordType fromTypeId(short typeId) { return SNAPSHOT_HEADER; case 4: return SNAPSHOT_FOOTER; + case 5: + return KRAFT_VERSION; + case 6: + return KRAFT_VOTERS; default: return UNKNOWN; diff --git a/clients/src/main/java/org/apache/kafka/common/record/ControlRecordUtils.java b/clients/src/main/java/org/apache/kafka/common/record/ControlRecordUtils.java index 3b1fd21f78..1e78448643 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/ControlRecordUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/record/ControlRecordUtils.java @@ -16,9 +16,11 @@ */ package org.apache.kafka.common.record; +import org.apache.kafka.common.message.KRaftVersionRecord; import org.apache.kafka.common.message.LeaderChangeMessage; -import org.apache.kafka.common.message.SnapshotHeaderRecord; import org.apache.kafka.common.message.SnapshotFooterRecord; +import org.apache.kafka.common.message.SnapshotHeaderRecord; +import org.apache.kafka.common.message.VotersRecord; import org.apache.kafka.common.protocol.ByteBufferAccessor; import java.nio.ByteBuffer; @@ -27,49 +29,77 @@ * Utility class for easy interaction with control records. */ public class ControlRecordUtils { + public static final short KRAFT_VERSION_CURRENT_VERSION = 0; public static final short LEADER_CHANGE_CURRENT_VERSION = 0; - public static final short SNAPSHOT_HEADER_CURRENT_VERSION = 0; public static final short SNAPSHOT_FOOTER_CURRENT_VERSION = 0; + public static final short SNAPSHOT_HEADER_CURRENT_VERSION = 0; + public static final short KRAFT_VOTERS_CURRENT_VERSION = 0; public static LeaderChangeMessage deserializeLeaderChangeMessage(Record record) { ControlRecordType recordType = ControlRecordType.parse(record.key()); - if (recordType != ControlRecordType.LEADER_CHANGE) { - throw new IllegalArgumentException( - "Expected LEADER_CHANGE control record type(2), but found " + recordType.toString()); - } + validateControlRecordType(ControlRecordType.LEADER_CHANGE, recordType); + return deserializeLeaderChangeMessage(record.value()); } public static LeaderChangeMessage deserializeLeaderChangeMessage(ByteBuffer data) { - ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(data.slice()); - return new LeaderChangeMessage(byteBufferAccessor, LEADER_CHANGE_CURRENT_VERSION); + return new LeaderChangeMessage(new ByteBufferAccessor(data.slice()), LEADER_CHANGE_CURRENT_VERSION); } public static SnapshotHeaderRecord deserializeSnapshotHeaderRecord(Record record) { ControlRecordType recordType = ControlRecordType.parse(record.key()); - if (recordType != ControlRecordType.SNAPSHOT_HEADER) { - throw new IllegalArgumentException( - "Expected SNAPSHOT_HEADER control record type(3), but found " + recordType.toString()); - } + validateControlRecordType(ControlRecordType.SNAPSHOT_HEADER, recordType); + return deserializeSnapshotHeaderRecord(record.value()); } public static SnapshotHeaderRecord deserializeSnapshotHeaderRecord(ByteBuffer data) { - ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(data.slice()); - return new SnapshotHeaderRecord(byteBufferAccessor, SNAPSHOT_HEADER_CURRENT_VERSION); + return new SnapshotHeaderRecord(new 
ByteBufferAccessor(data.slice()), SNAPSHOT_HEADER_CURRENT_VERSION); } public static SnapshotFooterRecord deserializeSnapshotFooterRecord(Record record) { ControlRecordType recordType = ControlRecordType.parse(record.key()); - if (recordType != ControlRecordType.SNAPSHOT_FOOTER) { - throw new IllegalArgumentException( - "Expected SNAPSHOT_FOOTER control record type(4), but found " + recordType.toString()); - } + validateControlRecordType(ControlRecordType.SNAPSHOT_FOOTER, recordType); + return deserializeSnapshotFooterRecord(record.value()); } public static SnapshotFooterRecord deserializeSnapshotFooterRecord(ByteBuffer data) { - ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(data.slice()); - return new SnapshotFooterRecord(byteBufferAccessor, SNAPSHOT_FOOTER_CURRENT_VERSION); + return new SnapshotFooterRecord(new ByteBufferAccessor(data.slice()), SNAPSHOT_FOOTER_CURRENT_VERSION); + } + + public static KRaftVersionRecord deserializeKRaftVersionRecord(Record record) { + ControlRecordType recordType = ControlRecordType.parse(record.key()); + validateControlRecordType(ControlRecordType.KRAFT_VERSION, recordType); + + return deserializeKRaftVersionRecord(record.value()); + } + + public static KRaftVersionRecord deserializeKRaftVersionRecord(ByteBuffer data) { + return new KRaftVersionRecord(new ByteBufferAccessor(data.slice()), KRAFT_VERSION_CURRENT_VERSION); + } + + public static VotersRecord deserializeVotersRecord(Record record) { + ControlRecordType recordType = ControlRecordType.parse(record.key()); + validateControlRecordType(ControlRecordType.KRAFT_VOTERS, recordType); + + return deserializeVotersRecord(record.value()); + } + + public static VotersRecord deserializeVotersRecord(ByteBuffer data) { + return new VotersRecord(new ByteBufferAccessor(data.slice()), KRAFT_VOTERS_CURRENT_VERSION); + } + + private static void validateControlRecordType(ControlRecordType expected, ControlRecordType actual) { + if (actual != expected) { + throw new IllegalArgumentException( + String.format( + "Expected %s control record type(%d), but found %s", + expected, + expected.type(), + actual + ) + ); + } } } diff --git a/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java b/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java index 7b8dce6bc4..7d8ee75e19 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.utils.BufferSupplier; @@ -276,7 +277,7 @@ public int partitionLeaderEpoch() { public InputStream recordInputStream(BufferSupplier bufferSupplier) { final ByteBuffer buffer = this.buffer.duplicate(); buffer.position(RECORDS_OFFSET); - return compressionType().wrapForInput(buffer, magic(), bufferSupplier); + return Compression.of(compressionType()).build().wrapForInput(buffer, magic(), bufferSupplier); } private CloseableIterator compressedIterator(BufferSupplier bufferSupplier, boolean skipKeyValue) { diff --git a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java index 3a53dea697..215324d676 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java @@ -355,18 +355,18 @@ public TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingP */ public TimestampAndOffset largestTimestampAfter(int startingPosition) { long maxTimestamp = RecordBatch.NO_TIMESTAMP; - long offsetOfMaxTimestamp = -1L; + long shallowOffsetOfMaxTimestamp = -1L; int leaderEpochOfMaxTimestamp = RecordBatch.NO_PARTITION_LEADER_EPOCH; for (RecordBatch batch : batchesFrom(startingPosition)) { long timestamp = batch.maxTimestamp(); if (timestamp > maxTimestamp) { maxTimestamp = timestamp; - offsetOfMaxTimestamp = batch.lastOffset(); + shallowOffsetOfMaxTimestamp = batch.lastOffset(); leaderEpochOfMaxTimestamp = batch.partitionLeaderEpoch(); } } - return new TimestampAndOffset(maxTimestamp, offsetOfMaxTimestamp, + return new TimestampAndOffset(maxTimestamp, shallowOffsetOfMaxTimestamp, maybeLeaderEpoch(leaderEpochOfMaxTimestamp)); } diff --git a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java index db9a09d812..7331efbd7c 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecords.java @@ -33,7 +33,7 @@ public class LazyDownConversionRecords implements BaseRecords, PooledResource { private final Records records; private final byte toMagic; private final long firstOffset; - private ConvertedRecords firstConvertedBatch; + private ConvertedRecords firstConvertedBatch; private final int sizeInBytes; private final Time time; @@ -119,7 +119,7 @@ public String toString() { public final java.util.Iterator> iterator(long maximumReadSize) { // We typically expect only one iterator instance to be created, so null out the first converted batch after // first use to make it available for GC. - ConvertedRecords firstBatch = firstConvertedBatch; + ConvertedRecords firstBatch = firstConvertedBatch; firstConvertedBatch = null; return new Iterator(records, maximumReadSize, firstBatch); } @@ -139,7 +139,7 @@ public void release() { private class Iterator extends AbstractIterator> { private final AbstractIterator batchIterator; private final long maximumReadSize; - private ConvertedRecords firstConvertedBatch; + private ConvertedRecords firstConvertedBatch; /** * @param recordsToDownConvert Records that require down-conversion @@ -161,10 +161,10 @@ private Iterator(Records recordsToDownConvert, long maximumReadSize, ConvertedRe * @return Down-converted records */ @Override - protected ConvertedRecords makeNext() { + protected ConvertedRecords makeNext() { // If we have cached the first down-converted batch, return that now if (firstConvertedBatch != null) { - ConvertedRecords convertedBatch = firstConvertedBatch; + ConvertedRecords convertedBatch = firstConvertedBatch; firstConvertedBatch = null; return convertedBatch; } @@ -183,7 +183,7 @@ protected ConvertedRecords makeNext() { isFirstBatch = false; } - ConvertedRecords convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time); + ConvertedRecords convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time); // During conversion, it is possible that we drop certain batches because they do not have an equivalent // representation in the message format we want to convert to. 
For example, V0 and V1 message formats // have no notion of transaction markers which were introduced in V2 so they get dropped during conversion. diff --git a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java index 0f1ed1baff..1bced60557 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java +++ b/clients/src/main/java/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java @@ -39,7 +39,7 @@ public final class LazyDownConversionRecordsSend extends RecordsSend> convertedRecordsIterator; - private RecordsSend convertedRecordsWriter; + private RecordsSend convertedRecordsWriter; public LazyDownConversionRecordsSend(LazyDownConversionRecords records) { super(records, records.sizeInBytes()); diff --git a/clients/src/main/java/org/apache/kafka/common/record/LogInputStream.java b/clients/src/main/java/org/apache/kafka/common/record/LogInputStream.java index 9a91432a7e..e75944565b 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/LogInputStream.java +++ b/clients/src/main/java/org/apache/kafka/common/record/LogInputStream.java @@ -23,7 +23,7 @@ * the batches at one level. For magic values 0 and 1, this means that it can either handle iteration * at the top level of the log or deep iteration within the payload of a single message, but it does not attempt * to handle both. For magic value 2, this is only used for iterating over the top-level record batches (inner - * records do not follow the {@link RecordBatch} interface. + * records do not follow the {@link RecordBatch} interface). * * The generic typing allows for implementations which present only a view of the log entries, which enables more * efficient iteration when the record data is not actually needed. See for example diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java index 3ba60b09b3..92cff6b271 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java @@ -17,10 +17,13 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CorruptRecordException; +import org.apache.kafka.common.message.KRaftVersionRecord; import org.apache.kafka.common.message.LeaderChangeMessage; -import org.apache.kafka.common.message.SnapshotHeaderRecord; import org.apache.kafka.common.message.SnapshotFooterRecord; +import org.apache.kafka.common.message.SnapshotHeaderRecord; +import org.apache.kafka.common.message.VotersRecord; import org.apache.kafka.common.network.TransferableChannel; import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention; import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetentionResult; @@ -45,7 +48,7 @@ /** * A {@link Records} implementation backed by a ByteBuffer. This is used only for reading or * modifying in-place an existing buffer of record batches. To create a new buffer see {@link MemoryRecordsBuilder}, - * or one of the {@link #builder(ByteBuffer, byte, CompressionType, TimestampType, long)} variants. + * or one of the {@link #builder(ByteBuffer, byte, Compression, TimestampType, long)} variants. 
*/ public class MemoryRecords extends AbstractRecords { private static final Logger log = LoggerFactory.getLogger(MemoryRecords.class); @@ -154,7 +157,7 @@ public FilterResult filterTo(TopicPartition partition, RecordFilter filter, Byte /** * Note: This method is also used to convert the first timestamp of the batch (which is usually the timestamp of the first record) - * to the delete horizon of the tombstones or txn markers which are present in the batch. + * to the delete horizon of the tombstones or txn markers which are present in the batch. */ private static FilterResult filterTo(TopicPartition partition, Iterable batches, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, @@ -209,7 +212,7 @@ private static FilterResult filterTo(TopicPartition partition, Iterable this.maxTimestamp) { this.maxTimestamp = maxTimestamp; - this.offsetOfMaxTimestamp = offsetOfMaxTimestamp; + this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; } this.maxOffset = Math.max(maxOffset, this.maxOffset); this.messagesRetained += messagesRetained; @@ -458,8 +462,8 @@ public long maxTimestamp() { return maxTimestamp; } - public long offsetOfMaxTimestamp() { - return offsetOfMaxTimestamp; + public long shallowOffsetOfMaxTimestamp() { + return shallowOffsetOfMaxTimestamp; } } @@ -468,14 +472,14 @@ public static MemoryRecords readableRecords(ByteBuffer buffer) { } public static MemoryRecordsBuilder builder(ByteBuffer buffer, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); + return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, timestampType, baseOffset); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, int maxSize) { @@ -483,84 +487,84 @@ public static MemoryRecordsBuilder builder(ByteBuffer buffer, if (timestampType == TimestampType.LOG_APPEND_TIME) logAppendTime = System.currentTimeMillis(); - return new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset, + return new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, timestampType, baseOffset, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, maxSize); } public static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer, - CompressionType compressionType, + Compression compression, long baseOffset, long producerId, short producerEpoch, int baseSequence) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, + return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, baseOffset, System.currentTimeMillis(), producerId, producerEpoch, baseSequence); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, + return builder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, RecordBatch.NO_PARTITION_LEADER_EPOCH); } public 
static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset) { long logAppendTime = RecordBatch.NO_TIMESTAMP; if (timestampType == TimestampType.LOG_APPEND_TIME) logAppendTime = System.currentTimeMillis(); - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, + return builder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, RecordBatch.NO_PARTITION_LEADER_EPOCH); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, int partitionLeaderEpoch) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, + return builder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, partitionLeaderEpoch); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, - CompressionType compressionType, + Compression compression, long baseOffset, long producerId, short producerEpoch, int baseSequence, boolean isTransactional) { - return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, baseOffset, + return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, TimestampType.CREATE_TIME, baseOffset, RecordBatch.NO_TIMESTAMP, producerId, producerEpoch, baseSequence, isTransactional, RecordBatch.NO_PARTITION_LEADER_EPOCH); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, long producerId, short producerEpoch, int baseSequence) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime, + return builder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, false, RecordBatch.NO_PARTITION_LEADER_EPOCH); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, @@ -569,13 +573,13 @@ public static MemoryRecordsBuilder builder(ByteBuffer buffer, int baseSequence, boolean isTransactional, int partitionLeaderEpoch) { - return builder(buffer, magic, compressionType, timestampType, baseOffset, + return builder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false, partitionLeaderEpoch); } public static MemoryRecordsBuilder builder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, @@ -585,98 +589,98 @@ public static MemoryRecordsBuilder builder(ByteBuffer buffer, boolean isTransactional, boolean isControlBatch, int partitionLeaderEpoch) { - return new MemoryRecordsBuilder(buffer, magic, compressionType, timestampType, baseOffset, + return new MemoryRecordsBuilder(buffer, magic, compression, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, buffer.remaining()); } - public 
static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, compressionType, records); + public static MemoryRecords withRecords(Compression compression, SimpleRecord... records) { + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, compression, records); } - public static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, + public static MemoryRecords withRecords(Compression compression, int partitionLeaderEpoch, SimpleRecord... records) { + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compression, TimestampType.CREATE_TIME, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, partitionLeaderEpoch, false, records); } - public static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records) { - return withRecords(magic, 0L, compressionType, TimestampType.CREATE_TIME, records); + public static MemoryRecords withRecords(byte magic, Compression compression, SimpleRecord... records) { + return withRecords(magic, 0L, compression, TimestampType.CREATE_TIME, records); } - public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, + public static MemoryRecords withRecords(long initialOffset, Compression compression, SimpleRecord... records) { + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compression, TimestampType.CREATE_TIME, records); } - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, records); + public static MemoryRecords withRecords(byte magic, long initialOffset, Compression compression, SimpleRecord... records) { + return withRecords(magic, initialOffset, compression, TimestampType.CREATE_TIME, records); } - public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, RecordBatch.NO_PRODUCER_ID, + public static MemoryRecords withRecords(long initialOffset, Compression compression, Integer partitionLeaderEpoch, SimpleRecord... records) { + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compression, TimestampType.CREATE_TIME, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, partitionLeaderEpoch, false, records); } - public static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId, + public static MemoryRecords withIdempotentRecords(Compression compression, long producerId, short producerEpoch, int baseSequence, SimpleRecord... 
records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compression, TimestampType.CREATE_TIME, producerId, producerEpoch, baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, false, records); } - public static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType, + public static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, Compression compression, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, + return withRecords(magic, initialOffset, compression, TimestampType.CREATE_TIME, producerId, producerEpoch, baseSequence, partitionLeaderEpoch, false, records); } - public static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId, + public static MemoryRecords withIdempotentRecords(long initialOffset, Compression compression, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME, + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compression, TimestampType.CREATE_TIME, producerId, producerEpoch, baseSequence, partitionLeaderEpoch, false, records); } - public static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId, + public static MemoryRecords withTransactionalRecords(Compression compression, long producerId, short producerEpoch, int baseSequence, SimpleRecord... records) { - return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME, + return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compression, TimestampType.CREATE_TIME, producerId, producerEpoch, baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, true, records); } - public static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType, + public static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, Compression compression, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch, + return withRecords(magic, initialOffset, compression, TimestampType.CREATE_TIME, producerId, producerEpoch, baseSequence, partitionLeaderEpoch, true, records); } - public static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId, + public static MemoryRecords withTransactionalRecords(long initialOffset, Compression compression, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, SimpleRecord... 
records) { - return withTransactionalRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, + return withTransactionalRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compression, producerId, producerEpoch, baseSequence, partitionLeaderEpoch, records); } - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, + public static MemoryRecords withRecords(byte magic, long initialOffset, Compression compression, TimestampType timestampType, SimpleRecord... records) { - return withRecords(magic, initialOffset, compressionType, timestampType, RecordBatch.NO_PRODUCER_ID, + return withRecords(magic, initialOffset, compression, timestampType, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, RecordBatch.NO_PARTITION_LEADER_EPOCH, false, records); } - public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType, + public static MemoryRecords withRecords(byte magic, long initialOffset, Compression compression, TimestampType timestampType, long producerId, short producerEpoch, int baseSequence, int partitionLeaderEpoch, boolean isTransactional, SimpleRecord... records) { if (records.length == 0) return MemoryRecords.EMPTY; - int sizeEstimate = AbstractRecords.estimateSizeInBytes(magic, compressionType, Arrays.asList(records)); + int sizeEstimate = AbstractRecords.estimateSizeInBytes(magic, compression.type(), Arrays.asList(records)); ByteBufferOutputStream bufferStream = new ByteBufferOutputStream(sizeEstimate); long logAppendTime = RecordBatch.NO_TIMESTAMP; if (timestampType == TimestampType.LOG_APPEND_TIME) logAppendTime = System.currentTimeMillis(); - try (final MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferStream, magic, compressionType, timestampType, + try (final MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferStream, magic, compression, timestampType, initialOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false, partitionLeaderEpoch, sizeEstimate)) { for (SimpleRecord record : records) @@ -712,7 +716,7 @@ public static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOf int partitionLeaderEpoch, long producerId, short producerEpoch, EndTransactionMarker marker) { boolean isTransactional = true; - try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, + try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, TimestampType.CREATE_TIME, initialOffset, timestamp, producerId, producerEpoch, RecordBatch.NO_SEQUENCE, isTransactional, true, partitionLeaderEpoch, buffer.capacity()) @@ -728,83 +732,114 @@ public static MemoryRecords withLeaderChangeMessage( ByteBuffer buffer, LeaderChangeMessage leaderChangeMessage ) { - writeLeaderChangeMessage(buffer, initialOffset, timestamp, leaderEpoch, leaderChangeMessage); - buffer.flip(); - return MemoryRecords.readableRecords(buffer); + try (MemoryRecordsBuilder builder = createKraftControlRecordBuilder( + initialOffset, + timestamp, + leaderEpoch, + buffer + ) + ) { + builder.appendLeaderChangeMessage(timestamp, leaderChangeMessage); + return builder.build(); + } } - private static void writeLeaderChangeMessage( - ByteBuffer buffer, + public static MemoryRecords withSnapshotHeaderRecord( long initialOffset, long timestamp, int leaderEpoch, - LeaderChangeMessage leaderChangeMessage + ByteBuffer buffer, + 
SnapshotHeaderRecord snapshotHeaderRecord ) { - try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder( - buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, - TimestampType.CREATE_TIME, initialOffset, timestamp, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, - false, true, leaderEpoch, buffer.capacity()) + try (MemoryRecordsBuilder builder = createKraftControlRecordBuilder( + initialOffset, + timestamp, + leaderEpoch, + buffer + ) ) { - builder.appendLeaderChangeMessage(timestamp, leaderChangeMessage); + builder.appendSnapshotHeaderMessage(timestamp, snapshotHeaderRecord); + return builder.build(); } } - public static MemoryRecords withSnapshotHeaderRecord( + public static MemoryRecords withSnapshotFooterRecord( long initialOffset, long timestamp, int leaderEpoch, ByteBuffer buffer, - SnapshotHeaderRecord snapshotHeaderRecord + SnapshotFooterRecord snapshotFooterRecord ) { - writeSnapshotHeaderRecord(buffer, initialOffset, timestamp, leaderEpoch, snapshotHeaderRecord); - buffer.flip(); - return MemoryRecords.readableRecords(buffer); + try (MemoryRecordsBuilder builder = createKraftControlRecordBuilder( + initialOffset, + timestamp, + leaderEpoch, + buffer + ) + ) { + builder.appendSnapshotFooterMessage(timestamp, snapshotFooterRecord); + return builder.build(); + } } - private static void writeSnapshotHeaderRecord( - ByteBuffer buffer, + public static MemoryRecords withKRaftVersionRecord( long initialOffset, long timestamp, int leaderEpoch, - SnapshotHeaderRecord snapshotHeaderRecord + ByteBuffer buffer, + KRaftVersionRecord kraftVersionRecord ) { - try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder( - buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, - TimestampType.CREATE_TIME, initialOffset, timestamp, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, - false, true, leaderEpoch, buffer.capacity()) + try (MemoryRecordsBuilder builder = createKraftControlRecordBuilder( + initialOffset, + timestamp, + leaderEpoch, + buffer + ) ) { - builder.appendSnapshotHeaderMessage(timestamp, snapshotHeaderRecord); + builder.appendKRaftVersionMessage(timestamp, kraftVersionRecord); + return builder.build(); } } - public static MemoryRecords withSnapshotFooterRecord( + public static MemoryRecords withVotersRecord( long initialOffset, long timestamp, int leaderEpoch, ByteBuffer buffer, - SnapshotFooterRecord snapshotFooterRecord + VotersRecord votersRecord ) { - writeSnapshotFooterRecord(buffer, initialOffset, timestamp, leaderEpoch, snapshotFooterRecord); - buffer.flip(); - return MemoryRecords.readableRecords(buffer); + try (MemoryRecordsBuilder builder = createKraftControlRecordBuilder( + initialOffset, + timestamp, + leaderEpoch, + buffer + ) + ) { + builder.appendVotersMessage(timestamp, votersRecord); + return builder.build(); + } } - private static void writeSnapshotFooterRecord( - ByteBuffer buffer, + private static MemoryRecordsBuilder createKraftControlRecordBuilder( long initialOffset, long timestamp, int leaderEpoch, - SnapshotFooterRecord snapshotFooterRecord + ByteBuffer buffer ) { - try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder( - buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, - TimestampType.CREATE_TIME, initialOffset, timestamp, - RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, - false, true, leaderEpoch, buffer.capacity()) - ) { - builder.appendSnapshotFooterMessage(timestamp, snapshotFooterRecord); - } + 
return new MemoryRecordsBuilder( + buffer, + RecordBatch.CURRENT_MAGIC_VALUE, + Compression.NONE, + TimestampType.CREATE_TIME, + initialOffset, + timestamp, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + RecordBatch.NO_SEQUENCE, + false, + true, + leaderEpoch, + buffer.capacity() + ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java index 6b53ee4159..c3083457ad 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java @@ -17,10 +17,13 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.message.KRaftVersionRecord; import org.apache.kafka.common.message.LeaderChangeMessage; -import org.apache.kafka.common.message.SnapshotHeaderRecord; import org.apache.kafka.common.message.SnapshotFooterRecord; +import org.apache.kafka.common.message.SnapshotHeaderRecord; +import org.apache.kafka.common.message.VotersRecord; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.utils.ByteBufferOutputStream; @@ -52,7 +55,7 @@ public void write(int b) { }); private final TimestampType timestampType; - private final CompressionType compressionType; + private final Compression compression; // Used to hold a reference to the underlying ByteBuffer so that we can write the record batch header and access // the written bytes. ByteBufferOutputStream allocates a new ByteBuffer if the existing one is not large enough, // so it's not safe to hold a direct reference to the underlying ByteBuffer. 
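// --- Illustrative sketch (not part of the patch) ---------------------------------------------
// The hunks above replace CompressionType's per-enum wrapForOutput/wrapForInput implementations
// with the new Compression abstraction; call sites that only hold a CompressionType (for example
// DefaultRecordBatch.recordInputStream and AbstractLegacyRecordBatch above) now resolve a codec
// via Compression.of(type).build(). A minimal, hedged sketch of that migration, using only calls
// that appear in this diff; the class and method names of the sketch itself are made up for
// illustration.

import org.apache.kafka.common.compress.Compression;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.ByteBufferOutputStream;

import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;

final class CompressionMigrationSketch {

    // Old: compressionType.wrapForOutput(bufferStream, magic)
    static OutputStream compressedOutput(ByteBufferOutputStream bufferStream, CompressionType type) {
        return Compression.of(type).build().wrapForOutput(bufferStream, RecordBatch.CURRENT_MAGIC_VALUE);
    }

    // Old: compressionType.wrapForInput(buffer, magic, bufferSupplier)
    static InputStream decompressedInput(ByteBuffer buffer, CompressionType type, BufferSupplier bufferSupplier) {
        return Compression.of(type).build().wrapForInput(buffer, RecordBatch.CURRENT_MAGIC_VALUE, bufferSupplier);
    }
}
// ----------------------------------------------------------------------------------------------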
@@ -90,7 +93,7 @@ public void write(int b) { public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, @@ -109,7 +112,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, throw new IllegalArgumentException("Transactional records are not supported for magic " + magic); if (isControlBatch) throw new IllegalArgumentException("Control records are not supported for magic " + magic); - if (compressionType == CompressionType.ZSTD) + if (compression.type() == CompressionType.ZSTD) throw new IllegalArgumentException("ZStandard compression is not supported for magic " + magic); if (deleteHorizonMs != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Delete horizon timestamp is not supported for magic " + magic); @@ -117,7 +120,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, this.magic = magic; this.timestampType = timestampType; - this.compressionType = compressionType; + this.compression = compression; this.baseOffset = baseOffset; this.logAppendTime = logAppendTime; this.numRecords = 0; @@ -133,11 +136,11 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, this.partitionLeaderEpoch = partitionLeaderEpoch; this.writeLimit = writeLimit; this.initialPosition = bufferStream.position(); - this.batchHeaderSizeInBytes = AbstractRecords.recordBatchHeaderSizeInBytes(magic, compressionType); + this.batchHeaderSizeInBytes = AbstractRecords.recordBatchHeaderSizeInBytes(magic, compression.type()); bufferStream.position(initialPosition + batchHeaderSizeInBytes); this.bufferStream = bufferStream; - this.appendStream = new DataOutputStream(compressionType.wrapForOutput(this.bufferStream, magic)); + this.appendStream = new DataOutputStream(compression.wrapForOutput(this.bufferStream, magic)); if (hasDeleteHorizonMs()) { this.baseTimestamp = deleteHorizonMs; @@ -146,7 +149,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, @@ -157,7 +160,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, boolean isControlBatch, int partitionLeaderEpoch, int writeLimit) { - this(bufferStream, magic, compressionType, timestampType, baseOffset, logAppendTime, producerId, + this(bufferStream, magic, compression, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, writeLimit, RecordBatch.NO_TIMESTAMP); } @@ -168,7 +171,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, * @param buffer The underlying buffer to use (note that this class will allocate a new buffer if necessary * to fit the records appended) * @param magic The magic value to use - * @param compressionType The compression codec to use + * @param compression The compression codec to use * @param timestampType The desired timestamp type. For magic > 0, this cannot be {@link TimestampType#NO_TIMESTAMP_TYPE}. * @param baseOffset The initial offset to use for * @param logAppendTime The log append time of this record set. Can be set to NO_TIMESTAMP if CREATE_TIME is used. 
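// --- Illustrative sketch (not part of the patch) ---------------------------------------------
// This diff also introduces the KRAFT_VERSION and KRAFT_VOTERS control record types
// (ControlRecordType, ControlRecordUtils) and the MemoryRecords.withKRaftVersionRecord /
// withVotersRecord factories built on the shared createKraftControlRecordBuilder helper above.
// A rough write-then-parse round trip using only names visible in those hunks; the buffer size,
// timestamp, leader epoch and the default-constructed KRaftVersionRecord are assumptions made
// purely for illustration.

import org.apache.kafka.common.message.KRaftVersionRecord;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.ControlRecordUtils;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;

import java.nio.ByteBuffer;

final class KRaftControlRecordSketch {

    static void roundTrip() {
        // Write: a single control batch carrying a KRaftVersionRecord with default field values.
        ByteBuffer buffer = ByteBuffer.allocate(256); // assumed large enough for one control batch
        MemoryRecords records = MemoryRecords.withKRaftVersionRecord(
            0L,                         // initialOffset
            System.currentTimeMillis(), // timestamp
            0,                          // leaderEpoch
            buffer,
            new KRaftVersionRecord()
        );

        // Read: identify the control record by its key, then decode it with ControlRecordUtils.
        for (RecordBatch batch : records.batches()) {
            for (Record record : batch) {
                if (ControlRecordType.parse(record.key()) == ControlRecordType.KRAFT_VERSION) {
                    KRaftVersionRecord decoded = ControlRecordUtils.deserializeKRaftVersionRecord(record);
                    // 'decoded' holds the same (default) field values that were written above.
                }
            }
        }
    }
}
// ----------------------------------------------------------------------------------------------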
@@ -184,7 +187,7 @@ public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, */ public MemoryRecordsBuilder(ByteBuffer buffer, byte magic, - CompressionType compressionType, + Compression compression, TimestampType timestampType, long baseOffset, long logAppendTime, @@ -195,7 +198,7 @@ public MemoryRecordsBuilder(ByteBuffer buffer, boolean isControlBatch, int partitionLeaderEpoch, int writeLimit) { - this(new ByteBufferOutputStream(buffer), magic, compressionType, timestampType, baseOffset, logAppendTime, + this(new ByteBufferOutputStream(buffer), magic, compression, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, writeLimit); } @@ -212,8 +215,8 @@ public double compressionRatio() { return actualCompressionRatio; } - public CompressionType compressionType() { - return compressionType; + public Compression compression() { + return compression; } public boolean isControlBatch() { @@ -240,25 +243,55 @@ public MemoryRecords build() { return builtRecords; } + + /** - * Get the max timestamp and its offset. The details of the offset returned are a bit subtle. - * Note: The semantic for the offset of max timestamp is the first offset with the max timestamp if there are multi-records having same timestamp. - * - * If the log append time is used, the offset will be the first offset of the record. - * - * If create time is used, the offset will always be the offset of the record with the max timestamp. - * - * If it's NO_TIMESTAMP (i.e. MAGIC_VALUE_V0), we'll return offset -1 since no timestamp info in records. - * - * @return The max timestamp and its offset + * There are three cases of finding max timestamp to return: + * 1) version 0: The max timestamp is NO_TIMESTAMP (-1) + * 2) LogAppendTime: All records have same timestamp, and so the max timestamp is equal to logAppendTime + * 3) CreateTime: The max timestamp of record + *
    + * Let's talk about OffsetOfMaxTimestamp. There are some paths that we don't try to find the OffsetOfMaxTimestamp + * to avoid expensive records iteration. Those paths include follower append and index recovery. In order to + * avoid inconsistent time index, we let all paths find shallowOffsetOfMaxTimestamp instead of OffsetOfMaxTimestamp. + *
    + * Let's define the shallowOffsetOfMaxTimestamp: It is last offset of the batch having max timestamp. If there are + * many batches having same max timestamp, we pick up the earliest batch. + *
    + * There are five cases of finding shallowOffsetOfMaxTimestamp to return: + * 1) version 0: It is always the -1 + * 2) LogAppendTime with single batch: It is the offset of last record + * 3) LogAppendTime with many single-record batches: Those single-record batches have same max timestamp, so we return + * the base offset, which is equal to the last offset of earliest batch + * 4) CreateTime with single batch: We return offset of last record to follow the spec we mentioned above. Of course, + * we do have the OffsetOfMaxTimestamp for this case, but we want to make all paths + * find the shallowOffsetOfMaxTimestamp rather than offsetOfMaxTimestamp + * 5) CreateTime with many single-record batches: Each batch is composed of single record, and hence offsetOfMaxTimestamp + * is equal to the last offset of earliest batch with max timestamp */ public RecordsInfo info() { if (timestampType == TimestampType.LOG_APPEND_TIME) { - return new RecordsInfo(logAppendTime, baseOffset); + if (compression.type() != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) + // maxTimestamp => case 2 + // shallowOffsetOfMaxTimestamp => case 2 + return new RecordsInfo(logAppendTime, lastOffset); + else + // maxTimestamp => case 2 + // shallowOffsetOfMaxTimestamp => case 3 + return new RecordsInfo(logAppendTime, baseOffset); + } else if (maxTimestamp == RecordBatch.NO_TIMESTAMP) { + // maxTimestamp => case 1 + // shallowOffsetOfMaxTimestamp => case 1 + return new RecordsInfo(RecordBatch.NO_TIMESTAMP, -1); } else { - // For create time, we always use offsetOfMaxTimestamp for the correct time -> offset mapping - // If it's MAGIC_VALUE_V0, the value will be the default value: [-1, -1] - return new RecordsInfo(maxTimestamp, offsetOfMaxTimestamp); + if (compression.type() != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) + // maxTimestamp => case 3 + // shallowOffsetOfMaxTimestamp => case 4 + return new RecordsInfo(maxTimestamp, lastOffset); + else + // maxTimestamp => case 3 + // shallowOffsetOfMaxTimestamp => case 5 + return new RecordsInfo(maxTimestamp, offsetOfMaxTimestamp); } } @@ -343,7 +376,7 @@ public void close() { } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.uncompressedRecordsSizeInBytes; - else if (compressionType != CompressionType.NONE) + else if (compression.type() != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.uncompressedRecordsSizeInBytes; ByteBuffer buffer = buffer().duplicate(); @@ -388,7 +421,7 @@ private int writeDefaultBatchHeader() { else maxTimestamp = this.maxTimestamp; - DefaultRecordBatch.writeHeader(buffer, baseOffset, offsetDelta, size, magic, compressionType, timestampType, + DefaultRecordBatch.writeHeader(buffer, baseOffset, offsetDelta, size, magic, compression.type(), timestampType, baseTimestamp, maxTimestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, hasDeleteHorizonMs(), partitionLeaderEpoch, numRecords); @@ -411,7 +444,7 @@ private int writeLegacyCompressedWrapperHeader() { AbstractLegacyRecordBatch.writeHeader(buffer, lastOffset, wrapperSize); long timestamp = timestampType == TimestampType.LOG_APPEND_TIME ? 
logAppendTime : maxTimestamp; - LegacyRecord.writeCompressedRecordHeader(buffer, magic, wrapperSize, timestamp, compressionType, timestampType); + LegacyRecord.writeCompressedRecordHeader(buffer, magic, wrapperSize, timestamp, compression.type(), timestampType); buffer.position(pos); return writtenCompressed; @@ -573,11 +606,12 @@ public void append(SimpleRecord record) { /** * Append a control record at the next sequential offset. + * * @param timestamp The record timestamp * @param type The control record type (cannot be UNKNOWN) * @param value The control record value */ - private void appendControlRecord(long timestamp, ControlRecordType type, ByteBuffer value) { + public void appendControlRecord(long timestamp, ControlRecordType type, ByteBuffer value) { Struct keyStruct = type.recordKey(); ByteBuffer key = ByteBuffer.allocate(keyStruct.sizeOf()); keyStruct.writeTo(key); @@ -621,6 +655,22 @@ public void appendSnapshotFooterMessage(long timestamp, SnapshotFooterRecord sna ); } + public void appendKRaftVersionMessage(long timestamp, KRaftVersionRecord kraftVersionRecord) { + appendControlRecord( + timestamp, + ControlRecordType.KRAFT_VERSION, + MessageUtil.toByteBuffer(kraftVersionRecord, ControlRecordUtils.KRAFT_VERSION_CURRENT_VERSION) + ); + } + + public void appendVotersMessage(long timestamp, VotersRecord votersRecord) { + appendControlRecord( + timestamp, + ControlRecordType.KRAFT_VOTERS, + MessageUtil.toByteBuffer(votersRecord, ControlRecordUtils.KRAFT_VOTERS_CURRENT_VERSION) + ); + } + /** * Add a legacy record without doing offset/magic validation (this should only be used in testing). * @param offset The offset of the record @@ -717,8 +767,6 @@ private void appendDefaultRecord(long offset, long timestamp, ByteBuffer key, By private long appendLegacyRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value, byte magic) throws IOException { ensureOpenForRecordAppend(); - if (compressionType == CompressionType.NONE && timestampType == TimestampType.LOG_APPEND_TIME) - timestamp = logAppendTime; int size = LegacyRecord.recordSize(magic, key, value); AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size); @@ -732,7 +780,7 @@ private long appendLegacyRecord(long offset, long timestamp, ByteBuffer key, Byt private long toInnerOffset(long offset) { // use relative offsets for compressed messages with magic v1 - if (magic > 0 && compressionType != CompressionType.NONE) + if (magic > 0 && compression.type() != CompressionType.NONE) return offset - baseOffset; return offset; } @@ -771,7 +819,7 @@ private void ensureOpenForRecordBatchWrite() { * @return The estimated number of bytes written */ private int estimatedBytesWritten() { - if (compressionType == CompressionType.NONE) { + if (compression.type() == CompressionType.NONE) { return batchHeaderSizeInBytes + uncompressedRecordsSizeInBytes; } else { // estimate the written bytes to the underlying byte buffer based on uncompressed written bytes @@ -851,12 +899,12 @@ private long nextSequentialOffset() { public static class RecordsInfo { public final long maxTimestamp; - public final long offsetOfMaxTimestamp; + public final long shallowOffsetOfMaxTimestamp; public RecordsInfo(long maxTimestamp, - long offsetOfMaxTimestamp) { + long shallowOffsetOfMaxTimestamp) { this.maxTimestamp = maxTimestamp; - this.offsetOfMaxTimestamp = offsetOfMaxTimestamp; + this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; } } diff --git a/clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java 
b/clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java index 7d231c1774..e36beff08f 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java @@ -21,6 +21,7 @@ import java.nio.ByteBuffer; import java.util.Iterator; +import java.util.Optional; import java.util.OptionalLong; /** @@ -245,4 +246,23 @@ public interface RecordBatch extends Iterable<Record> { * @return Whether this is a batch containing control records */ boolean isControlBatch(); + + /** + * Iterate over all records to find the offset of the max timestamp. + * Note: + * 1) the earliest offset is returned if there are multiple records having the same (max) timestamp + * 2) it always returns an empty Optional if the {@link RecordBatch#magic()} is equal to {@link RecordBatch#MAGIC_VALUE_V0} + * @return the offset of the max timestamp + */ + default Optional<Long> offsetOfMaxTimestamp() { + if (magic() == RecordBatch.MAGIC_VALUE_V0) return Optional.empty(); + long maxTimestamp = maxTimestamp(); + try (CloseableIterator<Record> iter = streamingIterator(BufferSupplier.create())) { + while (iter.hasNext()) { + Record record = iter.next(); + if (maxTimestamp == record.timestamp()) return Optional.of(record.offset()); + } + } + return Optional.empty(); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/record/Records.java b/clients/src/main/java/org/apache/kafka/common/record/Records.java index 9b2352a932..e1ea4f5364 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/Records.java +++ b/clients/src/main/java/org/apache/kafka/common/record/Records.java @@ -59,7 +59,7 @@ public interface Records extends TransferableRecords { * Get the record batches. Note that the signature allows subclasses * to return a more specific batch type. This enables optimizations such as in-place offset * assignment (see for example {@link DefaultRecordBatch}), and partial reading of - * record data (see {@link FileLogInputStream.FileChannelRecordBatch#magic()}. + * record data, see {@link FileLogInputStream.FileChannelRecordBatch#magic()}. * @return An iterator over the record batches of the log */ Iterable<? extends RecordBatch> batches(); diff --git a/clients/src/main/java/org/apache/kafka/common/record/RecordsSend.java b/clients/src/main/java/org/apache/kafka/common/record/RecordsSend.java index a4b291b0e2..837e17e9db 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/RecordsSend.java +++ b/clients/src/main/java/org/apache/kafka/common/record/RecordsSend.java @@ -72,7 +72,7 @@ protected T records() { /** * Write records up to `remaining` bytes to `channel`. The implementation is allowed to be stateful. The contract * from the caller is that the first invocation will be with `previouslyWritten` equal to 0, and `remaining` equal to - * the to maximum bytes we want to write the to `channel`. `previouslyWritten` and `remaining` will be adjusted + * the maximum bytes we want to write to `channel`. `previouslyWritten` and `remaining` will be adjusted * appropriately for every subsequent invocation. See {@link #writeTo} for example expected usage.
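As a usage sketch for the new RecordBatch#offsetOfMaxTimestamp() default method added above (illustration only, not part of the patch; the wrapper class and the assumption of an already-built MemoryRecords instance are mine):

    import java.util.Optional;

    import org.apache.kafka.common.record.MemoryRecords;
    import org.apache.kafka.common.record.RecordBatch;

    public class OffsetOfMaxTimestampUsageSketch {
        // `records` is assumed to be an already-built MemoryRecords instance
        static void printOffsetsOfMaxTimestamp(MemoryRecords records) {
            for (RecordBatch batch : records.batches()) {
                Optional<Long> offset = batch.offsetOfMaxTimestamp();   // default method added by this patch
                System.out.println("maxTimestamp=" + batch.maxTimestamp()
                        + ", offsetOfMaxTimestamp=" + offset.map(String::valueOf).orElse("none (magic v0)"));
            }
        }
    }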
* @param channel The channel to write to * @param previouslyWritten Bytes written in previous calls to {@link #writeTo(TransferableChannel, int, int)}; 0 if being called for the first time diff --git a/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java b/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java index 8883a035fa..c2134cb031 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java +++ b/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common.record; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; @@ -110,7 +111,7 @@ private static MemoryRecordsBuilder convertRecordBatch(byte magic, ByteBuffer bu final TimestampType timestampType = batch.timestampType(); long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? batch.maxTimestamp() : RecordBatch.NO_TIMESTAMP; - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, batch.compressionType(), + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, Compression.of(batch.compressionType()).build(), timestampType, recordBatchAndRecords.baseOffset, logAppendTime); for (Record record : recordBatchAndRecords.records) { // Down-convert this record. Ignore headers when down-converting to V0 and V1 since they are not supported diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractControlRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractControlRequest.java index 6516de3f9a..f4c7f5dbcf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractControlRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractControlRequest.java @@ -31,7 +31,7 @@ public enum Type { FULL(2); private final byte type; - private Type(int type) { + Type(int type) { this.type = (byte) type; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java index 45f85a6d36..0aae47aa24 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java @@ -87,7 +87,7 @@ public AlterPartitionRequest build(short version) { topicData.partitions().forEach(partitionData -> { // The newIsrWithEpochs will be empty after build. Then we can skip the conversion if the build // is called again. 
- if (partitionData.newIsrWithEpochs().size() > 0) { + if (!partitionData.newIsrWithEpochs().isEmpty()) { List newIsr = new ArrayList<>(partitionData.newIsrWithEpochs().size()); partitionData.newIsrWithEpochs().forEach(brokerState -> { newIsr.add(brokerState.brokerId()); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java index 68a87e6bf4..d828e1063b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java @@ -83,7 +83,7 @@ public Map partitionDirs() { data.dirs().forEach(alterDir -> alterDir.topics().forEach(topic -> topic.partitions().forEach(partition -> - result.put(new TopicPartition(topic.name(), partition.intValue()), alterDir.path()))) + result.put(new TopicPartition(topic.name(), partition), alterDir.path()))) ); return result; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java index f8f364f06f..04ee2014d1 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java @@ -166,13 +166,6 @@ public static ApiVersionsResponse createApiVersionsResponse( ); } - public static ApiVersionCollection filterApis( - RecordVersion minRecordVersion, - ApiMessageType.ListenerType listenerType - ) { - return filterApis(minRecordVersion, listenerType, false, false); - } - public static ApiVersionCollection filterApis( RecordVersion minRecordVersion, ApiMessageType.ListenerType listenerType, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java index e92f03d6b7..25d6851299 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java @@ -17,6 +17,8 @@ package org.apache.kafka.common.requests; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartitionInfo; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; @@ -27,6 +29,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; public class DescribeTopicPartitionsResponse extends AbstractResponse { private final DescribeTopicPartitionsResponseData data; @@ -80,4 +83,16 @@ public static DescribeTopicPartitionsResponse parse(ByteBuffer buffer, short ver return new DescribeTopicPartitionsResponse( new DescribeTopicPartitionsResponseData(new ByteBufferAccessor(buffer), version)); } + + public static TopicPartitionInfo partitionToTopicPartitionInfo( + DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition partition, + Map nodes) { + return new TopicPartitionInfo( + partition.partitionIndex(), + nodes.get(partition.leaderId()), + partition.replicaNodes().stream().map(id -> nodes.getOrDefault(id, new Node(id, "", -1))).collect(Collectors.toList()), + partition.isrNodes().stream().map(id -> 
nodes.getOrDefault(id, new Node(id, "", -1))).collect(Collectors.toList()), + partition.eligibleLeaderReplicas().stream().map(id -> nodes.getOrDefault(id, new Node(id, "", -1))).collect(Collectors.toList()), + partition.lastKnownElr().stream().map(id -> nodes.getOrDefault(id, new Node(id, "", -1))).collect(Collectors.toList())); + } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java index a75dd323d8..a4a389b4b5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java @@ -45,12 +45,10 @@ public String toString() { } private final DescribeUserScramCredentialsRequestData data; - private final short version; private DescribeUserScramCredentialsRequest(DescribeUserScramCredentialsRequestData data, short version) { super(ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS, version); this.data = data; - this.version = version; } public static DescribeUserScramCredentialsRequest parse(ByteBuffer buffer, short version) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java index 3a286225a6..2065a15d94 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -325,8 +324,8 @@ public String toString() { append(", maxBytes=").append(maxBytes). append(", fetchData=").append(toFetch). append(", isolationLevel=").append(isolationLevel). - append(", removed=").append(Utils.join(removed, ", ")). - append(", replaced=").append(Utils.join(replaced, ", ")). + append(", removed=").append(removed.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). + append(", replaced=").append(replaced.stream().map(TopicIdPartition::toString).collect(Collectors.joining(", "))). append(", metadata=").append(metadata). append(", rackId=").append(rackId). 
append(")"); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java index fcac7de545..5cd5410bd1 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java @@ -79,10 +79,6 @@ public FindCoordinatorRequestData data() { public static class NoBatchedFindCoordinatorsException extends UnsupportedVersionException { private static final long serialVersionUID = 1L; - public NoBatchedFindCoordinatorsException(String message, Throwable cause) { - super(message, cause); - } - public NoBatchedFindCoordinatorsException(String message) { super(message); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java index 9433e31213..222097502b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java @@ -78,12 +78,10 @@ public String toString() { } private final IncrementalAlterConfigsRequestData data; - private final short version; public IncrementalAlterConfigsRequest(IncrementalAlterConfigsRequestData data, short version) { super(ApiKeys.INCREMENTAL_ALTER_CONFIGS, version); this.data = data; - this.version = version; } public static IncrementalAlterConfigsRequest parse(ByteBuffer buffer, short version) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java index 251810e30e..8caddb0054 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.utils.FlattenedIterator; -import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -120,7 +119,7 @@ public String toString() { .append(", brokerEpoch=").append(brokerEpoch) .append(", partitionStates=").append(partitionStates) .append(", topicIds=").append(topicIds) - .append(", liveLeaders=(").append(Utils.join(liveLeaders, ", ")).append(")") + .append(", liveLeaders=(").append(liveLeaders.stream().map(Node::toString).collect(Collectors.joining(", "))).append(")") .append(")"); return bld.toString(); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java index bc6c387c5e..dc60221d18 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -112,7 +111,7 @@ public Map errors() { public Map errorsByTopicId() { Map errors = new HashMap<>(); for (MetadataResponseTopic metadata : data.topics()) { - if 
(metadata.topicId() == Uuid.ZERO_UUID) { + if (metadata.topicId().equals(Uuid.ZERO_UUID)) { throw new IllegalStateException("Use errors() when managing topic using topic name"); } if (metadata.errorCode() != Errors.NONE.code()) @@ -416,9 +415,9 @@ public String toString() { ", partition=" + topicPartition + ", leader=" + leaderId + ", leaderEpoch=" + leaderEpoch + - ", replicas=" + Utils.join(replicaIds, ",") + - ", isr=" + Utils.join(inSyncReplicaIds, ",") + - ", offlineReplicas=" + Utils.join(offlineReplicaIds, ",") + ')'; + ", replicas=" + replicaIds.stream().map(Object::toString).collect(Collectors.joining(",")) + + ", isr=" + inSyncReplicaIds.stream().map(Object::toString).collect(Collectors.joining(",")) + + ", offlineReplicas=" + offlineReplicaIds.stream().map(Object::toString).collect(Collectors.joining(",")) + ')'; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java index df746b56c8..940a16f0a8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/StopReplicaRequest.java @@ -28,7 +28,6 @@ import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.utils.MappedIterator; -import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -101,7 +100,7 @@ public String toString() { append(", controllerEpoch=").append(controllerEpoch). append(", brokerEpoch=").append(brokerEpoch). append(", deletePartitions=").append(deletePartitions). - append(", topicStates=").append(Utils.join(topicStates, ",")). + append(", topicStates=").append(topicStates.stream().map(StopReplicaTopicState::toString).collect(Collectors.joining(","))). append(")"); return bld.toString(); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java index 245fff7ffc..b846fb7b0f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateMetadataRequest.java @@ -30,7 +30,6 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.FlattenedIterator; -import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -38,6 +37,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static java.util.Collections.singletonList; @@ -141,7 +141,7 @@ public String toString() { append(", type=").append(updateType). append(", brokerEpoch=").append(brokerEpoch). append(", partitionStates=").append(partitionStates). - append(", liveBrokers=").append(Utils.join(liveBrokers, ", ")). + append(", liveBrokers=").append(liveBrokers.stream().map(UpdateMetadataBroker::toString).collect(Collectors.joining(", "))). 
append(")"); return bld.toString(); } diff --git a/clients/src/main/java/org/apache/kafka/common/resource/Resource.java b/clients/src/main/java/org/apache/kafka/common/resource/Resource.java index ebc5b8e128..3c1cb83fba 100644 --- a/clients/src/main/java/org/apache/kafka/common/resource/Resource.java +++ b/clients/src/main/java/org/apache/kafka/common/resource/Resource.java @@ -70,7 +70,7 @@ public String name() { @Override public String toString() { - return "(resourceType=" + resourceType + ", name=" + ((name == null) ? "" : name) + ")"; + return "(resourceType=" + resourceType + ", name=" + name + ")"; } /** diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java index 34f2952bff..b072baefbb 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java @@ -77,7 +77,7 @@ public static JaasContext loadServerContext(ListenerName listenerName, String me /** * Returns an instance of this class. * - * If JAAS configuration property @link SaslConfigs#SASL_JAAS_CONFIG} is specified, + * If JAAS configuration property {@link SaslConfigs#SASL_JAAS_CONFIG} is specified, * the configuration object is created by parsing the property value. Otherwise, the default Configuration * is returned. The context name is always `KafkaClient`. * diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java index a84c4f7290..7a0bee37f4 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java @@ -682,7 +682,7 @@ public Optional pollResponseReceivedDuringReauthentication() { public void setAuthenticationEndAndSessionReauthenticationTimes(long nowNanos) { authenticationEndNanos = nowNanos; - long sessionLifetimeMsToUse = 0; + long sessionLifetimeMsToUse; if (positiveSessionLifetimeMs != null) { // pick a random percentage between 85% and 95% for session re-authentication double pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount = 0.85; diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslInternalConfigs.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslInternalConfigs.java index c1793ebc31..d55ad04636 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslInternalConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslInternalConfigs.java @@ -21,7 +21,7 @@ public class SaslInternalConfigs { /** * The server (broker) specifies a positive session length in milliseconds to a - * SASL client when {@link BrokerSecurityConfigs#CONNECTIONS_MAX_REAUTH_MS} is + * SASL client when {@link BrokerSecurityConfigs#CONNECTIONS_MAX_REAUTH_MS_CONFIG} is * positive as per KIP * 368: Allow SASL Connections to Periodically Re-Authenticate. 
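The SaslClientAuthenticator hunk above keeps the comment about picking a random percentage between 85% and 95% of the positive session lifetime before re-authenticating, to leave headroom for network latency and clock drift. A minimal sketch of that window calculation follows (illustration only; the class and the shortened variable names are assumptions, not the ones used in the real authenticator):

    import java.util.concurrent.ThreadLocalRandom;

    public class ReauthWindowSketch {
        // Schedule re-authentication at a random point between 85% and 95% of the
        // broker-provided session lifetime, so the client re-authenticates before expiry.
        static long sessionReauthDelayMs(long positiveSessionLifetimeMs) {
            double pctWindowFactor = 0.85;   // start of the window
            double pctWindowJitter = 0.10;   // window width (85% .. 95%)
            double pct = pctWindowFactor + pctWindowJitter * ThreadLocalRandom.current().nextDouble();
            return (long) (positiveSessionLifetimeMs * pct);
        }

        public static void main(String[] args) {
            // e.g. with a 1-hour session lifetime the re-auth delay lands between 51 and 57 minutes
            System.out.println(sessionReauthDelayMs(60 * 60 * 1000L));
        }
    }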
The session diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index 06e8dcd0bc..ee0ed8007d 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -182,7 +182,7 @@ public SaslServerAuthenticator(Map configs, throw new IllegalArgumentException("Callback handler not specified for SASL mechanism " + mechanism); if (!subjects.containsKey(mechanism)) throw new IllegalArgumentException("Subject cannot be null for SASL mechanism " + mechanism); - LOG.trace("{} for mechanism={}: {}", BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS, mechanism, + LOG.trace("{} for mechanism={}: {}", BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG, mechanism, connectionsMaxReauthMsByMechanism.get(mechanism)); } diff --git a/clients/src/main/java/org/apache/kafka/common/security/kerberos/KerberosLogin.java b/clients/src/main/java/org/apache/kafka/common/security/kerberos/KerberosLogin.java index f2b25a59ff..f7809e4cac 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/kerberos/KerberosLogin.java +++ b/clients/src/main/java/org/apache/kafka/common/security/kerberos/KerberosLogin.java @@ -165,7 +165,7 @@ public LoginContext login() throws LoginException { // minTimeBeforeRelogin. Will not sleep less than minTimeBeforeRelogin, unless doing so // would cause ticket expiration. if ((nextRefresh > expiry) || (minTimeBeforeRelogin > expiry - now)) { - // expiry is before next scheduled refresh). + // expiry is before next scheduled refresh. log.info("[Principal={}]: Refreshing now because expiry is before next scheduled refresh time.", principal); nextRefresh = now; } else { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.java index ee443ede7a..94c3c95714 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.java @@ -78,7 +78,7 @@ public interface OAuthBearerToken { * epoch, as per RFC * 6749 Section 1.4 * - * @return the token'slifetime, expressed as the number of milliseconds since + * @return the token's lifetime, expressed as the number of milliseconds since * the epoch, as per * RFC 6749 * Section 1.4. diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java index 2d296b545b..d39a06787f 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java @@ -55,7 +55,7 @@ * *

    * This {@link AuthenticateCallbackHandler} is enabled in the broker configuration by setting the - * {@link org.apache.kafka.common.config.internals.BrokerSecurityConfigs#SASL_SERVER_CALLBACK_HANDLER_CLASS} + * {@link org.apache.kafka.common.config.internals.BrokerSecurityConfigs#SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG} * like so: * * @@ -86,7 +86,7 @@ * validation callback handler: * *