diff --git a/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/ProducerConfigUtil.java b/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/ProducerConfigUtil.java
new file mode 100644
index 00000000..3c87337b
--- /dev/null
+++ b/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/eventhandling/util/ProducerConfigUtil.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2010-2023. Axon Framework
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.axonframework.extensions.kafka.eventhandling.util;
+
+import io.cloudevents.CloudEvent;
+import io.cloudevents.kafka.CloudEventSerializer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringSerializer;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test utility for creating a {@link KafkaProducer} based on a minimal {@link ProducerConfig}.
+ *
+ * @author Nakul Mishra
+ * @author Steven van Beelen
+ */
+public abstract class ProducerConfigUtil {
+
+    private ProducerConfigUtil() {
+        // Utility class
+    }
+
+    /**
+     * Creates a {@link KafkaProducer} with the minimal configuration required to publish {@link CloudEvent}s.
+     *
+     * @param bootstrapServer the Kafka container address
+     * @return a {@link KafkaProducer} with {@link String} keys and {@link CloudEvent} values
+     */
+    public static KafkaProducer<String, CloudEvent> newProducer(String bootstrapServer) {
+        Map<String, Object> configs = new HashMap<>();
+        configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
+        configs.put(ProducerConfig.RETRIES_CONFIG, 10);
+        configs.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
+        configs.put(ProducerConfig.LINGER_MS_CONFIG, 1);
+        configs.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
+        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CloudEventSerializer.class);
+        return new KafkaProducer<>(configs);
+    }
+}
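A quick usage sketch of this helper, not part of the change itself; the broker address and topic name are placeholders:

import io.cloudevents.CloudEvent;
import io.cloudevents.core.v1.CloudEventBuilder;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.net.URI;
import java.util.UUID;

// Create a producer against a running broker and publish a single CloudEvent.
KafkaProducer<String, CloudEvent> producer = ProducerConfigUtil.newProducer("localhost:9092");
CloudEvent event = new CloudEventBuilder()
        .withId(UUID.randomUUID().toString())
        .withSource(URI.create("source"))
        .withType("java.util.String")
        .build();
producer.send(new ProducerRecord<>("some-topic", event)); // "some-topic" is illustrative
producer.close();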
diff --git a/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/integration/TokenReplayIntegrationTest.java b/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/integration/TokenReplayIntegrationTest.java
new file mode 100644
index 00000000..495d5c32
--- /dev/null
+++ b/kafka-spring-boot-3-integrationtests/src/test/java/org/axonframework/extensions/kafka/integration/TokenReplayIntegrationTest.java
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2010-2023. Axon Framework
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.axonframework.extensions.kafka.integration;
+
+import io.cloudevents.CloudEvent;
+import io.cloudevents.core.v1.CloudEventBuilder;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.axonframework.config.Configurer;
+import org.axonframework.config.EventProcessingConfiguration;
+import org.axonframework.config.ProcessingGroup;
+import org.axonframework.eventhandling.EventHandler;
+import org.axonframework.eventhandling.EventMessage;
+import org.axonframework.eventhandling.ResetHandler;
+import org.axonframework.eventhandling.TrackingEventProcessor;
+import org.axonframework.extensions.kafka.eventhandling.consumer.streamable.StreamableKafkaMessageSource;
+import org.junit.jupiter.api.*;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.context.runner.ApplicationContextRunner;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.EnableMBeanExport;
+import org.springframework.jmx.support.RegistrationPolicy;
+import org.springframework.stereotype.Component;
+import org.springframework.test.context.ContextConfiguration;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.redpanda.RedpandaContainer;
+
+import java.net.URI;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.IntStream;
+
+import static org.awaitility.Awaitility.await;
+import static org.axonframework.extensions.kafka.eventhandling.util.ProducerConfigUtil.newProducer;
+import static org.junit.jupiter.api.Assertions.*;
+
+@Testcontainers
+class TokenReplayIntegrationTest {
+
+    @Container
+    private static final RedpandaContainer REDPANDA_CONTAINER = new RedpandaContainer(
+            "docker.redpanda.com/vectorized/redpanda:v22.2.1");
+    private ApplicationContextRunner testApplicationContext;
+
+    @BeforeEach
+    void setUp() {
+        testApplicationContext = new ApplicationContextRunner()
+                .withPropertyValues("axon.axonserver.enabled=false")
+                .withPropertyValues("axon.kafka.fetcher.enabled=true")
+                .withPropertyValues("axon.kafka.publisher.enabled=false")
+                .withPropertyValues("axon.kafka.message-converter-mode=cloud_event")
+                .withPropertyValues("axon.kafka.consumer.event-processor-mode=tracking")
+                .withPropertyValues("axon.kafka.consumer.bootstrap-servers=" + REDPANDA_CONTAINER.getBootstrapServers())
+                .withUserConfiguration(DefaultContext.class);
+    }
+
+    @Test
+    void afterResetShouldOnlyProcessTenEventsIfTimeSetMidway() {
+        testApplicationContext
+                .withPropertyValues("axon.kafka.default-topic=counterfeed-1")
+                .run(context -> {
+                    Counter counter = context.getBean(Counter.class);
+                    assertNotNull(counter);
+                    assertEquals(0, counter.getCount());
+                    Instant between = addRecords("counterfeed-1");
+                    await().atMost(Duration.ofSeconds(5L)).untilAsserted(
+                            () -> assertEquals(20, counter.getCount())
+                    );
+                    EventProcessingConfiguration processingConfiguration =
+                            context.getBean(EventProcessingConfiguration.class);
+                    assertNotNull(processingConfiguration);
+                    processingConfiguration
+                            .eventProcessorByProcessingGroup(
+                                    "counterfeedprocessor",
+                                    TrackingEventProcessor.class
+                            )
+                            .ifPresent(tep -> {
+                                tep.shutDown();
+                                tep.resetTokens(tep.getMessageSource().createTokenAt(between));
+                                assertEquals(0, counter.getCount());
+                                tep.start();
+                            });
+                    await().atMost(Duration.ofSeconds(5L)).untilAsserted(
+                            () -> assertEquals(10, counter.getCount())
+                    );
+                });
+    }
+
+    @Test
+    void afterResetShouldOnlyProcessNewMessages() {
+        testApplicationContext
+                .withPropertyValues("axon.kafka.default-topic=counterfeed-2")
+                .run(context -> {
+                    Counter counter = context.getBean(Counter.class);
+                    assertNotNull(counter);
+                    assertEquals(0, counter.getCount());
+                    addRecords("counterfeed-2");
+                    await().atMost(Duration.ofSeconds(5L)).untilAsserted(
+                            () -> assertEquals(20, counter.getCount())
+                    );
+                    EventProcessingConfiguration processingConfiguration =
+                            context.getBean(EventProcessingConfiguration.class);
+                    assertNotNull(processingConfiguration);
+                    processingConfiguration
+                            .eventProcessorByProcessingGroup(
+                                    "counterfeedprocessor",
+                                    TrackingEventProcessor.class
+                            )
+                            .ifPresent(tep -> {
+                                tep.shutDown();
+                                tep.resetTokens(tep.getMessageSource().createHeadToken());
+                                assertEquals(0, counter.getCount());
+                                tep.start();
+                            });
+                    addRecords("counterfeed-2");
+                    await().atMost(Duration.ofSeconds(5L)).untilAsserted(
+                            () -> assertEquals(20, counter.getCount())
+                    );
+                });
+    }
+
+    private Instant addRecords(String topic) {
+        Producer<String, CloudEvent> producer = newProducer(REDPANDA_CONTAINER.getBootstrapServers());
+        sendTenMessages(producer, topic);
+        Instant now = Instant.now();
+        sendTenMessages(producer, topic);
+        producer.close();
+        return now;
+    }
+
+    private void sendMessage(Producer<String, CloudEvent> producer, String topic) {
+        CloudEvent event = new CloudEventBuilder()
+                .withId(UUID.randomUUID().toString())
+                .withSource(URI.create("source"))
+                .withData("Payload".getBytes())
+                .withType("java.util.String")
+                .build();
+        ProducerRecord<String, CloudEvent> record = new ProducerRecord<>(topic, 0, null, null, event);
+        producer.send(record);
+    }
+
+    private void sendTenMessages(Producer<String, CloudEvent> producer, String topic) {
+        IntStream.range(0, 10).forEach(i -> sendMessage(producer, topic));
+        producer.flush();
+    }
+
+    @ContextConfiguration
+    @EnableAutoConfiguration
+    @EnableMBeanExport(registration = RegistrationPolicy.IGNORE_EXISTING)
+    public static class DefaultContext {
+
+        @Bean
+        Counter counter() {
+            return new Counter();
+        }
+
+        @Bean
+        KafkaEventHandler kafkaEventHandler(Counter counter) {
+            return new KafkaEventHandler(counter);
+        }
+
+        @Autowired
+        public void registerProcessor(
+                Configurer configurer,
+                StreamableKafkaMessageSource<?, ?> streamableKafkaMessageSource
+        ) {
+            configurer.eventProcessing()
+                      .registerTrackingEventProcessor("counterfeedprocessor", c -> streamableKafkaMessageSource);
+        }
+    }
+
+    private static class Counter {
+
+        private final AtomicInteger counter = new AtomicInteger();
+
+        int getCount() {
+            return counter.get();
+        }
+
+        void count() {
+            counter.incrementAndGet();
+        }
+
+        void reset() {
+            counter.set(0);
+        }
+    }
+
+    @SuppressWarnings("unused")
+    @Component
+    @ProcessingGroup("counterfeedprocessor")
+    private static class KafkaEventHandler {
+
+        private final Counter counter;
+
+        private KafkaEventHandler(Counter counter) {
+            this.counter = counter;
+        }
+
+        @EventHandler
+        void on(EventMessage<?> eventMessage) {
+            counter.count();
+        }
+
+        @ResetHandler
+        void onReset() {
+            counter.reset();
+        }
+    }
+}
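For readers skimming the PR, the replay recipe both tests rely on is the shutDown, resetTokens, start sequence. A minimal sketch; the processing group name matches the test setup, while the chosen instant is illustrative:

import java.time.Instant;
import org.axonframework.config.EventProcessingConfiguration;
import org.axonframework.eventhandling.TrackingEventProcessor;

// Rewind a tracking processor to a point in time: events before 'point' stay
// untouched, everything published after it is handed to the handlers again.
static void replayFrom(EventProcessingConfiguration config, Instant point) {
    config.eventProcessorByProcessingGroup("counterfeedprocessor", TrackingEventProcessor.class)
          .ifPresent(tep -> {
              tep.shutDown();                                               // stop consuming
              tep.resetTokens(tep.getMessageSource().createTokenAt(point)); // move the token
              tep.start();                                                  // resume from there
          });
}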
diff --git a/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtil.java b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtil.java
index cf7690e9..5a1e442a 100644
--- a/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtil.java
+++ b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtil.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2022. Axon Framework
+ * Copyright (c) 2010-2023. Axon Framework
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -54,18 +54,11 @@ private ConsumerSeekUtil() {
      */
     public static void seekToCurrentPositions(Consumer<?, ?> consumer, Supplier<KafkaTrackingToken> tokenSupplier,
                                               List<String> topics) {
-        List<TopicPartition> all = consumer.listTopics().entrySet()
-                                           .stream()
-                                           .filter(e -> topics.contains(e.getKey()))
-                                           .flatMap(e -> e.getValue().stream())
-                                           .map(partitionInfo -> new TopicPartition(partitionInfo.topic(),
-                                                                                    partitionInfo.partition()))
-                                           .collect(Collectors.toList());
+        List<TopicPartition> all = topicPartitions(consumer, topics);
         consumer.assign(all);
         KafkaTrackingToken currentToken = tokenSupplier.get();
+        Map<TopicPartition, Long> tokenPartitionPositions = currentToken.getPositions();
        all.forEach(assignedPartition -> {
-            Map<TopicPartition, Long> tokenPartitionPositions = currentToken.getPositions();
-
             long offset = 0L;
             if (tokenPartitionPositions.containsKey(assignedPartition)) {
                 offset = tokenPartitionPositions.get(assignedPartition) + 1;
@@ -75,4 +68,21 @@ public static void seekToCurrentPositions(Consumer<?, ?> consumer, Supplier<Kafk
             consumer.seek(assignedPartition, offset);
         });
     }
+
+    /**
+     * Gets all the {@link TopicPartition}s belonging to the given {@code topics}.
+     *
+     * @param consumer a Kafka {@link Consumer}
+     * @param topics   a list of topic names
+     * @return a list of all the {@link TopicPartition}s for the given {@code topics}
+     */
+    public static List<TopicPartition> topicPartitions(Consumer<?, ?> consumer, List<String> topics) {
+        return consumer.listTopics().entrySet()
+                       .stream()
+                       .filter(e -> topics.contains(e.getKey()))
+                       .flatMap(e -> e.getValue().stream())
+                       .map(partitionInfo -> new TopicPartition(partitionInfo.topic(),
+                                                                partitionInfo.partition()))
+                       .collect(Collectors.toList());
+    }
 }
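The seek semantics this refactoring preserves, in short: a token position is the offset of the last handled record, so the consumer resumes one past it, and partitions absent from the token start at offset 0. A hedged usage sketch; the factory, token, and value type are assumed to be in scope and are illustrative:

import java.util.Collections;
import org.apache.kafka.clients.consumer.Consumer;

// Assign and position a consumer from a stored KafkaTrackingToken: a stored
// position p leads to seek(p + 1); unknown partitions are sought to offset 0.
Consumer<String, byte[]> consumer = consumerFactory.createConsumer(null);
ConsumerSeekUtil.seekToCurrentPositions(consumer, () -> token, Collections.singletonList("Axon.Events"));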
diff --git a/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtil.java b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtil.java
new file mode 100644
index 00000000..045edd74
--- /dev/null
+++ b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtil.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010-2023. Axon Framework
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.axonframework.extensions.kafka.eventhandling.consumer.streamable;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
+import org.apache.kafka.common.TopicPartition;
+
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import javax.annotation.Nonnull;
+
+import static org.axonframework.extensions.kafka.eventhandling.consumer.ConsumerSeekUtil.topicPartitions;
+
+/**
+ * Contains static utility functions related to the Kafka consumer, used to find the correct positions.
+ *
+ * @author Gerard Klijs
+ * @since 4.8.0
+ */
+class ConsumerPositionsUtil {
+
+    private ConsumerPositionsUtil() {
+        // prevent instantiation
+    }
+
+    static Map<TopicPartition, Long> getPositionsBasedOnTime(
+            @Nonnull Consumer<?, ?> consumer,
+            @Nonnull List<String> topics,
+            @Nonnull Instant rawDefaultAt
+    ) {
+        List<TopicPartition> all = topicPartitions(consumer, topics);
+        Map<TopicPartition, Long> positions = new HashMap<>();
+        OffsetSupplier offsetSupplier = new OffsetSupplier(consumer, rawDefaultAt, all);
+        all.forEach(assignedPartition -> {
+            Long offset = offsetSupplier.getOffset(assignedPartition);
+            // if it's 0, we would otherwise miss the first event
+            if (offset > 1) {
+                positions.put(assignedPartition, offset - 1);
+            }
+        });
+        return positions;
+    }
+
+    static Map<TopicPartition, Long> getHeadPositions(
+            @Nonnull Consumer<?, ?> consumer,
+            @Nonnull List<String> topics
+    ) {
+        List<TopicPartition> all = topicPartitions(consumer, topics);
+        Map<TopicPartition, Long> positions = new HashMap<>();
+        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(all);
+        endOffsets.forEach((assignedPartition, offset) -> {
+            // if it's 0, we would otherwise miss the first event
+            if (offset > 1) {
+                positions.put(assignedPartition, offset - 1);
+            }
+        });
+        return positions;
+    }
+
+    private static class OffsetSupplier {
+
+        private final Map<TopicPartition, OffsetAndTimestamp> partitionOffsetMap;
+        private final Map<TopicPartition, Long> endOffsets;
+
+        private OffsetSupplier(Consumer<?, ?> consumer, Instant rawDefaultAt, List<TopicPartition> all) {
+            long defaultAt = rawDefaultAt.toEpochMilli();
+            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
+            all.forEach(tp -> timestampsToSearch.put(tp, defaultAt));
+            partitionOffsetMap = consumer.offsetsForTimes(timestampsToSearch);
+            endOffsets = consumer.endOffsets(all);
+        }
+
+        private Optional<Long> getDefaultOffset(TopicPartition assignedPartition) {
+            return Optional.ofNullable(partitionOffsetMap.get(assignedPartition))
+                           .map(OffsetAndTimestamp::offset);
+        }
+
+        private long getEndOffset(TopicPartition assignedPartition) {
+            return endOffsets.get(assignedPartition);
+        }
+
+        private Long getOffset(TopicPartition assignedPartition) {
+            return getDefaultOffset(assignedPartition).orElseGet(() -> getEndOffset(assignedPartition));
+        }
+    }
+}
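To make the off-by-one explicit: Kafka's end offset is the offset the next record will receive, while an Axon token position stores the last handled offset. A worked example with illustrative values:

// A partition holding records at offsets 0..9 reports an end offset of 10.
long endOffset = 10L;
long headPosition = endOffset - 1;    // 9: the last existing record counts as handled
long resumeOffset = headPosition + 1; // 10: ConsumerSeekUtil resumes with new records only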
diff --git a/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/StreamableKafkaMessageSource.java b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/StreamableKafkaMessageSource.java
index 7361eaf2..898e4945 100644
--- a/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/StreamableKafkaMessageSource.java
+++ b/kafka/src/main/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/StreamableKafkaMessageSource.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2022. Axon Framework
+ * Copyright (c) 2010-2023. Axon Framework
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -38,6 +38,7 @@
 import org.slf4j.LoggerFactory;
 
 import java.lang.invoke.MethodHandles;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -53,8 +54,9 @@
  * Implementation of the {@link StreamableMessageSource} that reads messages from a Kafka topic using the provided
  * {@link Fetcher}. Will create new {@link Consumer} instances for every call of {@link #openStream(TrackingToken)}, for
  * which it will create a unique Consumer Group Id. The latter ensures that we can guarantee that each Consumer Group
- * receives all messages, so that the {@link org.axonframework.eventhandling.TrackingEventProcessor} and it's {@link
- * org.axonframework.eventhandling.async.SequencingPolicy} are in charge of partitioning the load instead of Kafka.
+ * receives all messages, so that the {@link org.axonframework.eventhandling.TrackingEventProcessor} and its
+ * {@link org.axonframework.eventhandling.async.SequencingPolicy} are in charge of partitioning the load instead of
+ * Kafka.
  *
  * @param <K> the key of the {@link ConsumerRecords} to consume, fetch and convert
  * @param <V> the value type of {@link ConsumerRecords} to consume, fetch and convert
@@ -76,8 +78,8 @@ public class StreamableKafkaMessageSource<K, V> implements StreamableMessageSou
     /**
      * Instantiate a {@link StreamableKafkaMessageSource} based on the fields contained in the {@link Builder}.
      * <p>
-     * Will assert that the {@link ConsumerFactory} and {@link Fetcher} are not {@code null}. An {@link
-     * AxonConfigurationException} is thrown if any of them is not the case.
+     * Will assert that the {@link ConsumerFactory} and {@link Fetcher} are not {@code null}. An
+     * {@link AxonConfigurationException} is thrown when this is not the case.
      *
      * @param builder the {@link Builder} used to instantiate a {@link StreamableKafkaMessageSource} instance
      */
@@ -95,9 +97,9 @@ protected StreamableKafkaMessageSource(Builder<K, V> builder) {
      * <p>
      * The {@code topics} list is defaulted to single entry of {@code "Axon.Events"}, {@code groupIdPrefix} defaults to
      * {@code "Axon.Streamable.Consumer-"} and it's {@code groupIdSuffixFactory} to a {@link UUID#randomUUID()}
-     * operation, the {@link KafkaMessageConverter} to a {@link DefaultKafkaMessageConverter} using the {@link
-     * XStreamSerializer} and the {@code bufferFactory} the {@link SortedKafkaMessageBuffer} constructor. The {@link
-     * ConsumerFactory} and {@link Fetcher} are hard requirements and as such should be provided.
+     * operation, the {@link KafkaMessageConverter} to a {@link DefaultKafkaMessageConverter} using the
+     * {@link XStreamSerializer} and the {@code bufferFactory} the {@link SortedKafkaMessageBuffer} constructor. The
+     * {@link ConsumerFactory} and {@link Fetcher} are hard requirements and as such should be provided.
      *
      * @param <K> the key of the {@link ConsumerRecords} to consume, fetch and convert
      * @param <V> the value type of {@link ConsumerRecords} to consume, fetch and convert
@@ -110,8 +112,8 @@ public static <K, V> Builder<K, V> builder() {
     /**
      * {@inheritDoc}
      * <p>
-     * The stream is filled by polling {@link ConsumerRecords} from the specified {@code topic} with the {@link
-     * Fetcher}. The provided {@code trackingToken} is required to be of type {@link KafkaTrackingToken}.
+     * The stream is filled by polling {@link ConsumerRecords} from the specified {@code topic} with the
+     * {@link Fetcher}. The provided {@code trackingToken} is required to be of type {@link KafkaTrackingToken}.
      */
     @Override
     public BlockingStream<TrackedEventMessage<?>> openStream(TrackingToken trackingToken) {
@@ -127,14 +129,29 @@ public BlockingStream<TrackedEventMessage<?>> openStream(TrackingToken trackingT
         return new KafkaMessageStream(buffer, closeHandler);
     }
 
+    @Override
+    public TrackingToken createHeadToken() {
+        return KafkaTrackingToken.newInstance(ConsumerPositionsUtil.getHeadPositions(
+                consumerFactory.createConsumer(null),
+                topics));
+    }
+
+    @Override
+    public TrackingToken createTokenAt(Instant dateTime) {
+        return KafkaTrackingToken.newInstance(ConsumerPositionsUtil.getPositionsBasedOnTime(
+                consumerFactory.createConsumer(null),
+                topics,
+                dateTime));
+    }
+
     /**
      * Builder class to instantiate a {@link StreamableKafkaMessageSource}.
      * <p>
      * The {@code topics} list is defaulted to single entry of {@code "Axon.Events"}, {@code groupIdPrefix} defaults to
      * {@code "Axon.Streamable.Consumer-"} and it's {@code groupIdSuffixFactory} to a {@link UUID#randomUUID()}
-     * operation, the {@link KafkaMessageConverter} to a {@link DefaultKafkaMessageConverter} using the {@link
-     * XStreamSerializer} and the {@code bufferFactory} the {@link SortedKafkaMessageBuffer} constructor. The {@link
-     * ConsumerFactory} and {@link Fetcher} are hard requirements and as such should be provided.
+     * operation, the {@link KafkaMessageConverter} to a {@link DefaultKafkaMessageConverter} using the
+     * {@link XStreamSerializer} and the {@code bufferFactory} the {@link SortedKafkaMessageBuffer} constructor. The
+     * {@link ConsumerFactory} and {@link Fetcher} are hard requirements and as such should be provided.
      *
      * @param <K> the key of the {@link ConsumerRecords} to consume, fetch and convert
     * @param <V> the value type of {@link ConsumerRecords} to consume, fetch and convert
@@ -149,8 +166,8 @@ public static class Builder<K, V> {
         private Supplier<Serializer> serializer;
 
         /**
-         * Sets the {@link Serializer} used to serialize and deserialize messages. Defaults to a {@link
-         * XStreamSerializer}.
+         * Sets the {@link Serializer} used to serialize and deserialize messages. Defaults to a
+         * {@link XStreamSerializer}.
          *
          * @param serializer a {@link Serializer} used to serialize and deserialize messages
          * @return the current Builder instance, for fluent interfacing
@@ -191,8 +208,8 @@ public Builder<K, V> addTopic(String topic) {
         * Sets the prefix of the Consumer {@code groupId} from which a {@link Consumer} should retrieve records from.
          * Defaults to {@code "Axon.Streamable.Consumer-"}.
          *
-         * @param groupIdPrefix a {@link String} defining the prefix of the Consumer Group id to which a {@link
-         *                      Consumer} should retrieve records from
+         * @param groupIdPrefix a {@link String} defining the prefix of the Consumer Group id to which a
+         *                      {@link Consumer} should retrieve records from
          * @return the current Builder instance, for fluent interfacing
          * @deprecated value is not used anymore, as a {@code groupId} is no longer used. Instead of the group id the
          *             topic partitions are manually assigned, using less resources.
@@ -211,8 +228,8 @@ public Builder<K, V> groupIdPrefix(String groupIdPrefix) {
         * Sets the factory that will provide the suffix of the Consumer {@code groupId} from which a {@link Consumer}
          * should retrieve records from
          *
-         * @param groupIdSuffixFactory a {@link Supplier} of {@link String} providing the suffix of the Consumer {@code
-         *                             groupId} from which a {@link Consumer} should retrieve records from
+         * @param groupIdSuffixFactory a {@link Supplier} of {@link String} providing the suffix of the Consumer
+         *                             {@code groupId} from which a {@link Consumer} should retrieve records from
          * @return the current Builder instance, for fluent interfacing
          * @deprecated value is not used anymore, as a {@code groupId} is no longer used. Instead of the group id the
          *             topic partitions are manually assigned, using less resources
@@ -227,8 +244,8 @@ public Builder<K, V> groupIdSuffixFactory(Supplier<String> groupIdSuffixFactory)
         }
 
         /**
-         * Sets the {@link ConsumerFactory} to be used by this {@link StreamableKafkaMessageSource} to create {@link
-         * Consumer} instances with.
+         * Sets the {@link ConsumerFactory} to be used by this {@link StreamableKafkaMessageSource} to create
+         * {@link Consumer} instances with.
          *
          * @param consumerFactory a {@link ConsumerFactory} to be used by this {@link StreamableKafkaMessageSource} to
          *                        create {@link Consumer} instances with.
@@ -245,8 +262,8 @@ public Builder<K, V> consumerFactory(ConsumerFactory<K, V> consumerFactory) {
          * {@link StreamableKafkaMessageSource} to create {@link Consumer} instances with.
          *
          * @param consumerConfiguration a {@link DefaultConsumerFactory} with the given {@code consumerConfiguration},
-         *                              to be used by this {@link StreamableKafkaMessageSource} to create {@link
-         *                              Consumer} instances with
+         *                              to be used by this {@link StreamableKafkaMessageSource} to create
+         *                              {@link Consumer} instances with
          * @return the current Builder instance, for fluent interfacing
          */
         @SuppressWarnings("unused")
@@ -268,15 +285,15 @@ public Builder<K, V> fetcher(Fetcher<K, V, KafkaEventMessage> fetcher) {
         }
 
         /**
-         * Sets the {@link KafkaMessageConverter} used to convert Kafka messages into {@link
-         * org.axonframework.eventhandling.EventMessage}s. Defaults to a {@link DefaultKafkaMessageConverter} using the
-         * {@link XStreamSerializer}.
+         * Sets the {@link KafkaMessageConverter} used to convert Kafka messages into
+         * {@link org.axonframework.eventhandling.EventMessage}s. Defaults to a {@link DefaultKafkaMessageConverter}
+         * using the {@link XStreamSerializer}.
          * <p>
-         * Note that configuring a MessageConverter on the builder is mandatory if the value type is not {@code
-         * byte[]}.
+         * Note that configuring a MessageConverter on the builder is mandatory if the value type is not
+         * {@code byte[]}.
          *
-         * @param messageConverter a {@link KafkaMessageConverter} used to convert Kafka messages into {@link
-         *                         org.axonframework.eventhandling.EventMessage}s
+         * @param messageConverter a {@link KafkaMessageConverter} used to convert Kafka messages into
+         *                         {@link org.axonframework.eventhandling.EventMessage}s
          * @return the current Builder instance, for fluent interfacing
          */
         public Builder<K, V> messageConverter(KafkaMessageConverter<K, V> messageConverter) {
@@ -286,9 +303,9 @@ public Builder<K, V> messageConverter(KafkaMessageConverter<K, V> messageConvert
         }
 
         /**
-         * Sets the {@code bufferFactory} of type {@link Supplier} with a generic type {@link Buffer} with {@link
-         * KafkaEventMessage}s. Used to create a buffer which will consume the converted Kafka {@link ConsumerRecords}.
-         * Defaults to a {@link SortedKafkaMessageBuffer}.
+         * Sets the {@code bufferFactory} of type {@link Supplier} with a generic type {@link Buffer} with
+         * {@link KafkaEventMessage}s. Used to create a buffer which will consume the converted Kafka
+         * {@link ConsumerRecords}. Defaults to a {@link SortedKafkaMessageBuffer}.
          *
         * @param bufferFactory a {@link Supplier} to create a buffer for the Kafka records fetcher
         * @return the current Builder instance, for fluent interfacing
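With these overrides in place, callers can build time-based tokens straight from the source. A minimal sketch, assuming a configured ConsumerFactory and Fetcher are in scope, and using a byte[] value type so the default converter applies:

import java.time.Duration;
import java.time.Instant;
import org.axonframework.common.stream.BlockingStream;
import org.axonframework.eventhandling.TrackedEventMessage;
import org.axonframework.eventhandling.TrackingToken;

StreamableKafkaMessageSource<String, byte[]> source =
        StreamableKafkaMessageSource.<String, byte[]>builder()
                                    .consumerFactory(consumerFactory) // assumed to exist
                                    .fetcher(fetcher)                 // assumed to exist
                                    .build();
// Replay everything from the last hour...
TrackingToken oneHourAgo = source.createTokenAt(Instant.now().minus(Duration.ofHours(1)));
BlockingStream<TrackedEventMessage<?>> stream = source.openStream(oneHourAgo);
// ...or skip history entirely and only observe new events.
TrackingToken head = source.createHeadToken();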
diff --git a/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtilIntegrationTest.java b/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtilIntegrationTest.java
index a48dada3..721777fc 100644
--- a/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtilIntegrationTest.java
+++ b/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/ConsumerSeekUtilIntegrationTest.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2022. Axon Framework
+ * Copyright (c) 2010-2023. Axon Framework
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -111,7 +111,7 @@ void tearDown() {
     }
 
     @Test
-    void testSeekUsingEmptyTokenConsumerStartsAtPositionZero() {
+    void seekUsingEmptyTokenConsumerStartsAtPositionZero() {
         String topic = "testSeekUsing_EmptyToken_ConsumerStartsAtPositionZero";
         int recordsPerPartitions = 1;
         Producer<String, String> producer = producerFactory.createProducer();
@@ -136,7 +136,7 @@ void testSeekUsingEmptyTokenConsumerStartsAtPositionZero() {
 
     @SuppressWarnings("unchecked")
     @Test
-    void testSeekUsingExistingTokenConsumerStartsAtSpecificPosition() {
+    void seekUsingExistingTokenConsumerStartsAtSpecificPosition() {
         String topic = "testSeekUsing_ExistingToken_ConsumerStartsAtSpecificPosition";
         int recordsPerPartitions = 10;
         Producer<String, String> producer = producerFactory.createProducer();
@@ -170,7 +170,7 @@ void testSeekUsingExistingTokenConsumerStartsAtSpecificPosition() {
     }
 
     @Test
-    void testSeekUsingExistingTokenConsumerStartsAtSpecificPositionAndCanContinueReadingNewRecords() {
+    void seekUsingExistingTokenConsumerStartsAtSpecificPositionAndCanContinueReadingNewRecords() {
         String topic = "testSeekUsing_ExistingToken_ConsumerStartsAtSpecificPosition_AndCanContinueReadingNewRecords";
         int recordsPerPartitions = 10;
         Producer<String, String> testProducer = producerFactory.createProducer();
diff --git a/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtilIntegrationTest.java b/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtilIntegrationTest.java
new file mode 100644
index 00000000..1f06c459
--- /dev/null
+++ b/kafka/src/test/java/org/axonframework/extensions/kafka/eventhandling/consumer/streamable/ConsumerPositionsUtilIntegrationTest.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010-2023. Axon Framework
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.axonframework.extensions.kafka.eventhandling.consumer.streamable;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.axonframework.extensions.kafka.eventhandling.consumer.ConsumerFactory;
+import org.axonframework.extensions.kafka.eventhandling.producer.ProducerFactory;
+import org.axonframework.extensions.kafka.eventhandling.util.KafkaAdminUtils;
+import org.axonframework.extensions.kafka.eventhandling.util.KafkaContainerTest;
+import org.junit.jupiter.api.*;
+
+import java.time.Instant;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.axonframework.extensions.kafka.eventhandling.util.ConsumerConfigUtil.consumerFactory;
+import static org.axonframework.extensions.kafka.eventhandling.util.ProducerConfigUtil.producerFactory;
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Integration tests spinning up a Kafka Broker to verify whether the {@link ConsumerPositionsUtil} gets the correct
+ * positions.
+ *
+ * @author Gerard Klijs
+ */
+class ConsumerPositionsUtilIntegrationTest extends KafkaContainerTest {
+
+    private static final String RECORD_BODY = "foo";
+
+    private static final String[] TOPICS = {"testPositionsUtil"};
+    private static final Integer NR_PARTITIONS = 5;
+
+    private ProducerFactory<String, String> producerFactory;
+    private ConsumerFactory<String, String> consumerFactory;
+
+    @BeforeAll
+    static void before() {
+        KafkaAdminUtils.createTopics(getBootstrapServers(), TOPICS);
+        KafkaAdminUtils.createPartitions(getBootstrapServers(), NR_PARTITIONS, TOPICS);
+    }
+
+    @AfterAll
+    public static void after() {
+        KafkaAdminUtils.deleteTopics(getBootstrapServers(), TOPICS);
+    }
+
+    private static void publishRecordsOnPartitions(Producer<String, String> producer,
+                                                   String topic,
+                                                   int recordsPerPartitions,
+                                                   int partitionsPerTopic) {
+        for (int i = 0; i < recordsPerPartitions; i++) {
+            for (int p = 0; p < partitionsPerTopic; p++) {
+                producer.send(buildRecord(topic, p));
+            }
+        }
+        producer.flush();
+    }
+
+    private static ProducerRecord<String, String> buildRecord(String topic, int partition) {
+        return new ProducerRecord<>(topic, partition, null, null, RECORD_BODY);
+    }
+
+    @BeforeEach
+    void setUp() {
+        producerFactory = producerFactory(getBootstrapServers());
+        consumerFactory = consumerFactory(getBootstrapServers());
+    }
+
+    @AfterEach
+    void tearDown() {
+        producerFactory.shutDown();
+    }
+
+    @Test
+    void positionsTest() {
+        String topic = "testPositionsUtil";
+
+        Consumer<String, String> testConsumer = consumerFactory.createConsumer(null);
+        List<String> topics = Collections.singletonList(topic);
+        assertTrue(ConsumerPositionsUtil.getHeadPositions(testConsumer, topics).isEmpty());
+        assertTrue(ConsumerPositionsUtil.getPositionsBasedOnTime(testConsumer, topics, Instant.now()).isEmpty());
+
+        int recordsPerPartitions = 5;
+        Producer<String, String> producer = producerFactory.createProducer();
+        publishRecordsOnPartitions(producer, topic, recordsPerPartitions, 5);
+
+        Instant now = Instant.now();
+        publishRecordsOnPartitions(producer, topic, recordsPerPartitions, 5);
+
+        Map<TopicPartition, Long> headPositions = ConsumerPositionsUtil.getHeadPositions(testConsumer, topics);
+        assertFalse(headPositions.isEmpty());
+        assertEquals(5, headPositions.keySet().size());
+        headPositions.values().forEach(p -> assertEquals(9, p));
+
+        Map<TopicPartition, Long> positionsBasedOnTime =
+                ConsumerPositionsUtil.getPositionsBasedOnTime(testConsumer, topics, now);
+        assertFalse(positionsBasedOnTime.isEmpty());
+        assertEquals(5, positionsBasedOnTime.keySet().size());
+        positionsBasedOnTime.values().forEach(p -> assertEquals(4, p));
+    }
+}
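The expected values in positionsTest follow from the position arithmetic in ConsumerPositionsUtil, per partition; a sketch of the reasoning:

// 10 records per partition (offsets 0..9), of which 5..9 are published at or after 'now'.
long endOffset = 10L;                       // next offset Kafka would assign
long headPosition = endOffset - 1;          // 9, matching assertEquals(9, p)
long offsetAtTimestamp = 5L;                // offsetsForTimes: first offset with timestamp >= 'now'
long timedPosition = offsetAtTimestamp - 1; // 4, matching assertEquals(4, p)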