diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index 41dd2e2f80..e3cc823fe2 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -18,7 +18,7 @@ client.init.timeout.ms */ public static final String KAFKACLIENT_INIT_TIMEOUT_CONFIG = "client.init.timeout.ms"; + /** + * client.request.timeout.ms + */ + public static final String KAFKACLIENT_REQUEST_TIMEOUT_CONFIG = "client.request.timeout.ms"; public static final String ZOOKEEPER_SET_ACL_CONFIG = "zookeeper.set.acl"; public static final String KAFKACLIENT_SECURITY_PROTOCOL_CONFIG = @@ -247,6 +251,9 @@ public class KafkaRestConfig extends RestConfig { protected static final String KAFKACLIENT_INIT_TIMEOUT_DOC = "The timeout for initialization of the Kafka store, including creation of the Kafka topic " + "that stores schema data."; + protected static final String KAFKACLIENT_REQUEST_TIMEOUT_DOC = + "The timeout for sending any admin-client request to Kafka cluster including waiting for" + + " the response on client side."; protected static final String KAFKACLIENT_TIMEOUT_DOC = "The timeout for an operation on the Kafka store"; protected static final String @@ -450,6 +457,14 @@ protected static ConfigDef baseKafkaRestConfigDef() { Importance.MEDIUM, KAFKACLIENT_INIT_TIMEOUT_DOC ) + .define( + KAFKACLIENT_REQUEST_TIMEOUT_CONFIG, + Type.INT, + 60000, + Range.atLeast(0), + Importance.MEDIUM, + KAFKACLIENT_REQUEST_TIMEOUT_DOC + ) .define( KAFKACLIENT_TIMEOUT_CONFIG, Type.INT, diff --git a/kafka-rest-common/src/main/java/io/confluent/kafkarest/entities/Topic.java b/kafka-rest-common/src/main/java/io/confluent/kafkarest/entities/Topic.java index 084dac9722..85b98badfb 100644 --- a/kafka-rest-common/src/main/java/io/confluent/kafkarest/entities/Topic.java +++ b/kafka-rest-common/src/main/java/io/confluent/kafkarest/entities/Topic.java @@ -15,6 +15,7 @@ package io.confluent.kafkarest.entities; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import org.hibernate.validator.constraints.NotEmpty; @@ -23,17 +24,14 @@ import java.util.Objects; import java.util.Properties; -import javax.validation.constraints.NotNull; - +@JsonInclude(JsonInclude.Include.NON_NULL) public class Topic { @NotEmpty private String name; - @NotNull private Properties configs; - @NotEmpty private List partitions; public Topic( diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/AdminClientWrapper.java b/kafka-rest/src/main/java/io/confluent/kafkarest/AdminClientWrapper.java index 54ae893170..34832f5c37 100644 --- a/kafka-rest/src/main/java/io/confluent/kafkarest/AdminClientWrapper.java +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/AdminClientWrapper.java @@ -16,18 +16,22 @@ package io.confluent.kafkarest; import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.ConsumerGroupListing; +import org.apache.kafka.clients.admin.ConsumerGroupDescription; import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.ConfigEntry; import org.apache.kafka.clients.admin.DescribeClusterResult; import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.admin.ListConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartitionInfo; import org.apache.kafka.common.config.ConfigResource; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import 
java.util.List; +import java.util.Map; import java.util.Properties; import java.util.TreeSet; import java.util.Vector; @@ -39,15 +43,18 @@ public class AdminClientWrapper { - private AdminClient adminClient; - private int initTimeOut; + private final AdminClient adminClient; + private final int initTimeOut; + private final int requestTimeOut; public AdminClientWrapper(KafkaRestConfig kafkaRestConfig, AdminClient adminClient) { this.adminClient = adminClient; this.initTimeOut = kafkaRestConfig.getInt(KafkaRestConfig.KAFKACLIENT_INIT_TIMEOUT_CONFIG); + this.requestTimeOut = kafkaRestConfig.getInt( + KafkaRestConfig.KAFKACLIENT_REQUEST_TIMEOUT_CONFIG); } - public static Properties adminProperties(KafkaRestConfig kafkaRestConfig) { + static Properties adminProperties(KafkaRestConfig kafkaRestConfig) { Properties properties = new Properties(); properties.putAll(kafkaRestConfig.getAdminProperties()); properties.put(KafkaRestConfig.BOOTSTRAP_SERVERS_CONFIG, @@ -67,36 +74,32 @@ public List getBrokerIds() throws Exception { } public Collection getTopicNames() throws Exception { - Collection allTopics = null; - allTopics = new TreeSet<>( + return new TreeSet<>( adminClient.listTopics().names().get(initTimeOut, TimeUnit.MILLISECONDS)); - return allTopics; } public boolean topicExists(String topic) throws Exception { - Collection allTopics = getTopicNames(); - return allTopics.contains(topic); + return getTopicNames().contains(topic); } public Topic getTopic(String topicName) throws Exception { - Topic topic = null; if (topicExists(topicName)) { TopicDescription topicDescription = getTopicDescription(topicName); - topic = buildTopic(topicName, topicDescription); + return buildTopic(topicName, topicDescription); + } else { + return null; } - return topic; } public List getTopicPartitions(String topicName) throws Exception { TopicDescription topicDescription = getTopicDescription(topicName); - List partitions = buildPartitonsData(topicDescription.partitions(), null); - return partitions; + return buildPartitionsData(topicDescription.partitions(), null); } public Partition getTopicPartition(String topicName, int partition) throws Exception { TopicDescription topicDescription = getTopicDescription(topicName); - List partitions = buildPartitonsData(topicDescription.partitions(), partition); + List partitions = buildPartitionsData(topicDescription.partitions(), partition); if (partitions.isEmpty()) { return null; } @@ -108,22 +111,33 @@ public boolean partitionExists(String topicName, int partition) throws Exception return (partition >= 0 && partition < topic.getPartitions().size()); } + public Collection listConsumerGroups() throws Exception { + return adminClient.listConsumerGroups(new ListConsumerGroupsOptions() + .timeoutMs(requestTimeOut)).all().get(requestTimeOut, TimeUnit.MILLISECONDS); + } + + public Map describeConsumerGroups( + Collection groupIds) throws Exception { + return adminClient.describeConsumerGroups(groupIds, + new DescribeConsumerGroupsOptions().timeoutMs(requestTimeOut)) + .all().get(requestTimeOut, TimeUnit.MILLISECONDS); + } + private Topic buildTopic(String topicName, TopicDescription topicDescription) throws Exception { - List partitions = buildPartitonsData(topicDescription.partitions(), null); + List partitions = buildPartitionsData(topicDescription.partitions(), null); ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topicName); Config config = adminClient.describeConfigs( - Collections.unmodifiableList(Arrays.asList(topicResource)) + 
Collections.unmodifiableList(Collections.singletonList(topicResource)) ).values().get(topicResource).get(); Properties topicProps = new Properties(); for (ConfigEntry configEntry : config.entries()) { topicProps.put(configEntry.name(), configEntry.value()); } - Topic topic = new Topic(topicName, topicProps, partitions); - return topic; + return new Topic(topicName, topicProps, partitions); } - private List buildPartitonsData( + private List buildPartitionsData( List partitions, Integer partitionsFilter ) { @@ -153,7 +167,7 @@ private List buildPartitonsData( } private TopicDescription getTopicDescription(String topicName) throws Exception { - return adminClient.describeTopics(Collections.unmodifiableList(Arrays.asList(topicName))) + return adminClient.describeTopics(Collections.singletonList(topicName)) .values().get(topicName).get(initTimeOut, TimeUnit.MILLISECONDS); } diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/DefaultKafkaRestContext.java b/kafka-rest/src/main/java/io/confluent/kafkarest/DefaultKafkaRestContext.java index fd85d570c8..212eb4f776 100644 --- a/kafka-rest/src/main/java/io/confluent/kafkarest/DefaultKafkaRestContext.java +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/DefaultKafkaRestContext.java @@ -30,6 +30,7 @@ public class DefaultKafkaRestContext implements KafkaRestContext { private ProducerPool producerPool; private KafkaConsumerManager kafkaConsumerManager; private AdminClientWrapper adminClientWrapper; + private GroupMetadataObserver groupMetadataObserver; public DefaultKafkaRestContext( @@ -37,6 +38,7 @@ public DefaultKafkaRestContext( ProducerPool producerPool, KafkaConsumerManager kafkaConsumerManager, AdminClientWrapper adminClientWrapper, + GroupMetadataObserver groupMetadataObserver, ScalaConsumersContext scalaConsumersContext ) { @@ -44,6 +46,7 @@ public DefaultKafkaRestContext( this.producerPool = producerPool; this.kafkaConsumerManager = kafkaConsumerManager; this.adminClientWrapper = adminClientWrapper; + this.groupMetadataObserver = groupMetadataObserver; this.scalaConsumersContext = scalaConsumersContext; } @@ -54,7 +57,7 @@ public KafkaRestConfig getConfig() { } @Override - public ProducerPool getProducerPool() { + public synchronized ProducerPool getProducerPool() { if (producerPool == null) { producerPool = new ProducerPool(config); } @@ -77,7 +80,7 @@ public SimpleConsumerManager getSimpleConsumerManager() { } @Override - public KafkaConsumerManager getKafkaConsumerManager() { + public synchronized KafkaConsumerManager getKafkaConsumerManager() { if (kafkaConsumerManager == null) { kafkaConsumerManager = new KafkaConsumerManager(config); } @@ -85,7 +88,7 @@ public KafkaConsumerManager getKafkaConsumerManager() { } @Override - public AdminClientWrapper getAdminClientWrapper() { + public synchronized AdminClientWrapper getAdminClientWrapper() { if (adminClientWrapper == null) { adminClientWrapper = new AdminClientWrapper(config, AdminClient.create(AdminClientWrapper.adminProperties(config))); @@ -93,6 +96,14 @@ public AdminClientWrapper getAdminClientWrapper() { return adminClientWrapper; } + @Override + public synchronized GroupMetadataObserver getGroupMetadataObserver() { + if (groupMetadataObserver == null) { + groupMetadataObserver = new GroupMetadataObserver(config, getAdminClientWrapper()); + } + return groupMetadataObserver; + } + @Override public void shutdown() { if (kafkaConsumerManager != null) { diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/GroupMetadataObserver.java 
b/kafka-rest/src/main/java/io/confluent/kafkarest/GroupMetadataObserver.java new file mode 100644 index 0000000000..fbac601676 --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/GroupMetadataObserver.java @@ -0,0 +1,312 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.confluent.kafkarest; + +import io.confluent.kafkarest.entities.ConsumerGroup; +import io.confluent.kafkarest.entities.ConsumerGroupCoordinator; +import io.confluent.kafkarest.entities.ConsumerGroupSubscription; +import io.confluent.kafkarest.entities.ConsumerTopicPartitionDescription; +import io.confluent.kafkarest.entities.Topic; +import org.apache.kafka.clients.admin.ConsumerGroupDescription; +import org.apache.kafka.clients.admin.ConsumerGroupListing; +import org.apache.kafka.clients.admin.MemberDescription; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.Properties; +import java.util.Objects; +import java.util.Optional; +import java.util.HashSet; +import java.util.Comparator; +import java.util.Map.Entry; +import java.util.stream.Collectors; + + +public class GroupMetadataObserver { + + private static KafkaConsumer<String, String> createConsumer(String groupId, + KafkaRestConfig appConfig) { + final Properties properties = new Properties(); + String deserializer = StringDeserializer.class.getName(); + properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, + RestConfigUtils.bootstrapBrokers(appConfig)); + properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); + properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000"); + properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserializer); + properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserializer); + properties.putAll(appConfig.getConsumerProperties()); + + return new KafkaConsumer<>(properties); + } + + private final Logger log = LoggerFactory.getLogger(GroupMetadataObserver.class); + + private final KafkaRestConfig config; + private final AdminClientWrapper adminClientWrapper; + + public GroupMetadataObserver(KafkaRestConfig config, AdminClientWrapper adminClientWrapper) { + this.config = Objects.requireNonNull(config); + this.adminClientWrapper = Objects.requireNonNull(adminClientWrapper); + } + + /** + *
<p>Get consumer group list restricted by paging parameters</p>
+ * + * @return list of consumer groups + */ + public List getPagedConsumerGroupList(Integer startPos, + Integer count) + throws Exception { + return getConsumerGroups(getPagedConsumerGroup(startPos, count)); + } + + /** + *
<p>Get consumer group list</p>
+ * + * @return list of consumer groups + */ + public List getConsumerGroupList() throws Exception { + return getConsumerGroups(adminClientWrapper.listConsumerGroups()); + } + + private List getConsumerGroups(Collection groupsOverview) + throws Exception { + final List result = new ArrayList<>(); + List groupIds = groupsOverview.stream().map(ConsumerGroupListing::groupId) + .collect(Collectors.toList()); + for (Entry eachGroupInfo : + adminClientWrapper.describeConsumerGroups(groupIds).entrySet()) { + final Node node = eachGroupInfo.getValue().coordinator(); + result.add(new ConsumerGroup(eachGroupInfo.getKey(), + new ConsumerGroupCoordinator(node.host(), node.port()))); + } + return result; + } + + private Collection getPagedConsumerGroup(Integer startPosition, + Integer count) + throws Exception { + Collection groupsOverview; + final List consumerGroupListings = + new ArrayList<>(adminClientWrapper.listConsumerGroups()); + consumerGroupListings.sort(Comparator.comparing(ConsumerGroupListing::groupId)); + groupsOverview = consumerGroupListings.subList(startPosition, + Math.min(consumerGroupListings.size(), startPosition + count)); + return groupsOverview; + } + + /** + *
<p>Get topic list for consumer group</p>
+ * + * @param groupId - group name + * @return description of consumer group + */ + public Set getConsumerGroupTopicInformation(String groupId) + throws Exception { + final Set result = getConsumerGroupTopics(groupId); + log.debug("Get topic list {}", result); + return result; + } + + /** + *
<p>Get topic list for consumer group restricted by paging parameters</p>
+ * + * @param groupId - group name + * @return description of consumer group + */ + public Set getPagedConsumerGroupTopicInformation(String groupId, + Integer startPos, + Integer count) + throws Exception { + final Set result = getConsumerGroupTopics(groupId); + log.debug("Get topic list {}", result); + return result.stream() + .skip(startPos) + .limit(Math.min(result.size(), startPos + count)) + .collect(Collectors.toSet()); + } + + private Set getConsumerGroupTopics(String groupId) throws Exception { + final Set result = new HashSet<>(); + final Collection memberDescriptions = + adminClientWrapper.describeConsumerGroups(Collections.singleton(groupId)) + .get(groupId).members(); + if (memberDescriptions.isEmpty()) { + return Collections.emptySet(); + } + for (MemberDescription eachSummary : memberDescriptions) { + for (TopicPartition topicPartition : eachSummary.assignment().topicPartitions()) { + result.add(new Topic(topicPartition.topic(), null, null)); + } + } + return result; + } + + /** + *
<p>Get consumer group description</p>
+ * + * @param groupId - group name + * @return description of consumer group + * (all consumed topics with all partition offset information) + */ + public ConsumerGroupSubscription getConsumerGroupInformation(String groupId) throws Exception { + return getConsumerGroupInformation(groupId, Collections.emptyList()); + } + + /** + *
<p>Get consumer group description</p>
+ * + * @param groupId - group name + * @param topics - topic names for filter - default empty topic names + * @param offsetOpt - offset for TopicPartitionEntity + * collection for each consumer member for paging + * @param countOpt - count of elements TopicPartitionEntity + * collection for each consumer member for paging + * @return description of consumer group + */ + public ConsumerGroupSubscription getConsumerGroupInformation( + String groupId, + Collection topics, + Integer offsetOpt, + Integer countOpt) throws Exception { + final ConsumerGroupDescription consumerGroupSummary = + adminClientWrapper.describeConsumerGroups(Collections.singleton(groupId)) + .get(groupId); + final Collection summaries = consumerGroupSummary.members(); + if (summaries.isEmpty()) { + return ConsumerGroupSubscription.empty(); + } + log.debug("Get summary list {}", summaries); + try (KafkaConsumer kafkaConsumer = createConsumer(groupId, config)) { + final List consumerTopicPartitionDescriptions = + getConsumerTopicPartitionDescriptions(topics, summaries, kafkaConsumer); + final Node coordinatorNode = consumerGroupSummary.coordinator(); + return new ConsumerGroupSubscription( + getPagedTopicPartitionList(consumerTopicPartitionDescriptions, offsetOpt, countOpt), + consumerTopicPartitionDescriptions.size(), + new ConsumerGroupCoordinator(coordinatorNode.host(), coordinatorNode.port())); + } + } + + /** + *
<p>Get consumer group description</p>
+ * + * @param groupId - group name + * @param topics - topic names for filter - default empty topic names + * @return description of consumer group + */ + public ConsumerGroupSubscription getConsumerGroupInformation( + String groupId, + Collection topics) throws Exception { + final ConsumerGroupDescription consumerGroupSummary = + adminClientWrapper.describeConsumerGroups(Collections.singleton(groupId)) + .get(groupId); + final Collection summaries = consumerGroupSummary.members(); + if (summaries.isEmpty()) { + return ConsumerGroupSubscription.empty(); + } + log.debug("Get summary list {}", summaries); + try (KafkaConsumer kafkaConsumer = createConsumer(groupId, config)) { + final List consumerTopicPartitionDescriptions = + getConsumerTopicPartitionDescriptions(topics, summaries, kafkaConsumer); + final Node coordinatorNode = consumerGroupSummary.coordinator(); + return new ConsumerGroupSubscription( + consumerTopicPartitionDescriptions, + consumerTopicPartitionDescriptions.size(), + new ConsumerGroupCoordinator(coordinatorNode.host(), coordinatorNode.port())); + } + } + + private List getConsumerTopicPartitionDescriptions( + Collection topics, + Collection consumerGroupMembers, + KafkaConsumer kafkaConsumer) { + final List consumerTopicPartitionDescriptions = + new ArrayList<>(); + for (MemberDescription summary : consumerGroupMembers) { + final Set assignedTopicPartitions = + summary.assignment().topicPartitions(); + final List filteredTopicPartitions = new ArrayList<>(); + if (!topics.isEmpty()) { + final List newTopicPartitions = new ArrayList<>(); + for (TopicPartition topicPartition : assignedTopicPartitions) { + if (topics.contains(topicPartition.topic())) { + newTopicPartitions.add(topicPartition); + } + } + filteredTopicPartitions.addAll(newTopicPartitions); + } else { + filteredTopicPartitions.addAll(assignedTopicPartitions); + } + filteredTopicPartitions.sort(Comparator.comparingInt(TopicPartition::partition)); + kafkaConsumer.assign(filteredTopicPartitions); + consumerTopicPartitionDescriptions.addAll( + createConsumerTopicPartitionDescriptions(kafkaConsumer, + summary, filteredTopicPartitions)); + } + consumerTopicPartitionDescriptions.sort( + Comparator.comparingInt(ConsumerTopicPartitionDescription::getPartitionId)); + return consumerTopicPartitionDescriptions; + } + + private List createConsumerTopicPartitionDescriptions( + KafkaConsumer kafkaConsumer, + MemberDescription summary, + List filteredTopicPartitions) { + final List result = new ArrayList<>(); + for (TopicPartition topicPartition : filteredTopicPartitions) { + final OffsetAndMetadata metadata = kafkaConsumer.committed(topicPartition); + // Get current offset + final Long currentOffset = Optional.ofNullable(metadata).isPresent() + ? 
metadata.offset() : 0; + // Goto end offset for current TopicPartition WITHOUT COMMIT + kafkaConsumer.seekToEnd(Collections.singleton(topicPartition)); + // Get end offset + final Long totalOffset = kafkaConsumer.position(topicPartition); + result.add( + new ConsumerTopicPartitionDescription(summary.consumerId(), + summary.host(), + topicPartition.topic(), + topicPartition.partition(), + currentOffset, + totalOffset - currentOffset, + totalOffset + )); + } + return result; + } + + private List getPagedTopicPartitionList( + List topicPartitionList, + Integer offsetOpt, + Integer countOpt) { + return topicPartitionList.subList(offsetOpt, + Math.min(topicPartitionList.size(), offsetOpt + countOpt)); + } +} diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestApplication.java b/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestApplication.java index 7c72fe27ae..fa26763033 100644 --- a/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestApplication.java +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestApplication.java @@ -23,6 +23,7 @@ import io.confluent.kafkarest.resources.BrokersResource; import io.confluent.kafkarest.resources.ConsumersResource; import io.confluent.kafkarest.resources.PartitionsResource; +import io.confluent.kafkarest.resources.ConsumerGroupsResource; import io.confluent.kafkarest.resources.RootResource; import io.confluent.kafkarest.resources.TopicsResource; import io.confluent.kafkarest.v2.KafkaConsumerManager; @@ -65,7 +66,7 @@ public KafkaRestApplication(KafkaRestConfig config) { @Override public void setupResources(Configurable config, KafkaRestConfig appConfig) { setupInjectedResources(config, appConfig, null, - null, null, null + null, null, null, null ); } @@ -78,6 +79,7 @@ protected void setupInjectedResources( ProducerPool producerPool, KafkaConsumerManager kafkaConsumerManager, AdminClientWrapper adminClientWrapperInjected, + GroupMetadataObserver groupMetadataObserver, ScalaConsumersContext scalaConsumersContext ) { if (StringUtil.isBlank(appConfig.getString(KafkaRestConfig.BOOTSTRAP_SERVERS_CONFIG)) @@ -87,9 +89,10 @@ protected void setupInjectedResources( + KafkaRestConfig.ZOOKEEPER_CONNECT_CONFIG + " needs to be configured"); } + KafkaRestContextProvider.initialize(config, appConfig, producerPool, - kafkaConsumerManager, adminClientWrapperInjected, scalaConsumersContext - ); + kafkaConsumerManager, adminClientWrapperInjected, + groupMetadataObserver, scalaConsumersContext); ContextInvocationHandler contextInvocationHandler = new ContextInvocationHandler(); KafkaRestContext context = (KafkaRestContext) Proxy.newProxyInstance( @@ -102,6 +105,7 @@ protected void setupInjectedResources( config.register(new TopicsResource(context)); config.register(new PartitionsResource(context)); config.register(new ConsumersResource(context)); + config.register(new ConsumerGroupsResource(context)); config.register(new io.confluent.kafkarest.resources.v2.ConsumersResource(context)); config.register(new io.confluent.kafkarest.resources.v2.PartitionsResource(context)); config.register(KafkaRestCleanupFilter.class); diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestContext.java b/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestContext.java index e46041ee2d..396c665834 100644 --- a/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestContext.java +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/KafkaRestContext.java @@ -18,22 +18,24 @@ import io.confluent.kafkarest.v2.KafkaConsumerManager; public 
interface KafkaRestContext { - public KafkaRestConfig getConfig(); + KafkaRestConfig getConfig(); - public ProducerPool getProducerPool(); + ProducerPool getProducerPool(); @Deprecated - public ScalaConsumersContext getScalaConsumersContext(); + ScalaConsumersContext getScalaConsumersContext(); @Deprecated - public ConsumerManager getConsumerManager(); + ConsumerManager getConsumerManager(); @Deprecated - public SimpleConsumerManager getSimpleConsumerManager(); + SimpleConsumerManager getSimpleConsumerManager(); - public KafkaConsumerManager getKafkaConsumerManager(); + KafkaConsumerManager getKafkaConsumerManager(); - public AdminClientWrapper getAdminClientWrapper(); + AdminClientWrapper getAdminClientWrapper(); + + GroupMetadataObserver getGroupMetadataObserver(); void shutdown(); } diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroup.java b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroup.java new file mode 100644 index 0000000000..d510b81e59 --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroup.java @@ -0,0 +1,75 @@ +/* + * Copyright 2017 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.confluent.kafkarest.entities; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +public class ConsumerGroup { + + private final String groupId; + private final ConsumerGroupCoordinator coordinator; + + @JsonCreator + public ConsumerGroup(@JsonProperty("groupId") String groupId, + @JsonProperty("coordinator") ConsumerGroupCoordinator coordinator) { + this.groupId = groupId; + this.coordinator = coordinator; + } + + @JsonProperty + public String getGroupId() { + return groupId; + } + + @JsonProperty + public ConsumerGroupCoordinator getCoordinator() { + return coordinator; + } + + @Override + public String toString() { + return "ConsumerGroup{" + + "groupId='" + groupId + '\'' + + ", coordinator=" + coordinator + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConsumerGroup that = (ConsumerGroup) o; + + if (!groupId.equals(that.groupId)) { + return false; + } + return coordinator.equals(that.coordinator); + } + + @Override + public int hashCode() { + int result = groupId.hashCode(); + result = 31 * result + coordinator.hashCode(); + return result; + } +} diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupCoordinator.java b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupCoordinator.java new file mode 100644 index 0000000000..f655f6c875 --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupCoordinator.java @@ -0,0 +1,79 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.confluent.kafkarest.entities; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +public class ConsumerGroupCoordinator { + + public static ConsumerGroupCoordinator empty() { + return new ConsumerGroupCoordinator("", -1); + } + + private final String host; + private final Integer port; + + @JsonCreator + public ConsumerGroupCoordinator(@JsonProperty("host") String host, + @JsonProperty("port") Integer port) { + this.host = host; + this.port = port; + } + + @JsonProperty + public String getHost() { + return host; + } + + @JsonProperty + public Integer getPort() { + return port; + } + + @Override + public String toString() { + return "ConsumerGroupCoordinator{" + + "host='" + host + '\'' + + ", port=" + port + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConsumerGroupCoordinator that = (ConsumerGroupCoordinator) o; + + if (!host.equals(that.host)) { + return false; + } + return port.equals(that.port); + } + + @Override + public int hashCode() { + int result = host.hashCode(); + result = 31 * result + port.hashCode(); + return result; + } +} diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupSubscription.java b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupSubscription.java new file mode 100644 index 0000000000..ddd446f57c --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerGroupSubscription.java @@ -0,0 +1,100 @@ +/* + * Copyright 2017 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.confluent.kafkarest.entities; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Collections; +import java.util.List; + +public class ConsumerGroupSubscription { + + public static ConsumerGroupSubscription empty() { + return new ConsumerGroupSubscription( + Collections.emptyList(), + 0, + ConsumerGroupCoordinator.empty()); + } + + private final List topicPartitionList; + private final Integer topicPartitionCount; + private final ConsumerGroupCoordinator coordinator; + + @JsonCreator + public ConsumerGroupSubscription( + @JsonProperty("topicPartitions") List + topicPartitionList, + @JsonProperty("topicPartitionCount") Integer topicPartitionCount, + @JsonProperty("coordinator") ConsumerGroupCoordinator coordinator) { + this.topicPartitionList = topicPartitionList; + this.topicPartitionCount = topicPartitionCount; + this.coordinator = coordinator; + } + + @JsonProperty + public List getTopicPartitionList() { + return topicPartitionList; + } + + @JsonProperty + public Integer getTopicPartitionCount() { + return topicPartitionCount; + } + + @JsonProperty + public ConsumerGroupCoordinator getCoordinator() { + return coordinator; + } + + @Override + public String toString() { + return "ConsumerEntity{" + + "topicPartitionList=" + topicPartitionList + + ", topicPartitionCount=" + topicPartitionCount + + ", coordinator=" + coordinator + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConsumerGroupSubscription that = (ConsumerGroupSubscription) o; + + if (!topicPartitionList.equals(that.topicPartitionList)) { + return false; + } + if (!topicPartitionCount.equals(that.topicPartitionCount)) { + return false; + } + return coordinator.equals(that.coordinator); + } + + @Override + public int hashCode() { + int result = topicPartitionList.hashCode(); + result = 31 * result + topicPartitionCount.hashCode(); + result = 31 * result + coordinator.hashCode(); + return result; + } +} diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerTopicPartitionDescription.java b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerTopicPartitionDescription.java new file mode 100644 index 0000000000..36d934cf02 --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/entities/ConsumerTopicPartitionDescription.java @@ -0,0 +1,128 @@ +/* + * Copyright 2017 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.confluent.kafkarest.entities; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +public class ConsumerTopicPartitionDescription { + + private final String consumerId; + private final String consumerIp; + private final String topicName; + private final Integer partitionId; + private final Long currentOffset; + private final Long lag; + private final Long endOffset; + + @JsonCreator + public ConsumerTopicPartitionDescription(@JsonProperty("consumerId") String consumerId, + @JsonProperty("consumerIp") String consumerIp, + @JsonProperty("topicName") String topicName, + @JsonProperty("partitionId") Integer partitionId, + @JsonProperty("currentOffset") Long currentOffset, + @JsonProperty("lag") Long lag, + @JsonProperty("endOffset") Long endOffset) { + this.consumerId = consumerId; + this.consumerIp = consumerIp; + this.topicName = topicName; + this.partitionId = partitionId; + this.currentOffset = currentOffset; + this.lag = lag; + this.endOffset = endOffset; + } + + @JsonProperty + public String getConsumerId() { + return consumerId; + } + + @JsonProperty + public String getConsumerIp() { + return consumerIp; + } + + @JsonProperty + public String getTopicName() { + return topicName; + } + + @JsonProperty + public Integer getPartitionId() { + return partitionId; + } + + @JsonProperty + public Long getCurrentOffset() { + return currentOffset; + } + + @JsonProperty + public Long getLag() { + return lag; + } + + @JsonProperty + public Long getEndOffset() { + return endOffset; + } + + @Override + public String toString() { + return "TopicPartitionEntity{" + + "consumerId='" + consumerId + '\'' + + ", consumerIp='" + consumerIp + '\'' + + ", topicName='" + topicName + '\'' + + ", partitionId=" + partitionId + + ", currentOffset=" + currentOffset + + ", lag=" + lag + + ", endOffset=" + endOffset + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConsumerTopicPartitionDescription that = (ConsumerTopicPartitionDescription) o; + + return consumerId.equals(that.consumerId) + && consumerIp.equals(that.consumerIp) + && topicName.equals(that.topicName) + && partitionId.equals(that.partitionId) + && currentOffset.equals(that.currentOffset) + && lag.equals(that.lag) + && endOffset.equals(that.endOffset); + } + + @Override + public int hashCode() { + int result = consumerId.hashCode(); + result = 31 * result + consumerIp.hashCode(); + result = 31 * result + topicName.hashCode(); + result = 31 * result + partitionId.hashCode(); + result = 31 * result + currentOffset.hashCode(); + result = 31 * result + lag.hashCode(); + result = 31 * result + endOffset.hashCode(); + return result; + } +} diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/extension/KafkaRestContextProvider.java b/kafka-rest/src/main/java/io/confluent/kafkarest/extension/KafkaRestContextProvider.java index 76793f5121..3854dfc62c 100644 --- a/kafka-rest/src/main/java/io/confluent/kafkarest/extension/KafkaRestContextProvider.java +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/extension/KafkaRestContextProvider.java @@ -17,13 +17,15 @@ import io.confluent.kafkarest.AdminClientWrapper; import io.confluent.kafkarest.DefaultKafkaRestContext; +import io.confluent.kafkarest.GroupMetadataObserver; import io.confluent.kafkarest.KafkaRestConfig; import io.confluent.kafkarest.KafkaRestContext; import io.confluent.kafkarest.ProducerPool; 
import io.confluent.kafkarest.ScalaConsumersContext; -import io.confluent.kafkarest.v2.KafkaConsumerManager; import java.util.concurrent.atomic.AtomicBoolean; +import io.confluent.kafkarest.v2.KafkaConsumerManager; + import javax.ws.rs.core.Configurable; public class KafkaRestContextProvider { @@ -41,6 +43,7 @@ public static void initialize( ProducerPool producerPool, KafkaConsumerManager kafkaConsumerManager, AdminClientWrapper adminClientWrapper, + GroupMetadataObserver groupMetadataObserver, ScalaConsumersContext scalaConsumersContext ) { if (initialized.compareAndSet(false, true)) { @@ -50,8 +53,9 @@ public static void initialize( ScalaConsumersContext.registerExceptionMappers(config, appConfig); } defaultContext = - new DefaultKafkaRestContext(appConfig, producerPool, kafkaConsumerManager, - adminClientWrapper, scalaConsumersContext); + new DefaultKafkaRestContext(appConfig, producerPool, + kafkaConsumerManager, adminClientWrapper, + groupMetadataObserver, scalaConsumersContext); defaultAppConfig = appConfig; } } diff --git a/kafka-rest/src/main/java/io/confluent/kafkarest/resources/ConsumerGroupsResource.java b/kafka-rest/src/main/java/io/confluent/kafkarest/resources/ConsumerGroupsResource.java new file mode 100644 index 0000000000..c2ba69280c --- /dev/null +++ b/kafka-rest/src/main/java/io/confluent/kafkarest/resources/ConsumerGroupsResource.java @@ -0,0 +1,217 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.confluent.kafkarest.resources; + +import io.confluent.kafkarest.KafkaRestContext; +import io.confluent.kafkarest.Versions; +import io.confluent.kafkarest.entities.ConsumerGroupSubscription; +import io.confluent.kafkarest.entities.ConsumerGroup; +import io.confluent.kafkarest.entities.Topic; +import io.confluent.rest.annotations.PerformanceMetric; + +import javax.ws.rs.Consumes; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +/** + * Provides metadata about consumers groups + */ +@Path("/groups") +@Produces({Versions.KAFKA_V1_JSON_WEIGHTED, Versions.KAFKA_DEFAULT_JSON_WEIGHTED, + Versions.JSON_WEIGHTED, Versions.KAFKA_V2_JSON_WEIGHTED}) +@Consumes({Versions.KAFKA_V1_JSON, Versions.KAFKA_DEFAULT_JSON, Versions.JSON, + Versions.GENERIC_REQUEST, Versions.KAFKA_V2_JSON}) +public class ConsumerGroupsResource { + + private final KafkaRestContext context; + + /** + *
<p>Create consumer groups resource</p>
+ * + * @param context - context of rest application + */ + public ConsumerGroupsResource(KafkaRestContext context) { + this.context = context; + } + + /** + *
<p>Get consumer group list</p>
+ *
<p>Example: http://127.0.0.1:2081/groups/</p>
+ * + * @param pageSize - Optional parameter. Used for paging. + * Restrict count of returned entities with group information. + * @param pageOffset - Optional parameter. Used for paging. + * Offset which starts return records from. + * @return List of group names with group coordinator host of each group. + * [{"groupId":"testGroup", "coordinator": {"host": "127.0.0.1", "port": "123"}}] + */ + @GET + @PerformanceMetric("groups.list") + public List list(@QueryParam("page_offset") Integer pageOffset, + @QueryParam("page_size") Integer pageSize) + throws Exception { + final boolean needPartOfData = Optional.ofNullable(pageOffset).isPresent() + && Optional.ofNullable(pageSize).isPresent() + && pageSize > 0; + if (needPartOfData) { + return context.getGroupMetadataObserver() + .getPagedConsumerGroupList(pageOffset, pageSize); + } + return context.getGroupMetadataObserver().getConsumerGroupList(); + } + + /** + *
<p>Get partitions list for group groupId</p>
+ *
<p>Example: http://127.0.0.1:2081/groups/testGroup/partitions</p>
+ * + * @param groupId - Group name. + * @return Consumer subscription information. Include group offsets, lags and + * group coordinator information. + * { "topicPartitionList":[ + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":0, + * "currentOffset":15338734, + * "lag":113812, + * "endOffset":15452546}, + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":1, + * "currentOffset":15753823, + * "lag":136160, + * "endOffset":15889983}, + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":2, + * "currentOffset":15649419, + * "lag":133052, + * "endOffset":15782471}], + * "topicPartitionCount":3, + * "coordinator":{ "host":"{coordinator_host_name}","port":9496 } + * } + */ + @GET + @Path("/{groupId}/partitions") + @PerformanceMetric("groups.get.partitions") + public ConsumerGroupSubscription getPartitionsInformation(@PathParam("groupId") String groupId) + throws Exception { + return context.getGroupMetadataObserver().getConsumerGroupInformation(groupId); + } + + /** + *
<p>Get topics list for group groupId</p>
+ *
<p>Example: http://127.0.0.1:2081/groups/testGroup/topics</p>
+ * + * @param groupId - Group name. + * @param pageSize - Optional parameter. Used for paging. + * Restrict count of returned entities with group information. + * @param pageOffset - Optional parameter. Used for paging. + * Offset which starts return records from. + * @return Topic names who are read by specified consumer group. + * [{"name":"1"}] + */ + @GET + @Path("/{groupId}/topics") + @PerformanceMetric("groups.get.topics") + public Set getTopics(@PathParam("groupId") String groupId, + @QueryParam("page_offset") Integer pageOffset, + @QueryParam("page_size") Integer pageSize) throws Exception { + final boolean needPartOfData = Optional.ofNullable(pageOffset).isPresent() + && Optional.ofNullable(pageSize).isPresent() + && pageSize > 0; + if (needPartOfData) { + return context.getGroupMetadataObserver() + .getPagedConsumerGroupTopicInformation(groupId, + pageOffset, + pageSize); + } + return context.getGroupMetadataObserver() + .getConsumerGroupTopicInformation(groupId); + } + + /** + *
<p>Get partitions list for group groupId filtered by topic</p>
+ *
<p>Example: http://127.0.0.1:2081/groups/testGroup/topics/testTopic?page_offset=10&page_size=10</p>
+ * + * @param groupId - Group name. + * @param topic - Topic name. + * @param pageSize - Optional parameter. Used for paging. + * Restrict count of returned entities with group information. + * @param pageOffset - Optional parameter. Used for paging. + * Offset which starts return records from. + * @return Consumer subscription information. Include group offsets, lags and + * group coordinator information. + * { "topicPartitionList":[ + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":0, + * "currentOffset":15338734, + * "lag":113812, + * "endOffset":15452546}, + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":1, + * "currentOffset":15753823, + * "lag":136160, + * "endOffset":15889983}, + * { "consumerId":"consumer-1-88792db6-99a2-4064-aad2-38be12b32e88", + * "consumerIp":"/{some_ip}", + * "topicName":"1", + * "partitionId":2, + * "currentOffset":15649419, + * "lag":133052, + * "endOffset":15782471}], + * "topicPartitionCount":3, + * "coordinator":{ "host":"{coordinator_host_name}","port":9496 } + * } + */ + @GET + @Path("/{groupId}/topics/{topic}") + @PerformanceMetric("groups.get.topic.partitions") + public ConsumerGroupSubscription getPartitionsInformationByTopic( + @PathParam("groupId") String groupId, + @PathParam("topic") String topic, + @QueryParam("page_offset") Integer pageOffset, + @QueryParam("page_size") Integer pageSize) + throws Exception { + final boolean needPartOfData = Optional.ofNullable(pageOffset).isPresent() + && Optional.ofNullable(pageSize).isPresent() + && pageSize > 0; + if (needPartOfData) { + return context.getGroupMetadataObserver() + .getConsumerGroupInformation(groupId, + Collections.singleton(topic), + pageOffset, + pageSize); + } + return context.getGroupMetadataObserver() + .getConsumerGroupInformation(groupId, + Collections.singleton(topic)); + } +} diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/TestUtils.java b/kafka-rest/src/test/java/io/confluent/kafkarest/TestUtils.java index 301a65de75..fc324f5bb4 100644 --- a/kafka-rest/src/test/java/io/confluent/kafkarest/TestUtils.java +++ b/kafka-rest/src/test/java/io/confluent/kafkarest/TestUtils.java @@ -262,36 +262,37 @@ public static void assertTopicContains(String bootstrapServers, String to Properties deserializerProps, boolean validateContents) { - KafkaConsumer consumer = createConsumer(bootstrapServers, "testgroup", "consumer0", - 20000L, keyDeserializerClassName, valueDeserializerClassName, deserializerProps); + try(KafkaConsumer consumer = createConsumer(bootstrapServers, "testgroup", "consumer0", + 20000L, keyDeserializerClassName, valueDeserializerClassName, deserializerProps)) { - Map msgCounts = TestUtils.topicCounts(consumer, topicName, records, partition); + Map msgCounts = TestUtils.topicCounts(consumer, topicName, records, partition); - Map refMsgCounts = new HashMap<>(); - for (ProduceRecord rec : records) { - Object msg = TestUtils.encodeComparable(rec.getValue()); - refMsgCounts.put(msg, (refMsgCounts.get(msg) == null ? 0 : refMsgCounts.get(msg)) + 1); - } - - // We can't always easily get the data on both ends to be easily comparable, e.g. when the - // input data is JSON but it's stored in Avro, so in some cases we use an alternative that - // just checks the # of each count matches up, e.g. 
if we have (a => 3, b => 4) input and (c - // => 4, d => 3), it would pass since both have (3 => 1, 4 => 1) counts, even though their - // encoded values differ. This, of course, assumes we don't get collisions. - if (validateContents) { - assertEquals(msgCounts, refMsgCounts); - } else { - Map refCountCounts = new HashMap(); - for (Map.Entry entry : refMsgCounts.entrySet()) { - Integer count = refCountCounts.get(entry.getValue()); - refCountCounts.put(entry.getValue(), (count == null ? 0 : count) + 1); + Map refMsgCounts = new HashMap<>(); + for (ProduceRecord rec : records) { + Object msg = TestUtils.encodeComparable(rec.getValue()); + refMsgCounts.put(msg, (refMsgCounts.get(msg) == null ? 0 : refMsgCounts.get(msg)) + 1); } - Map msgCountCounts = new HashMap(); - for (Map.Entry entry : msgCounts.entrySet()) { - Integer count = msgCountCounts.get(entry.getValue()); - msgCountCounts.put(entry.getValue(), (count == null ? 0 : count) + 1); + + // We can't always easily get the data on both ends to be easily comparable, e.g. when the + // input data is JSON but it's stored in Avro, so in some cases we use an alternative that + // just checks the # of each count matches up, e.g. if we have (a => 3, b => 4) input and (c + // => 4, d => 3), it would pass since both have (3 => 1, 4 => 1) counts, even though their + // encoded values differ. This, of course, assumes we don't get collisions. + if (validateContents) { + assertEquals(msgCounts, refMsgCounts); + } else { + Map refCountCounts = new HashMap(); + for (Map.Entry entry : refMsgCounts.entrySet()) { + Integer count = refCountCounts.get(entry.getValue()); + refCountCounts.put(entry.getValue(), (count == null ? 0 : count) + 1); + } + Map msgCountCounts = new HashMap(); + for (Map.Entry entry : msgCounts.entrySet()) { + Integer count = msgCountCounts.get(entry.getValue()); + msgCountCounts.put(entry.getValue(), (count == null ? 
0 : count) + 1); + } + assertEquals(refCountCounts, msgCountCounts); } - assertEquals(refCountCounts, msgCountCounts); } } @@ -332,7 +333,6 @@ private static Map topicCounts(final KafkaConsumer throw new RuntimeException("InterruptedException occurred", e); } - consumer.close(); return msgCounts; } diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ClusterTestHarness.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ClusterTestHarness.java index 6bb16caab0..f226efd7c8 100644 --- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ClusterTestHarness.java +++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ClusterTestHarness.java @@ -142,11 +142,9 @@ public void setUp() throws Exception { zookeeper = new EmbeddedZookeeper(); zkConnect = String.format("127.0.0.1:%d", zookeeper.port()); Time time = Time.SYSTEM; - zkClient = new KafkaZkClient( - new ZooKeeperClient(zkConnect, zkSessionTimeout, zkConnectionTimeout, Integer.MAX_VALUE, time, - "testMetricGroup", "testMetricGroupType"), - JaasUtils.isZkSecurityEnabled(), - time); + zkClient = KafkaZkClient.apply( + zkConnect, JaasUtils.isZkSecurityEnabled(), zkSessionTimeout, zkConnectionTimeout, Integer.MAX_VALUE, time, + "testMetricGroup", "testMetricGroupType", Option.apply("test")); configs = new Vector<>(); servers = new Vector<>(); @@ -209,6 +207,7 @@ public void setUp() throws Exception { getProducerPool(restConfig), null, null, + null, getScalaConsumersContext(restConfig)); restServer = restApp.createServer(); restServer.start(); @@ -266,6 +265,7 @@ public void tearDown() throws Exception { for (KafkaServer server : servers) { server.shutdown(); + server.awaitShutdown(); } for (KafkaServer server : servers) { CoreUtils.delete(server.config().logDirs()); diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerAvroTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerAvroTest.java index 85b6f601af..8c5404d6bd 100644 --- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerAvroTest.java +++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerAvroTest.java @@ -35,7 +35,7 @@ import io.confluent.kafkarest.entities.PartitionReplica; import io.confluent.kafkarest.entities.Topic; import kafka.utils.TestUtils; -import scala.collection.JavaConversions; +import scala.collection.JavaConverters; public class ConsumerAvroTest extends AbstractConsumerTest { @@ -100,7 +100,7 @@ public void setUp() throws Exception { final int numPartitions = 3; final int replicationFactor = 1; TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor, - JavaConversions.asScalaBuffer(this.servers), new Properties()); + JavaConverters.asScalaBuffer(this.servers), new Properties()); } @Test diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerBinaryTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerBinaryTest.java index afbcb03333..6b8ef20da1 100644 --- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerBinaryTest.java +++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerBinaryTest.java @@ -36,7 +36,7 @@ import io.confluent.kafkarest.entities.Topic; import io.confluent.rest.exceptions.ConstraintViolationExceptionMapper; import kafka.utils.TestUtils; -import scala.collection.JavaConversions; +import scala.collection.JavaConverters; import static io.confluent.kafkarest.TestUtils.assertErrorResponse; @@ -77,7 +77,7 @@ 
public void setUp() throws Exception { final int numPartitions = 3; final int replicationFactor = 1; TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor, - JavaConversions.asScalaBuffer(this.servers), new Properties()); + JavaConverters.asScalaBuffer(this.servers), new Properties()); } @Test diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerGroupsTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerGroupsTest.java new file mode 100644 index 0000000000..d5583429ba --- /dev/null +++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerGroupsTest.java @@ -0,0 +1,187 @@ +/** + * Copyright 2019 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +package io.confluent.kafkarest.integration; + +import io.confluent.kafkarest.RestConfigUtils; +import io.confluent.kafkarest.Versions; +import io.confluent.kafkarest.entities.*; +import kafka.serializer.Decoder; +import kafka.serializer.DefaultDecoder; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import scala.collection.JavaConverters; + +import javax.ws.rs.core.GenericType; +import javax.ws.rs.core.Response; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; + +import static io.confluent.kafkarest.TestUtils.assertOKResponse; + +public class ConsumerGroupsTest extends AbstractProducerTest { + + private final String topicName = "topic"; + private final String topic2 = "topic2"; + private final String groupName = "testconsumergroup"; + + private final List topicRecordsWithKeys = Arrays.asList( + new BinaryTopicProduceRecord("key".getBytes(), "value".getBytes()), + new BinaryTopicProduceRecord("key".getBytes(), "value2".getBytes()), + new BinaryTopicProduceRecord("key".getBytes(), "value3".getBytes()), + new BinaryTopicProduceRecord("key".getBytes(), "value4".getBytes()) + ); + + + + private final List produceOffsets = Arrays.asList( + new PartitionOffset(0, 0L, null, null), + new PartitionOffset(0, 1L, null, null), + new PartitionOffset(0, 2L, null, null), + new PartitionOffset(0, 3L, null, null) + ); + + private static final Decoder binaryDecoder = new DefaultDecoder(null); + + private final int defaultRebalancedTimeoutInMillis = 5000; + + private KafkaConsumer createNativeConsumer(String groupName, Collection topics) { + Properties properties = new Properties(); + String deserializer = StringDeserializer.class.getName(); + properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, + RestConfigUtils.bootstrapBrokers(restConfig)); + 
+  private KafkaConsumer<String, String> createNativeConsumer(
+      String groupName, Collection<String> topics) {
+    Properties properties = new Properties();
+    String deserializer = StringDeserializer.class.getName();
+    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
+        RestConfigUtils.bootstrapBrokers(restConfig));
+    properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupName);
+    properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
+    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserializer);
+    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserializer);
+    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
+    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
+    kafkaConsumer.subscribe(topics);
+    return kafkaConsumer;
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    final int numPartitions = 3;
+    final int replicationFactor = 1;
+    kafka.utils.TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor,
+        JavaConverters.asScalaBuffer(this.servers),
+        new Properties());
+    kafka.utils.TestUtils.createTopic(zkClient, topic2, numPartitions, replicationFactor,
+        JavaConverters.asScalaBuffer(this.servers),
+        new Properties());
+  }
+
+  @Test
+  public void testGetConsumerGroups() throws InterruptedException {
+    testProduceToTopic(topicName, topicRecordsWithKeys, ByteArrayDeserializer.class.getName(),
+        ByteArrayDeserializer.class.getName(),
+        produceOffsets, false);
+    try (KafkaConsumer<String, String> kafkaConsumer = createNativeConsumer(groupName,
+        Collections.singleton(topicName))) {
+      Thread.sleep(defaultRebalancedTimeoutInMillis);
+
+      kafkaConsumer.poll(Duration.ofMillis(100));
+      kafkaConsumer.commitSync();
+
+      Response response = request("/groups").get();
+      assertOKResponse(response, Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
+      List<ConsumerGroup> groups = response.readEntity(new GenericType<List<ConsumerGroup>>() {
+      });
+      ConsumerGroupCoordinator expectedCoordinator = groups.stream()
+          .filter(g -> groupName.equals(g.getGroupId()))
+          .collect(Collectors.toList()).get(0).getCoordinator();
+      Assert.assertTrue(groups.contains(new ConsumerGroup(groupName, expectedCoordinator)));
+    }
+  }
+
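// The tests in this file settle group rebalances with a fixed
// Thread.sleep(defaultRebalancedTimeoutInMillis). A minimal sketch of a less
// timing-sensitive wait, polling until the consumer actually owns partitions,
// offered as an illustrative alternative and not part of this patch:

private static void waitForAssignment(KafkaConsumer<String, String> consumer, long timeoutMs) {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (consumer.assignment().isEmpty()) {
    // poll() drives the consumer's group-membership protocol forward
    consumer.poll(Duration.ofMillis(100));
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError("consumer was not assigned partitions in time");
    }
  }
}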
+  @Test
+  public void testGetConsumerGroupMetrics() throws InterruptedException {
+    testProduceToTopic(topicName, topicRecordsWithKeys, ByteArrayDeserializer.class.getName(),
+        ByteArrayDeserializer.class.getName(),
+        produceOffsets, false);
+    try (KafkaConsumer<String, String> kafkaConsumer = createNativeConsumer(groupName,
+        Collections.singleton(topicName))) {
+      Thread.sleep(defaultRebalancedTimeoutInMillis);
+
+      kafkaConsumer.poll(Duration.ofMillis(100));
+      kafkaConsumer.commitSync();
+
+      Response response = request("/groups/" + groupName + "/partitions").get();
+      assertOKResponse(response, Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
+      ConsumerGroupSubscription groupInfo = response.readEntity(ConsumerGroupSubscription.class);
+      Assert.assertNotNull("Group information should not be null", groupInfo);
+      Assert.assertEquals(3, groupInfo.getTopicPartitionList().size());
+    }
+  }
+
+  @Test
+  public void testGetConsumerGroupTopics() throws InterruptedException {
+    testProduceToTopic(topicName, topicRecordsWithKeys, ByteArrayDeserializer.class.getName(),
+        ByteArrayDeserializer.class.getName(),
+        produceOffsets, false);
+    try (KafkaConsumer<String, String> kafkaConsumer = createNativeConsumer(groupName,
+        Collections.singleton(topicName))) {
+      Thread.sleep(defaultRebalancedTimeoutInMillis);
+
+      kafkaConsumer.poll(Duration.ofMillis(100));
+      kafkaConsumer.commitSync();
+
+      Response response = request("/groups/" + groupName + "/topics").get();
+      assertOKResponse(response, Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
+      Set<Topic> groupTopics = response.readEntity(new GenericType<Set<Topic>>() {});
+      Assert.assertEquals(1, groupTopics.size());
+      Assert.assertEquals(Collections.singleton(new Topic(topicName, null, null)), groupTopics);
+
+      final int expectedSize = 1;
+      Map<String, String> requestParameters = new HashMap<>();
+      requestParameters.put("page_offset", "0");
+      requestParameters.put("page_size", "" + expectedSize);
+      Response filteredResponse = request("/groups/" + groupName + "/topics/" + topicName,
+          requestParameters).get();
+      assertOKResponse(filteredResponse, Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
+      ConsumerGroupSubscription filteredTopicInfo =
+          filteredResponse.readEntity(ConsumerGroupSubscription.class);
+      Assert.assertNotNull("Group information should not be null", filteredTopicInfo);
+      Assert.assertEquals(expectedSize, filteredTopicInfo.getTopicPartitionList().size());
+      Assert.assertEquals(3, filteredTopicInfo.getTopicPartitionCount().longValue());
+
+      Response topicGroupResponse = request("/groups/" + groupName + "/topics/" + topicName).get();
+      assertOKResponse(topicGroupResponse, Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
+      ConsumerGroupSubscription nonFilteredTopicInfo =
+          topicGroupResponse.readEntity(ConsumerGroupSubscription.class);
+      Assert.assertNotNull("Group information should not be null", nonFilteredTopicInfo);
+      Assert.assertEquals(3, nonFilteredTopicInfo.getTopicPartitionList().size());
+      Assert.assertEquals(3, nonFilteredTopicInfo.getTopicPartitionCount().longValue());
+    }
+  }
+
+}
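The pagination assertions above exercise the page_offset and page_size query
parameters of the new /groups/{group}/topics/{topic} endpoint. A sketch of how
a client inside this test class could walk every page, assuming (as the
assertions suggest) that page_offset is an item offset and that
getTopicPartitionCount() reports the unpaged total; hypothetical helper, not
part of the patch, and it additionally needs java.util.ArrayList:

    List<ConsumerTopicPartitionDescription> fetchAllPartitions(int pageSize) {
      List<ConsumerTopicPartitionDescription> all = new ArrayList<>();
      for (int offset = 0; ; offset += pageSize) {
        Map<String, String> params = new HashMap<>();
        params.put("page_offset", String.valueOf(offset));
        params.put("page_size", String.valueOf(pageSize));
        ConsumerGroupSubscription page =
            request("/groups/" + groupName + "/topics/" + topicName, params)
                .get().readEntity(ConsumerGroupSubscription.class);
        all.addAll(page.getTopicPartitionList());
        if (offset + pageSize >= page.getTopicPartitionCount()) {
          return all;  // last page reached according to the reported total
        }
      }
    }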
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerTimeoutTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerTimeoutTest.java
index abfc506b6e..ca35008c6b 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerTimeoutTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/ConsumerTimeoutTest.java
@@ -26,7 +26,7 @@
 import io.confluent.kafkarest.entities.BinaryConsumerRecord;
 import io.confluent.kafkarest.entities.EmbeddedFormat;
 import kafka.utils.TestUtils;
-import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
 
 public class ConsumerTimeoutTest extends AbstractConsumerTest {
 
@@ -48,7 +48,7 @@ public void setUp() throws Exception {
     final int numPartitions = 3;
     final int replicationFactor = 1;
     TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor,
-        JavaConversions.asScalaBuffer(this.servers), new Properties());
+        JavaConverters.asScalaBuffer(this.servers), new Properties());
   }
 
   @Test
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/MetadataAPITest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/MetadataAPITest.java
index bda20a0189..8c79914836 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/MetadataAPITest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/MetadataAPITest.java
@@ -30,7 +30,7 @@
 import io.confluent.kafkarest.entities.Partition;
 import io.confluent.kafkarest.entities.PartitionReplica;
 import io.confluent.kafkarest.entities.Topic;
-import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
 
 import static io.confluent.kafkarest.TestUtils.assertErrorResponse;
 import static io.confluent.kafkarest.TestUtils.assertOKResponse;
@@ -82,9 +82,9 @@ public MetadataAPITest() {
   public void setUp() throws Exception {
     super.setUp();
     kafka.utils.TestUtils.createTopic(zkClient, topic1Name, topic1Partitions.size(), numReplicas,
-        JavaConversions.asScalaBuffer(this.servers), new Properties());
+        JavaConverters.asScalaBuffer(this.servers), new Properties());
     kafka.utils.TestUtils.createTopic(zkClient, topic2Name, topic2Partitions.size(), numReplicas,
-        JavaConversions.asScalaBuffer(this.servers), topic2Configs);
+        JavaConverters.asScalaBuffer(this.servers), topic2Configs);
   }
 
   @Test
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerAvroTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerAvroTest.java
index 09a5ab065c..4d85c45bdc 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerAvroTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerAvroTest.java
@@ -24,7 +24,7 @@
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
 
 import javax.ws.rs.core.GenericType;
 import javax.ws.rs.core.Response;
@@ -91,7 +91,7 @@ public void setUp() throws Exception {
     final int numPartitions = 1;
     final int replicationFactor = 1;
     TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor,
-        JavaConversions.asScalaBuffer(this.servers), new Properties());
+        JavaConverters.asScalaBuffer(this.servers), new Properties());
   }
 
   @Test
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerBinaryTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerBinaryTest.java
index f24ad1d35a..32f2877b41 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerBinaryTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/SimpleConsumerBinaryTest.java
@@ -21,7 +21,7 @@
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.junit.Before;
 import org.junit.Test;
-import scala.collection.JavaConversions;
+import scala.collection.JavaConverters;
 
 import javax.ws.rs.core.GenericType;
 import javax.ws.rs.core.Response;
@@ -61,7 +61,7 @@ public void setUp() throws Exception {
     final int numPartitions = 1;
     final int replicationFactor = 1;
     TestUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor,
-        JavaConversions.asScalaBuffer(this.servers), new Properties());
+        JavaConverters.asScalaBuffer(this.servers), new Properties());
   }
 
   @Test
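The JavaConversions to JavaConverters renames throughout these test files
follow the Scala 2.12 deprecation of the implicit scala.collection.JavaConversions
API in favor of explicit conversions. Called from Java the change is mechanical:
JavaConverters.asScalaBuffer returns a mutable Buffer, which conforms to the
scala.collection.Seq parameter that kafka.utils.TestUtils.createTopic expects,
so the call sites did not otherwise change. A minimal sketch with a
hypothetical list (not code from this patch):

    import java.util.Arrays;
    import java.util.List;
    import scala.collection.JavaConverters;
    import scala.collection.Seq;

    List<String> javaList = Arrays.asList("a", "b");
    // explicit conversion; no implicits are in scope from Java anyway
    Seq<String> scalaSeq = JavaConverters.asScalaBuffer(javaList);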
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/TestKafkaRestApplication.java b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/TestKafkaRestApplication.java
index 369780ad70..bbd3b39509 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/integration/TestKafkaRestApplication.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/integration/TestKafkaRestApplication.java
@@ -17,6 +17,7 @@
 import io.confluent.kafkarest.*;
 import io.confluent.kafkarest.v2.KafkaConsumerManager;
+import io.confluent.rest.RestConfigException;
 
 import javax.ws.rs.core.Configurable;
 
@@ -29,25 +30,35 @@ public class TestKafkaRestApplication extends KafkaRestApplication {
   ProducerPool producerPoolInjected;
   KafkaConsumerManager kafkaConsumerManagerInjected;
   AdminClientWrapper adminClientWrapperInjected;
+  GroupMetadataObserver groupMetadataObserverInjected;
   ScalaConsumersContext scalaConsumersContextInjected;
 
+  public TestKafkaRestApplication(KafkaRestConfig config, ProducerPool producerPool,
+      ConsumerManager consumerManager,
+      SimpleConsumerFactory simpleConsumerFactory, SimpleConsumerManager simpleConsumerManager)
+      throws IllegalAccessException, InstantiationException, RestConfigException {
+    this(config, producerPool, null, null, null, null);
+  }
 
-  public TestKafkaRestApplication(KafkaRestConfig config,
-                                  ProducerPool producerPool,
+  public TestKafkaRestApplication(KafkaRestConfig config, ProducerPool producerPool,
                                   KafkaConsumerManager kafkaConsumerManager,
                                   AdminClientWrapper adminClientWrapper,
+                                  GroupMetadataObserver groupMetadataObserver,
                                   ScalaConsumersContext scalaConsumersContext) {
     super(config);
     producerPoolInjected = producerPool;
     kafkaConsumerManagerInjected = kafkaConsumerManager;
     adminClientWrapperInjected = adminClientWrapper;
+    groupMetadataObserverInjected = groupMetadataObserver;
     scalaConsumersContextInjected = scalaConsumersContext;
   }
 
   @Override
   public void setupResources(Configurable config, KafkaRestConfig appConfig) {
-    setupInjectedResources(config, appConfig, producerPoolInjected, kafkaConsumerManagerInjected,
-        adminClientWrapperInjected, scalaConsumersContextInjected);
+    setupInjectedResources(config, appConfig,
+        producerPoolInjected,
+        kafkaConsumerManagerInjected, adminClientWrapperInjected,
+        groupMetadataObserverInjected, scalaConsumersContextInjected);
   }
 }
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/AbstractConsumerResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/AbstractConsumerResourceTest.java
index 832eec9d02..ba5a38a134 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/AbstractConsumerResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/AbstractConsumerResourceTest.java
@@ -66,7 +66,8 @@ public AbstractConsumerResourceTest() throws RestConfigException {
     mdObserver = EasyMock.createMock(MetadataObserver.class);
     consumerManager = EasyMock.createMock(ConsumerManager.class);
     ScalaConsumersContext scalaConsumersContext = new ScalaConsumersContext(mdObserver, consumerManager, null);
-    ctx = new DefaultKafkaRestContext(config, null, null, null, scalaConsumersContext);
+    ctx = new DefaultKafkaRestContext(config, null,
+        null, null, null, scalaConsumersContext);
     ContextInvocationHandler contextInvocationHandler = new ContextInvocationHandler();
     KafkaRestContext contextProxy = (KafkaRestContext) Proxy.newProxyInstance(KafkaRestContext.class.getClassLoader(), new
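For orientation while reading the remaining call-site updates: the
DefaultKafkaRestContext constructor has grown a GroupMetadataObserver slot, so
every test context now passes six arguments. The parameter order below is
inferred from the call sites in this patch, not quoted from the class itself:

    DefaultKafkaRestContext ctx = new DefaultKafkaRestContext(
        config,                  // KafkaRestConfig
        producerPool,            // ProducerPool
        kafkaConsumerManager,    // v2 KafkaConsumerManager, null when unused
        adminClientWrapper,      // AdminClientWrapper, null when unused
        groupMetadataObserver,   // new: GroupMetadataObserver, null when unused
        scalaConsumersContext);  // ScalaConsumersContext, null when unused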
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/BrokersResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/BrokersResourceTest.java
index 9c9f34a8db..b5f0ca9fd1 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/BrokersResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/BrokersResourceTest.java
@@ -14,12 +14,22 @@
  */
 package io.confluent.kafkarest.unit;
 
+import org.easymock.EasyMock;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import javax.ws.rs.core.GenericType;
+import javax.ws.rs.core.Response;
+
 import io.confluent.kafkarest.AdminClientWrapper;
 import io.confluent.kafkarest.DefaultKafkaRestContext;
 import io.confluent.kafkarest.KafkaRestApplication;
 import io.confluent.kafkarest.KafkaRestConfig;
 import io.confluent.kafkarest.ProducerPool;
 import io.confluent.kafkarest.TestUtils;
+import io.confluent.kafkarest.MetadataObserver;
 import io.confluent.kafkarest.entities.BrokerList;
 import io.confluent.kafkarest.resources.BrokersResource;
 import io.confluent.rest.EmbeddedServerTestHarness;
@@ -30,13 +40,7 @@
 import org.apache.kafka.common.errors.SaslAuthenticationException;
 import org.apache.kafka.common.internals.KafkaFutureImpl;
 import org.apache.kafka.common.protocol.Errors;
-import org.easymock.EasyMock;
-import org.junit.Before;
-import org.junit.Test;
-import javax.ws.rs.core.GenericType;
-import javax.ws.rs.core.Response;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Properties;
 
@@ -48,17 +52,20 @@ public class BrokersResourceTest extends EmbeddedServerTestHarness<KafkaRestConfig, KafkaRestApplication> {
 
   private AdminClient adminClient;
+  private MetadataObserver metadataObserver;
   private ProducerPool producerPool;
   private DefaultKafkaRestContext ctx;
 
   public BrokersResourceTest() throws RestConfigException {
     adminClient = EasyMock.createMock(AdminClient.class);
     AdminClientWrapper adminClientWrapper = new AdminClientWrapper(new KafkaRestConfig(new Properties()), adminClient);
+    metadataObserver = EasyMock.createMock(MetadataObserver.class);
     producerPool = EasyMock.createMock(ProducerPool.class);
     ctx = new DefaultKafkaRestContext(config,
         producerPool,
         null,
         adminClientWrapper,
+        null,
         null
     );
     addResource(new BrokersResource(ctx));
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/ConsumerGroupsTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/ConsumerGroupsTest.java
new file mode 100644
index 0000000000..6ae71a61e6
--- /dev/null
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/ConsumerGroupsTest.java
@@ -0,0 +1,121 @@
+package io.confluent.kafkarest.unit;
+
+import io.confluent.kafkarest.*;
+import io.confluent.kafkarest.entities.*;
+import io.confluent.kafkarest.resources.ConsumerGroupsResource;
+import io.confluent.rest.EmbeddedServerTestHarness;
+import io.confluent.rest.RestConfigException;
+import org.easymock.EasyMock;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.ws.rs.core.GenericType;
+import javax.ws.rs.core.Response;
+import java.util.*;
+
+import static io.confluent.kafkarest.TestUtils.assertOKResponse;
+import static org.junit.Assert.assertEquals;
+
+public class ConsumerGroupsTest
+    extends EmbeddedServerTestHarness<KafkaRestConfig, KafkaRestApplication> {
+
+  private final GroupMetadataObserver groupMetadataObserver;
+  private final ProducerPool producerPool;
+  private final DefaultKafkaRestContext ctx;
+
+  public ConsumerGroupsTest() throws RestConfigException {
+    groupMetadataObserver = EasyMock.createMock(GroupMetadataObserver.class);
+    producerPool = EasyMock.createMock(ProducerPool.class);
+    ctx = new DefaultKafkaRestContext(config, producerPool,
+        null, null, groupMetadataObserver, null);
+
+    addResource(new ConsumerGroupsResource(ctx));
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    EasyMock.reset(groupMetadataObserver, producerPool);
+  }
+
+  @Test
+  public void testListGroups() throws Exception {
+    for (TestUtils.RequestMediaType mediatype : TestUtils.V1_ACCEPT_MEDIATYPES) {
+      final List<ConsumerGroup> groups =
+          Arrays.asList(new ConsumerGroup("foo", new ConsumerGroupCoordinator("127.0.0.1", 9092)),
+              new ConsumerGroup("bar", new ConsumerGroupCoordinator("127.0.0.1", 9093)));
+      EasyMock.expect(groupMetadataObserver.getConsumerGroupList())
+          .andReturn(groups);
+      EasyMock.replay(groupMetadataObserver);
+
+      Response response = request("/groups", mediatype.header).get();
+      assertOKResponse(response, mediatype.expected);
+      final List<ConsumerGroup> consumerGroups = TestUtils.tryReadEntityOrLog(response,
+          new GenericType<List<ConsumerGroup>>() {});
+      assertEquals(groups.size(), consumerGroups.size());
+      assertEquals(groups, consumerGroups);
+      EasyMock.verify(groupMetadataObserver);
+      EasyMock.reset(groupMetadataObserver, producerPool);
+    }
+  }
+
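// Each media-type iteration in the tests of this file follows the same
// EasyMock lifecycle: expect(), replay(), exercise the resource, verify(),
// then reset() so the next media type starts from a clean mock. A sketch of
// that skeleton factored into a reusable helper (hypothetical, not part of
// this patch):

private <T> T roundTrip(TestUtils.RequestMediaType mediatype, String path, GenericType<T> type) {
  Response response = request(path, mediatype.header).get();
  assertOKResponse(response, mediatype.expected);
  T entity = TestUtils.tryReadEntityOrLog(response, type);
  EasyMock.verify(groupMetadataObserver);                 // expectations were met
  EasyMock.reset(groupMetadataObserver, producerPool);    // clean slate for the next loop
  return entity;
}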
+  @Test
+  public void testListTopicsByGroup() throws Exception {
+    for (TestUtils.RequestMediaType mediatype : TestUtils.V1_ACCEPT_MEDIATYPES) {
+      final Set<Topic> groups = new HashSet<>();
+      groups.add(new Topic("foo", null, null));
+      groups.add(new Topic("bar", null, null));
+      EasyMock.expect(groupMetadataObserver.getConsumerGroupTopicInformation("foo"))
+          .andReturn(groups);
+      EasyMock.replay(groupMetadataObserver);
+
+      Response response = request("/groups/foo/topics", mediatype.header).get();
+      assertOKResponse(response, mediatype.expected);
+      final Set<Topic> consumerGroups = TestUtils.tryReadEntityOrLog(response,
+          new GenericType<Set<Topic>>() {});
+      assertEquals(groups.size(), consumerGroups.size());
+      assertEquals(groups, consumerGroups);
+      EasyMock.verify(groupMetadataObserver);
+      EasyMock.reset(groupMetadataObserver, producerPool);
+    }
+  }
+
+  @Test
+  public void testTopicGroupOffsets() throws Exception {
+    for (TestUtils.RequestMediaType mediatype : TestUtils.V1_ACCEPT_MEDIATYPES) {
+      final ConsumerGroupSubscription consumerGroupSubscription = new ConsumerGroupSubscription(
+          Collections.singletonList(
+              new ConsumerTopicPartitionDescription("cons1", "127.0.0.1", "topic", 0, 2L, 0L, 2L)),
+          1, new ConsumerGroupCoordinator("127.0.0.1", 9092));
+      EasyMock.expect(
+          groupMetadataObserver.getConsumerGroupInformation("foo", Collections.singleton("topic")))
+          .andReturn(consumerGroupSubscription);
+      EasyMock.replay(groupMetadataObserver);
+
+      Response response = request("/groups/foo/topics/topic", mediatype.header).get();
+      assertOKResponse(response, mediatype.expected);
+      final ConsumerGroupSubscription consumerGroupOffsets = TestUtils.tryReadEntityOrLog(response,
+          new GenericType<ConsumerGroupSubscription>() {});
+      assertEquals(consumerGroupSubscription, consumerGroupOffsets);
+      EasyMock.verify(groupMetadataObserver);
+      EasyMock.reset(groupMetadataObserver, producerPool);
+    }
+  }
+
+  @Test
+  public void testAllTopicsGroupOffsets() throws Exception {
+    for (TestUtils.RequestMediaType mediatype : TestUtils.V1_ACCEPT_MEDIATYPES) {
+      final ConsumerGroupSubscription consumerGroupSubscription = new ConsumerGroupSubscription(
+          Arrays.asList(
+              new ConsumerTopicPartitionDescription("cons1", "127.0.0.1", "topic", 0, 2L, 0L, 2L),
+              new ConsumerTopicPartitionDescription("cons1", "127.0.0.1", "topic1", 0, 2L, 0L, 2L)),
+          1, new ConsumerGroupCoordinator("127.0.0.1", 9092));
+      EasyMock.expect(groupMetadataObserver.getConsumerGroupInformation("foo"))
+          .andReturn(consumerGroupSubscription);
+      EasyMock.replay(groupMetadataObserver);
+
+      Response response = request("/groups/foo/partitions", mediatype.header).get();
+      assertOKResponse(response, mediatype.expected);
+      final ConsumerGroupSubscription consumerGroupOffsets = TestUtils.tryReadEntityOrLog(response,
+          new GenericType<ConsumerGroupSubscription>() {});
+      assertEquals(consumerGroupSubscription, consumerGroupOffsets);
+      EasyMock.verify(groupMetadataObserver);
+      EasyMock.reset(groupMetadataObserver, producerPool);
+    }
+  }
+}
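The observer mocked above fronts, at the other end of the stack, the Kafka
AdminClient's consumer-group calls. For orientation, a self-contained sketch
of those public APIs (a standalone example against an assumed local broker,
not code from this patch):

    import java.util.Collection;
    import java.util.Properties;
    import java.util.concurrent.TimeUnit;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.ConsumerGroupListing;

    public class ListGroupsExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumed address
        try (AdminClient admin = AdminClient.create(props)) {
          // listConsumerGroups() returns futures; block with a timeout
          Collection<ConsumerGroupListing> groups =
              admin.listConsumerGroups().all().get(60, TimeUnit.SECONDS);
          for (ConsumerGroupListing g : groups) {
            System.out.println(g.groupId());
          }
        }
      }
    }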
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAbstractConsumeTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAbstractConsumeTest.java
index 311e006e25..c7870a1fab 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAbstractConsumeTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAbstractConsumeTest.java
@@ -46,9 +46,10 @@ public class PartitionsResourceAbstractConsumeTest extends EmbeddedServerTestHarness<KafkaRestConfig, KafkaRestApplication> {
 
   public PartitionsResourceAbstractConsumeTest() throws RestConfigException {
     super();
     simpleConsumerManager = EasyMock.createMock(SimpleConsumerManager.class);
-    ScalaConsumersContext scalaConsumersContext = new ScalaConsumersContext(null, null, simpleConsumerManager);
-    final DefaultKafkaRestContext ctx = new DefaultKafkaRestContext(config, null, null, null,
-        scalaConsumersContext);
+
+    final ScalaConsumersContext scalaConsumersContext = new ScalaConsumersContext(null, null, simpleConsumerManager);
+    final DefaultKafkaRestContext ctx = new DefaultKafkaRestContext(config, null,
+        null, null, null, scalaConsumersContext);
     addResource(new PartitionsResource(ctx));
     addResource(InstantConverterProvider.class);
   }
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAvroProduceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAvroProduceTest.java
index 0c4dafdb9b..ac5addbdc6 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAvroProduceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceAvroProduceTest.java
@@ -85,6 +85,7 @@ public PartitionsResourceAvroProduceTest() throws RestConfigException {
         producerPool,
         null,
         adminClientWrapper,
+        null,
         null
     );
     addResource(new TopicsResource(ctx));
@@ -121,7 +122,7 @@ private Response produceToPartition(String topic, int partition,
                                       final List results) throws Exception {
     final Capture produceCallback =
-        Capture.newInstance();
+        Capture.newInstance();
     EasyMock.expect(adminClientWrapper.topicExists(topic)).andReturn(true);
     EasyMock.expect(adminClientWrapper.partitionExists(topic, partition)).andReturn(true);
     producerPool.produce(EasyMock.eq(topic),
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceBinaryProduceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceBinaryProduceTest.java
index 2d8113d513..33d131d178 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceBinaryProduceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceBinaryProduceTest.java
@@ -80,6 +80,7 @@ public PartitionsResourceBinaryProduceTest() throws RestConfigException {
         producerPool,
         null,
         adminClientWrapper,
+        null,
         null
     );
 
@@ -128,7 +129,7 @@ private Response produceToPartition(String topic, int partition, String a
     request.setRecords(records);
     final Capture produceCallback =
-        Capture.newInstance();
+        Capture.newInstance();
     EasyMock.expect(adminClientWrapper.topicExists(topic)).andReturn(true);
     EasyMock.expect(adminClientWrapper.partitionExists(topic, partition)).andReturn(true);
     producerPool.produce(EasyMock.eq(topic),
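The Capture.newInstance() pattern in these produce tests records the callback
that the resource hands to ProducerPool, so the test can invoke it later and
simulate a completed send. A generic sketch of the mechanism with a
hypothetical Sender interface (standalone example, not this patch's code):

    import org.easymock.Capture;
    import org.easymock.EasyMock;

    public class CaptureSketch {
      interface Sender {
        void send(String topic, Runnable onComplete);
      }

      public static void main(String[] args) {
        Sender sender = EasyMock.createMock(Sender.class);
        Capture<Runnable> callback = Capture.newInstance();
        // record the callback argument instead of matching it exactly
        sender.send(EasyMock.eq("topic"), EasyMock.capture(callback));
        EasyMock.replay(sender);

        sender.send("topic", () -> { });  // code under test triggers the send
        callback.getValue().run();        // the test completes the "request" itself
        EasyMock.verify(sender);
      }
    }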
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceConsumeTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceConsumeTest.java
index dd6acead5c..edc529bf31 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceConsumeTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceConsumeTest.java
@@ -80,6 +80,7 @@ private KafkaRestContext createKafkaRestContext() {
         /* producerPool= */ null,
         kafkaConsumerManager,
         /* adminClientWrapper= */ null,
+        /* groupMetadataObserver= */ null,
         new ScalaConsumersContext(
             /* metadataObserver= */ null,
             /* consumerManager= */ null,
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceTest.java
index 025a495fb5..9ea14e75b1 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/PartitionsResourceTest.java
@@ -66,6 +66,7 @@ public PartitionsResourceTest() throws RestConfigException {
         producerPool,
         null,
         adminClientWrapper,
+        null,
         null
     );
 
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/RootResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/RootResourceTest.java
index 57b051af3f..270737f545 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/RootResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/RootResourceTest.java
@@ -42,7 +42,7 @@ public class RootResourceTest
   private DefaultKafkaRestContext ctx;
 
   public RootResourceTest() throws RestConfigException {
-    ctx = new DefaultKafkaRestContext(config, null, null, null, null);
+    ctx = new DefaultKafkaRestContext(config, null, null, null, null, null);
     addResource(RootResource.class);
   }
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceAvroProduceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceAvroProduceTest.java
index 8282585451..6a7e90ec3d 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceAvroProduceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceAvroProduceTest.java
@@ -106,8 +106,9 @@ public class TopicsResourceAvroProduceTest
   public TopicsResourceAvroProduceTest() throws RestConfigException {
     mdObserver = EasyMock.createMock(MetadataObserver.class);
     producerPool = EasyMock.createMock(ProducerPool.class);
-    ScalaConsumersContext scalaConsumersContext = new ScalaConsumersContext(mdObserver, null, null);
-    ctx = new DefaultKafkaRestContext(config, producerPool, null, null, scalaConsumersContext);
+    ScalaConsumersContext scalaConsumersContext = new ScalaConsumersContext(mdObserver, null, null);
+    ctx = new DefaultKafkaRestContext(config, producerPool, null, null,
+        null, scalaConsumersContext);
 
     addResource(new TopicsResource(ctx));
 
@@ -130,7 +131,7 @@ private Response produceToTopic(String topic, String acceptHeader, String
     final Capture produceCallback =
-        Capture.newInstance();
+        Capture.newInstance();
     producerPool.produce(EasyMock.eq(topic),
         EasyMock.eq((Integer) null),
         EasyMock.eq(recordFormat),
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceBinaryProduceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceBinaryProduceTest.java
index d91458e8d7..298f354c5b 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceBinaryProduceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceBinaryProduceTest.java
@@ -94,7 +94,7 @@ public class TopicsResourceBinaryProduceTest
   public TopicsResourceBinaryProduceTest() throws RestConfigException {
     mdObserver = EasyMock.createMock(MetadataObserver.class);
     producerPool = EasyMock.createMock(ProducerPool.class);
-    ctx = new DefaultKafkaRestContext(config, producerPool, null, null, null);
+    ctx = new DefaultKafkaRestContext(config, producerPool, null, null, null, null);
 
     addResource(new TopicsResource(ctx));
 
@@ -188,7 +188,7 @@ private Response produceToTopic(String topic, String acceptHeader, String
     request.setRecords(records);
     final Capture produceCallback =
-        Capture.newInstance();
+        Capture.newInstance();
     producerPool.produce(EasyMock.eq(topic),
         EasyMock.eq((Integer) null),
         EasyMock.eq(recordFormat),
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceTest.java
index 365c7e0efa..ddc1fd0bf8 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/unit/TopicsResourceTest.java
@@ -52,7 +52,8 @@ public class TopicsResourceTest
   public TopicsResourceTest() throws RestConfigException {
     adminClientWrapper = EasyMock.createMock(AdminClientWrapper.class);
     producerPool = EasyMock.createMock(ProducerPool.class);
-    ctx = new DefaultKafkaRestContext(config, producerPool, null, adminClientWrapper, null);
+    ctx = new DefaultKafkaRestContext(config, producerPool,
+        null, adminClientWrapper, null, null);
 
     addResource(new TopicsResource(ctx));
   }
diff --git a/kafka-rest/src/test/java/io/confluent/kafkarest/v2/PartitionsResourceTest.java b/kafka-rest/src/test/java/io/confluent/kafkarest/v2/PartitionsResourceTest.java
index c8c9c59b09..c954e0f8e4 100644
--- a/kafka-rest/src/test/java/io/confluent/kafkarest/v2/PartitionsResourceTest.java
+++ b/kafka-rest/src/test/java/io/confluent/kafkarest/v2/PartitionsResourceTest.java
@@ -65,6 +65,7 @@ private KafkaRestContext createKafkaRestContext() {
         /* producerPool= */ null,
         consumerManager,
         adminClientWrapper,
+        /* groupMetadataObserver= */ null,
         /* scalaConsumersContext= */ null);
   } catch (RestConfigException e) {
     throw new RuntimeException(e);