Commit 0d7443e

[Issue apache#45] [pulsar-client-kafka-compat] Handled Kafka record headers for producer and consumer

(cherry picked from commit 390131131c22e3b9ebcaf4762c15d3fa1411da5d)
swamymavuri committed Mar 15, 2023
1 parent 9c1f67c commit 0d7443e
Showing 3 changed files with 24 additions and 1 deletion.
PulsarKafkaConsumer.java
@@ -47,6 +47,8 @@
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.header.Headers;
+import org.apache.kafka.common.header.internals.RecordHeaders;
 import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.serialization.Deserializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
@@ -70,6 +72,7 @@
 import org.apache.pulsar.client.util.MessageIdUtils;
 import org.apache.pulsar.common.naming.TopicName;
 import org.apache.pulsar.common.util.FutureUtil;
+import org.bouncycastle.util.encoders.Hex;

 @Slf4j
 public class PulsarKafkaConsumer<K, V> implements Consumer<K, V>, MessageListener<byte[]> {
@@ -405,6 +408,11 @@ public ConsumerRecords<K, V> poll(long timeoutMillis) {
     timestampType = TimestampType.CREATE_TIME;
 }

+Headers headers = new RecordHeaders();
+if (msg.getProperties() != null) {
+    msg.getProperties().forEach((k, v) -> headers.add(k, Hex.decode(v)));
+}
+
 ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, timestamp,
         timestampType, -1, msg.hasKey() ? msg.getKey().length() : 0, msg.getData().length, key, value);

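Note on the consumer-side hunk above: it rebuilds Kafka Headers from the Pulsar message properties by hex-decoding each property value with Bouncy Castle's Hex, the inverse of the producer-side encoding in the next file. Below is a minimal, self-contained sketch of that round trip using the same two Hex utilities the diff imports; the header value "trace-id-1234" is purely illustrative.

    import java.nio.charset.StandardCharsets;

    public class HeaderRoundTripSketch {
        public static void main(String[] args) {
            byte[] original = "trace-id-1234".getBytes(StandardCharsets.UTF_8);

            // Producer side: a Kafka header value (byte[]) is hex-encoded into a
            // Pulsar string property (commons-codec, as imported in the producer diff).
            String property = org.apache.commons.codec.binary.Hex.encodeHexString(original);

            // Consumer side: the property value is hex-decoded back into header bytes
            // (Bouncy Castle, as imported in the consumer diff).
            byte[] restored = org.bouncycastle.util.encoders.Hex.decode(property);

            System.out.println(new String(restored, StandardCharsets.UTF_8)); // prints trace-id-1234
        }
    }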
PulsarKafkaProducer.java
@@ -37,6 +37,7 @@
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;

+import org.apache.commons.codec.binary.Hex;
 import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.Cluster;
@@ -387,6 +388,12 @@ private int buildMessage(TypedMessageBuilder<byte[]> builder, ProducerRecord<K,
     builder.property(KafkaMessageRouter.PARTITION_ID, Integer.toString(partition));
 }

+if (record.headers() != null) {
+    Map<String, String> headerProperties = new HashMap<>();
+    record.headers()
+            .forEach(header -> headerProperties.putIfAbsent(header.key(), Hex.encodeHexString(header.value())));
+    builder.properties(headerProperties);
+}
 return value.length;
 }

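Note on the producer-side hunk above: Pulsar message properties are a Map<String, String>, while Kafka header values are raw byte[], which is why every header value is hex-encoded before the map is handed to builder.properties(...). The sketch below shows that mapping in isolation; the helper class name is hypothetical, and the logic mirrors the hunk, including putIfAbsent, which keeps only the first value when a header key repeats.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.commons.codec.binary.Hex;
    import org.apache.kafka.common.header.Headers;

    // Hypothetical helper mirroring the producer-side mapping shown above.
    final class HeaderPropertyMapper {
        static Map<String, String> toProperties(Headers headers) {
            Map<String, String> properties = new HashMap<>();
            if (headers != null) {
                // Each header value (byte[]) becomes a lowercase hex string so it fits
                // into Pulsar's string-valued message properties.
                headers.forEach(h -> properties.putIfAbsent(h.key(), Hex.encodeHexString(h.value())));
            }
            return properties;
        }
    }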
PulsarKafkaProducerTest.java
@@ -43,6 +43,8 @@
 import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.header.Headers;
+import org.apache.kafka.common.header.internals.RecordHeaders;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.pulsar.client.api.ClientBuilder;
 import org.apache.pulsar.client.api.ProducerBuilder;
@@ -246,7 +248,13 @@ public void testPulsarKafkaSendAvro() throws PulsarClientException {
 foo.setField2("field2");
 foo.setField3(3);

-pulsarKafkaProducer.send(new ProducerRecord<>("topic", 1,foo, bar));
+Headers headers = new RecordHeaders();
+String header1 = "header1";
+String header2 = "header2";
+headers.add(header1, header1.getBytes());
+headers.add(header2, header2.getBytes());
+
+pulsarKafkaProducer.send(new ProducerRecord<>("topic", 1, foo, bar, headers));

 // Verify
 verify(mockTypedMessageBuilder, times(1)).sendAsync();

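The updated test sends a record carrying two string headers. To additionally assert that those headers reach the Pulsar message as hex-encoded properties, a possible extra check inside the same test could look like the sketch below. It is not part of the commit; it assumes a static import of org.mockito.ArgumentMatchers.argThat, reuses the test's existing mockTypedMessageBuilder mock, and the literals are simply the hex encodings of "header1" and "header2".

    // Possible additional assertion, not part of the commit:
    verify(mockTypedMessageBuilder, times(1)).properties(argThat(props ->
            "68656164657231".equals(props.get("header1"))       // hex of "header1"
            && "68656164657232".equals(props.get("header2")))); // hex of "header2"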
