Skip to content

Commit

Permalink
Revert "[Issue apache#45] [pulsar-client-kafka-compat] Handled Kafka record headers for producer and consumer"
Browse files Browse the repository at this point in the history

This reverts commit 0d7443e.
  • Loading branch information
swamymavuri committed Mar 15, 2023
1 parent 056c3cd commit 8460674
Show file tree
Hide file tree
Showing 3 changed files with 1 addition and 24 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
Expand All @@ -72,7 +70,6 @@
import org.apache.pulsar.client.util.MessageIdUtils;
import org.apache.pulsar.common.naming.TopicName;
import org.apache.pulsar.common.util.FutureUtil;
import org.bouncycastle.util.encoders.Hex;

@Slf4j
public class PulsarKafkaConsumer<K, V> implements Consumer<K, V>, MessageListener<byte[]> {
Expand Down Expand Up @@ -408,11 +405,6 @@ public ConsumerRecords<K, V> poll(long timeoutMillis) {
timestampType = TimestampType.CREATE_TIME;
}

Headers headers = new RecordHeaders();
if (msg.getProperties() != null) {
msg.getProperties().forEach((k, v) -> headers.add(k, Hex.decode(v)));
}

ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, timestamp,
timestampType, -1, msg.hasKey() ? msg.getKey().length() : 0, msg.getData().length, key, value);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

import org.apache.commons.codec.binary.Hex;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Cluster;
Expand Down Expand Up @@ -388,12 +387,6 @@ private int buildMessage(TypedMessageBuilder<byte[]> builder, ProducerRecord<K,
builder.property(KafkaMessageRouter.PARTITION_ID, Integer.toString(partition));
}

if (record.headers() != null) {
Map<String, String> headerProperties = new HashMap<>();
record.headers()
.forEach(header -> headerProperties.putIfAbsent(header.key(), Hex.encodeHexString(header.value())));
builder.properties(headerProperties);
}
return value.length;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@
import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.pulsar.client.api.ClientBuilder;
import org.apache.pulsar.client.api.ProducerBuilder;
Expand Down Expand Up @@ -248,13 +246,7 @@ public void testPulsarKafkaSendAvro() throws PulsarClientException {
foo.setField2("field2");
foo.setField3(3);

Headers headers = new RecordHeaders();
String header1 = "header1";
String header2 = "header2";
headers.add(header1, header1.getBytes());
headers.add(header2, header2.getBytes());

pulsarKafkaProducer.send(new ProducerRecord<>("topic", 1, foo, bar, headers));
pulsarKafkaProducer.send(new ProducerRecord<>("topic", 1,foo, bar));

// Verify
verify(mockTypedMessageBuilder, times(1)).sendAsync();
Expand Down

0 comments on commit 8460674

Please sign in to comment.