Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
finatra-kafka: Add NullKafkaProducer
Problem: `FinagleKafkaProducer`'s class member `producer` tries to make network connections at class-construction time. When running unit tests or deploying the producer to a personal instance, the resulting network-connection failures can be annoying. Because `producer` is evaluated at construction time, a subclass of `FinagleKafkaProducer` cannot override this construction-time network-connection behavior either.

Solution: Introduce a `KafkaProducerBase` trait and a `NullKafkaProducer` class, with both `FinagleKafkaProducer` and `NullKafkaProducer` extending `KafkaProducerBase`.

Result: Unit tests and services deployed to a personal instance can use `NullKafkaProducer`, which produces no network-connection failures.

Differential Revision: https://phabricator.twitter.biz/D429004
- Loading branch information
Showing
4 changed files
with
118 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
46 changes: 46 additions & 0 deletions
46
kafka/src/main/scala/com/twitter/finatra/kafka/producers/KafkaProducerBase.scala
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,46 @@ | ||
package com.twitter.finatra.kafka.producers | ||
|
||
import com.twitter.inject.Logging | ||
import com.twitter.util.{Closable, Future, Time} | ||
import java.util | ||
import org.apache.kafka.clients.consumer.OffsetAndMetadata | ||
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata} | ||
import org.apache.kafka.common.{PartitionInfo, TopicPartition} | ||
|
||
/**
 * An interface for publishing events in key/value pairs to Kafka and
 * returning a [[com.twitter.util.Future]].
 *
 * Concrete implementations may be network-backed (a real Kafka producer)
 * or no-ops (e.g. `NullKafkaProducer` for unit tests and personal
 * instances where broker connections are undesirable).
 *
 * @tparam K type of the key in key/value pairs to be published to Kafka
 * @tparam V type of the value in key/value pairs to be published to Kafka
 */
trait KafkaProducerBase[K, V] extends Closable with Logging {

  /**
   * Publishes a single key/value pair to the given topic.
   *
   * @param topic the Kafka topic to publish to
   * @param key the record key
   * @param value the record value
   * @param timestamp the record timestamp
   *                  (NOTE(review): presumably epoch millis, matching
   *                  Kafka's `ProducerRecord` timestamp — confirm)
   * @param partitionIdx the partition to publish to, or [[None]] to let
   *                     the underlying partitioner choose
   * @return a [[Future]] of the metadata for the published record
   */
  def send(
    topic: String,
    key: K,
    value: V,
    timestamp: Long,
    partitionIdx: Option[Integer] = None
  ): Future[RecordMetadata]

  /**
   * Publishes the given [[ProducerRecord]].
   *
   * @return a [[Future]] of the metadata for the published record
   */
  def send(producerRecord: ProducerRecord[K, V]): Future[RecordMetadata]

  /** See [[org.apache.kafka.clients.producer.KafkaProducer#initTransactions]]. */
  def initTransactions(): Unit

  /** See [[org.apache.kafka.clients.producer.KafkaProducer#beginTransaction]]. */
  def beginTransaction(): Unit

  /**
   * Sends the given consumer offsets as part of the current transaction.
   *
   * @param offsets offsets to commit, keyed by topic-partition
   * @param consumerGroupId the consumer group the offsets belong to
   */
  def sendOffsetsToTransaction(
    offsets: Map[TopicPartition, OffsetAndMetadata],
    consumerGroupId: String
  ): Unit

  /** See [[org.apache.kafka.clients.producer.KafkaProducer#commitTransaction]]. */
  def commitTransaction(): Unit

  /** See [[org.apache.kafka.clients.producer.KafkaProducer#abortTransaction]]. */
  def abortTransaction(): Unit

  /** Flushes any buffered records; semantics follow the implementation. */
  def flush(): Unit

  /** Returns partition metadata for the given topic. */
  def partitionsFor(topic: String): util.List[PartitionInfo]

  /** Releases any resources held by this producer by the given deadline. */
  def close(deadline: Time): Future[Unit]
}
56 changes: 56 additions & 0 deletions
56
kafka/src/main/scala/com/twitter/finatra/kafka/producers/NullKafkaProducer.scala
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,56 @@ | ||
package com.twitter.finatra.kafka.producers | ||
import com.twitter.util.{Future, Time} | ||
import java.util | ||
import org.apache.kafka.clients.consumer.OffsetAndMetadata | ||
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata} | ||
import org.apache.kafka.common.{PartitionInfo, TopicPartition} | ||
|
||
/**
 * A no-op [[KafkaProducerBase]]. No network connection is created and
 * events are discarded, making this producer useful in unit tests
 * and as a default in situations where event publishing is not needed.
 *
 * Every `send` succeeds immediately with placeholder metadata; all
 * transaction operations are no-ops.
 *
 * @tparam K type of the key in key/value pairs to be published to Kafka
 * @tparam V type of the value in key/value pairs to be published to Kafka
 */
class NullKafkaProducer[K, V] extends KafkaProducerBase[K, V] {

  // Placeholder metadata returned by every send: empty topic, partition 0,
  // and zeroed offsets/timestamps/sizes, since no record is ever published.
  val DataRecord = new RecordMetadata(
    new TopicPartition("", 0),
    0L, 0L, 0L,
    0L, 0, 0
  )

  // Shared empty partition list returned by partitionsFor.
  // NOTE(review): this is a mutable java.util.ArrayList exposed to callers;
  // a caller that mutates it would affect all users of this instance.
  val EmptyList = new util.ArrayList[PartitionInfo]()

  /** Discards the record; immediately succeeds with [[DataRecord]]. */
  def send(
    topic: String,
    key: K,
    value: V,
    timestamp: Long,
    partitionIdx: Option[Integer]
  ): Future[RecordMetadata] = Future.value(DataRecord)

  /** Discards the record; immediately succeeds with [[DataRecord]]. */
  def send(producerRecord: ProducerRecord[K, V]): Future[RecordMetadata] =
    Future.value(DataRecord)

  /** No-op. */
  def initTransactions(): Unit = ()

  /** No-op. */
  def beginTransaction(): Unit = ()

  /** No-op: offsets are discarded. */
  def sendOffsetsToTransaction(
    offsets: Map[TopicPartition, OffsetAndMetadata],
    consumerGroupId: String
  ): Unit = ()

  /** No-op. */
  def commitTransaction(): Unit = ()

  /** No-op. */
  def abortTransaction(): Unit = ()

  /** No-op: there is never anything buffered to flush. */
  def flush(): Unit = ()

  /** Always returns the shared empty partition list. */
  def partitionsFor(topic: String): util.List[PartitionInfo] =
    EmptyList

  /** Nothing to release; completes immediately. */
  def close(deadline: Time): Future[Unit] = Future.Unit
}