/
output.go
66 lines (54 loc) · 1.68 KB
/
output.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
package output
import (
"time"
"github.com/Shopify/sarama"
metrics "github.com/rcrowley/go-metrics"
)
// A KafkaProducer encapsulates a connection to a Kafka cluster.
type KafkaProducer struct {
	producer sarama.AsyncProducer // asynchronous producer; messages are buffered and flushed per the config set in NewKafkaProducer
	topic    string               // Kafka topic every message is published to
	registry metrics.Registry     // holds the transmit counters below; exposed via Statistics
	msgTx    metrics.Counter      // count of messages handed to the producer ("messages.transmitted")
	bytesTx  metrics.Counter      // total payload bytes handed to the producer ("messages.bytes.transmitted")
}
// NewKafkaProducer returns an initialized KafkaProducer connected to the
// given brokers, publishing to topic.
//
// bufferTime is the maximum flush interval in milliseconds, bufferBytes the
// flush threshold in bytes, and batchSz the flush threshold in message count.
// An error is returned if the underlying sarama producer cannot be created.
func NewKafkaProducer(brokers []string, topic string, bufferTime, bufferBytes, batchSz int) (*KafkaProducer, error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForLocal   // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy // Compress messages
	config.Producer.Flush.Bytes = bufferBytes
	// Convert to Duration before multiplying so the arithmetic happens in
	// int64, avoiding int overflow on 32-bit platforms (the previous
	// bufferTime*1000000 form multiplied in int first).
	config.Producer.Flush.Frequency = time.Duration(bufferTime) * time.Millisecond
	config.Producer.Flush.Messages = batchSz

	p, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		return nil, err
	}

	k := &KafkaProducer{
		producer: p,
		topic:    topic,
		registry: metrics.NewRegistry(),
		msgTx:    metrics.NewCounter(),
		bytesTx:  metrics.NewCounter(),
	}
	// Register can only fail on a duplicate name; the registry is freshly
	// created above, so these cannot fail.
	k.registry.Register("messages.transmitted", k.msgTx)
	k.registry.Register("messages.bytes.transmitted", k.bytesTx)
	return k, nil
}
// Write enqueues s for asynchronous delivery to the producer's topic and
// bumps the message and byte counters. It does not wait for, or report,
// broker acknowledgement.
func (k *KafkaProducer) Write(s string) {
	msg := &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.StringEncoder(s),
	}
	k.producer.Input() <- msg

	// Counters track what was handed to the producer, not what was acked.
	n := int64(len(s))
	k.msgTx.Inc(1)
	k.bytesTx.Inc(n)
}
// Statistics returns the producer's metrics registry, whose contents
// support JSON marshalling. The returned error is always nil.
func (k *KafkaProducer) Statistics() (metrics.Registry, error) {
	reg := k.registry
	return reg, nil
}
// Close shuts down the underlying sarama producer, returning any error it
// reports while draining.
func (k *KafkaProducer) Close() error {
	err := k.producer.Close()
	return err
}