forked from lovoo/goka
/
consumer.go
118 lines (99 loc) · 3.28 KB
/
consumer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
package kafka
import (
"time"
"github.com/Shopify/sarama"
cluster "github.com/bsm/sarama-cluster"
"github.com/lovoo/goka/multierr"
)
const (
	// defaultChannelBufferSize is the size of the sarama event/produce
	// channel buffers used when the caller's config leaves
	// ChannelBufferSize at its zero value.
	defaultChannelBufferSize = 256
	// defaultMaxProcessingTime is the time sarama-cluster assumes the
	// processing of a single event may take.
	defaultMaxProcessingTime = 1 * time.Second
	// Producer flush configuration defaults: flush at most every
	// defaultFlushFrequency or once defaultFlushBytes are buffered,
	// retrying a failed send up to defaultProducerMaxRetries times.
	defaultFlushFrequency     = 100 * time.Millisecond
	defaultFlushBytes         = 64 * 1024
	defaultProducerMaxRetries = 10
)
const (
	// OffsetNewest defines the newest offset to read from using the consumer.
	// NOTE(review): the value appears to mirror sarama's OffsetNewest
	// sentinel — confirm against the sarama package constants.
	OffsetNewest = -1
	// OffsetOldest defines the oldest offset to read from using the consumer.
	// NOTE(review): the value appears to mirror sarama's OffsetOldest
	// sentinel — confirm against the sarama package constants.
	OffsetOldest = -2
)
// Consumer abstracts a kafka consumer. It combines group-based
// consumption of co-partitioned input topics with individually managed
// topic/partition consumption, delivering everything on a single
// events channel.
type Consumer interface {
	// Events returns the channel on which consumed events are delivered.
	Events() <-chan Event

	// Subscribe defines the input topics to consume as a group.
	// Group consumption assumes co-partitioned topics.
	Subscribe(topics map[string]int64) error
	// AddGroupPartition marks the consumer ready to start consuming
	// the messages of the given group partition.
	AddGroupPartition(partition int32)
	// Commit commits the given offset for a topic/partition of the group.
	Commit(topic string, partition int32, offset int64) error

	// AddPartition starts consuming an individual topic/partition at
	// the given initial offset.
	AddPartition(topic string, partition int32, initialOffset int64) error
	// RemovePartition stops consuming an individual topic/partition.
	RemovePartition(topic string, partition int32) error

	// Close stops the consumer and closes the events channel.
	Close() error
}
// saramaConsumer implements the Consumer interface by combining a
// group consumer (group-based subscription and commits) and a simple
// consumer (individually added topic/partitions), both feeding the
// shared events channel.
type saramaConsumer struct {
	groupConsumer  *groupConsumer  // handles Subscribe/AddGroupPartition/Commit
	simpleConsumer *simpleConsumer // handles AddPartition/RemovePartition
	events         chan Event      // shared output channel for both consumers
}
// NewSaramaConsumer creates a new Consumer using sarama. It wires a
// group consumer and a simple consumer to one shared events channel,
// sized by the config's ChannelBufferSize (or a default when unset).
func NewSaramaConsumer(brokers []string, group string, config *cluster.Config) (Consumer, error) {
	bufSize := config.Config.ChannelBufferSize
	if bufSize == 0 {
		bufSize = defaultChannelBufferSize
	}
	ch := make(chan Event, bufSize)

	grp, err := newGroupConsumer(brokers, group, ch, config)
	if err != nil {
		return nil, err
	}

	// since simple consumer only handle tables, be sure to start from oldest
	cfgCopy := config.Config // copy config
	cfgCopy.Consumer.Offsets.Initial = sarama.OffsetOldest
	smp, err := newSimpleConsumer(brokers, ch, &cfgCopy)
	if err != nil {
		return nil, err
	}

	return &saramaConsumer{
		groupConsumer:  grp,
		simpleConsumer: smp,
		events:         ch,
	}, nil
}
// Close shuts down the simple consumer and then the group consumer,
// collecting any errors from either, and closes the events channel
// regardless of whether closing the consumers succeeded.
func (c *saramaConsumer) Close() error {
	// the channel must be closed even if a consumer fails to close
	defer close(c.events)

	var collected multierr.Errors
	for _, closeFn := range []func() error{
		c.simpleConsumer.Close,
		c.groupConsumer.Close,
	} {
		if err := closeFn(); err != nil {
			collected.Collect(err)
		}
	}
	return collected.NilOrError()
}
// Events returns the shared channel on which both the group consumer
// and the simple consumer deliver their events.
func (c *saramaConsumer) Events() <-chan Event {
	return c.events
}
// Subscribe defines the input topics to consume as a group.
// Group consumption assumes co-partitioned topics.
func (c *saramaConsumer) Subscribe(topics map[string]int64) error {
	return c.groupConsumer.Subscribe(topics)
}
// AddGroupPartition marks the consumer ready to start consuming the
// messages of the given group partition.
func (c *saramaConsumer) AddGroupPartition(partition int32) {
	c.groupConsumer.AddGroupPartition(partition)
}
// Commit commits the given offset for a topic/partition via the
// group consumer.
func (c *saramaConsumer) Commit(topic string, partition int32, offset int64) error {
	return c.groupConsumer.Commit(topic, partition, offset)
}
// AddPartition starts consuming an individual topic/partition with the
// simple consumer, beginning at initialOffset.
func (c *saramaConsumer) AddPartition(topic string, partition int32, initialOffset int64) error {
	// initialOffset is already an int64, so no conversion is needed.
	return c.simpleConsumer.AddPartition(topic, partition, initialOffset)
}
// RemovePartition stops consuming an individual topic/partition via
// the simple consumer.
func (c *saramaConsumer) RemovePartition(topic string, partition int32) error {
	return c.simpleConsumer.RemovePartition(topic, partition)
}