-
Notifications
You must be signed in to change notification settings - Fork 0
/
listener.go
122 lines (97 loc) · 2.71 KB
/
listener.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
package kafka
import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
"github.com/Shopify/sarama"
"github.com/martin-helmich/cloudnativego-backend/src/lib/helper/kafka"
"github.com/martin-helmich/cloudnativego-backend/src/lib/msgqueue"
)
// kafkaEventListener consumes messages from a Kafka topic and converts
// them into msgqueue.Event values using the configured EventMapper.
type kafkaEventListener struct {
	consumer   sarama.Consumer      // underlying Sarama consumer used to read partitions
	partitions []int32              // explicit partitions to consume; empty means "all partitions of the topic"
	mapper     msgqueue.EventMapper // translates (event name, payload) pairs into concrete event types
}
// NewKafkaEventListenerFromEnvironment constructs an EventListener from
// environment variables:
//
//	KAFKA_BROKERS    — comma-separated broker list (default "localhost:9092")
//	KAFKA_PARTITIONS — comma-separated partition numbers; unset means all partitions
//
// It blocks until a broker connection is established (kafka.RetryConnect
// retries every 5 seconds) and returns an error if any partition entry is
// not a valid int32.
func NewKafkaEventListenerFromEnvironment() (msgqueue.EventListener, error) {
	brokers := []string{"localhost:9092"}
	partitions := []int32{}
	if brokerList := os.Getenv("KAFKA_BROKERS"); brokerList != "" {
		brokers = strings.Split(brokerList, ",")
	}
	if partitionList := os.Getenv("KAFKA_PARTITIONS"); partitionList != "" {
		partitionStrings := strings.Split(partitionList, ",")
		partitions = make([]int32, len(partitionStrings))
		for i, s := range partitionStrings {
			// ParseInt with bitSize 32 rejects values outside the int32
			// range instead of silently truncating them, which the previous
			// Atoi + int32() conversion would have done. TrimSpace tolerates
			// entries like "0, 1" in the environment variable.
			partition, err := strconv.ParseInt(strings.TrimSpace(s), 10, 32)
			if err != nil {
				return nil, fmt.Errorf("invalid Kafka partition %q: %v", s, err)
			}
			partitions[i] = int32(partition)
		}
	}
	// Blocks until a connection succeeds; RetryConnect delivers the client
	// on the returned channel.
	client := <-kafka.RetryConnect(brokers, 5*time.Second)
	return NewKafkaEventListener(client, partitions)
}
// NewKafkaEventListener wraps an existing Sarama client in an event
// listener. An empty partitions slice means every partition of the topic
// will be consumed when Listen is called. Returns an error if a consumer
// cannot be created from the client.
func NewKafkaEventListener(client sarama.Client, partitions []int32) (msgqueue.EventListener, error) {
	c, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		return nil, err
	}
	return &kafkaEventListener{
		consumer:   c,
		partitions: partitions,
		mapper:     msgqueue.NewEventMapper(),
	}, nil
}
// Listen starts consuming the "events" topic and returns an unbuffered
// channel of decoded events and a channel of errors. For each consumed
// partition it launches two goroutines: one decoding messages, one
// forwarding consumer errors.
//
// NOTE(review): the events... parameter is accepted but never used — no
// filtering by event name happens here; every decoded event is delivered.
// Confirm whether the msgqueue.EventListener contract expects filtering.
//
// NOTE(review): the spawned goroutines have no stop mechanism; they run
// until the partition consumers' channels close. Callers cannot cancel
// consumption through this API.
func (k *kafkaEventListener) Listen(events ...string) (<-chan msgqueue.Event, <-chan error, error) {
	var err error
	topic := "events"
	results := make(chan msgqueue.Event)
	errors := make(chan error)
	partitions := k.partitions
	// No partitions configured: discover and consume all partitions of the topic.
	if len(partitions) == 0 {
		partitions, err = k.consumer.Partitions(topic)
		if err != nil {
			return nil, nil, err
		}
	}
	log.Printf("topic %s has partitions: %v", topic, partitions)
	for _, partition := range partitions {
		log.Printf("consuming partition %s:%d", topic, partition)
		// NOTE(review): offset 0 replays from the literal first offset, which
		// fails if old segments were deleted; sarama.OffsetOldest/OffsetNewest
		// are the conventional choices — confirm intended replay semantics.
		pConsumer, err := k.consumer.ConsumePartition(topic, partition, 0)
		if err != nil {
			return nil, nil, err
		}
		// Decode each message's JSON envelope and map it to a typed event.
		// Decode/mapping failures are reported on the error channel and the
		// message is skipped.
		go func() {
			for msg := range pConsumer.Messages() {
				log.Printf("received message %v", msg)
				body := messageEnvelope{}
				err := json.Unmarshal(msg.Value, &body)
				if err != nil {
					errors <- fmt.Errorf("could not JSON-decode message: %v", err)
					continue
				}
				event, err := k.mapper.MapEvent(body.EventName, body.Payload)
				if err != nil {
					errors <- fmt.Errorf("could not map message: %v", err)
					continue
				}
				results <- event
			}
		}()
		// Forward low-level consumer errors to the caller's error channel.
		go func() {
			for err := range pConsumer.Errors() {
				errors <- err
			}
		}()
	}
	return results, errors, nil
}
// Mapper returns the EventMapper this listener uses to translate received
// messages into typed events.
//
// Receiver renamed from l to k for consistency with the other methods of
// kafkaEventListener (Go convention: one receiver name per type).
func (k *kafkaEventListener) Mapper() msgqueue.EventMapper {
	return k.mapper
}