kafka.go
package msg

import (
	"context"
	"github.com/pkg/errors"
	"github.com/segmentio/kafka-go"
	"github.com/viant/toolbox"
	"strings"
	"time"
)

// attribute names that may carry a Kafka message key
const keyAttribute = "key"
const idAttribute = "id"

// kafkaClient represents a Kafka based implementation of the Client interface
type kafkaClient struct {
	timeout time.Duration
}
// Push publishes a message to the destination topic; if the message attributes
// contain a "key" or "id" attribute, its value is used as the Kafka message key.
func (k *kafkaClient) Push(ctx context.Context, dest *Resource, message *Message) (Result, error) {
	config := kafka.WriterConfig{
		Brokers:  dest.Brokers,
		Topic:    dest.Name,
		Balancer: &kafka.LeastBytes{},
	}
	body := toolbox.AsString(message.Data)
	writer := kafka.NewWriter(config)
	defer func() { _ = writer.Close() }()
	key := ""
	for name := range message.Attributes {
		candidate := strings.ToLower(name)
		if candidate == keyAttribute || candidate == idAttribute {
			key = toolbox.AsString(message.Attributes[name])
		}
	}
	messages := make([]kafka.Message, 0)
	messages = append(messages, kafka.Message{
		Partition: dest.Partition,
		Key:       []byte(key),
		Value:     []byte(body),
	})
	err := writer.WriteMessages(ctx, messages...)
	if err != nil {
		return nil, err
	}
	return key, nil
}
// PullN reads up to count messages from the source topic; unless nack is set,
// each message is committed after it has been read.
func (k *kafkaClient) PullN(ctx context.Context, source *Resource, count int, nack bool) ([]*Message, error) {
	reader := kafka.NewReader(kafka.ReaderConfig{
		Brokers:   source.Brokers,
		Topic:     source.Name,
		Partition: source.Partition,
		MinBytes:  10e3, // 10KB
		MaxBytes:  10e6, // 10MB
		MaxWait:   k.timeout,
	})
	defer func() { _ = reader.Close() }()
	if source.Offset > 0 {
		if err := reader.SetOffset(int64(source.Offset)); err != nil {
			return nil, errors.Wrapf(err, "failed to set offset: %v", source.Offset)
		}
	}
	var result = make([]*Message, 0)
	for i := 0; i < count; i++ {
		message, err := reader.ReadMessage(ctx)
		if err != nil {
			return nil, err
		}
		msg := &Message{
			Data:       message.Value,
			Attributes: map[string]interface{}{},
		}
		if len(message.Key) > 0 {
			msg.Attributes[keyAttribute] = string(message.Key)
		}
		result = append(result, msg)
		if !nack {
			if err = reader.CommitMessages(ctx, message); err != nil {
				return nil, errors.Wrapf(err, "failed to commit message: %v", msg)
			}
		}
	}
	return result, nil
}
// SetupResource creates the resource topic, dropping it first when Recreate is set.
func (k *kafkaClient) SetupResource(resource *ResourceSetup) (*Resource, error) {
	conn, err := kafka.DialLeader(context.Background(), "tcp", resource.Brokers[0], resource.Name, resource.Partition)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to connect to %v", resource.Brokers[0])
	}
	defer func() { _ = conn.Close() }()
	if resource.Recreate {
		_ = conn.DeleteTopics(resource.Name)
	}
	topicConfig := kafka.TopicConfig{
		Topic:             resource.Name,
		ReplicationFactor: resource.ReplicationFactor,
		NumPartitions:     resource.Partitions,
	}
	return &resource.Resource, conn.CreateTopics(topicConfig)
}

// DeleteResource deletes the resource topic.
func (k *kafkaClient) DeleteResource(resource *Resource) error {
	conn, err := kafka.DialLeader(context.Background(), "tcp", resource.Brokers[0], resource.Name, resource.Partition)
	if err != nil {
		return errors.Wrapf(err, "failed to connect to %v", resource.Brokers[0])
	}
	defer func() { _ = conn.Close() }()
	return conn.DeleteTopics(resource.Name)
}

// Close is a no-op; writers and readers are created and closed per call.
func (k *kafkaClient) Close() error {
	return nil
}

// newKafkaClient creates a new Kafka client with the supplied read timeout.
func newKafkaClient(timeout time.Duration) (Client, error) {
	return &kafkaClient{timeout: timeout}, nil
}
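
Usage sketch (not part of the original source): the function below illustrates how newKafkaClient and Push might be called from within the same msg package. It assumes Resource exposes Brokers, Name and Partition directly and that Message.Data accepts raw bytes, as suggested by how those fields are used above; the broker address, topic name, and attribute value are placeholders.

// pushExample is an illustrative sketch only; broker, topic and key values are placeholders.
func pushExample(ctx context.Context) error {
	client, err := newKafkaClient(15 * time.Second)
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }()
	dest := &Resource{
		Brokers:   []string{"localhost:9092"}, // placeholder broker address
		Name:      "myTopic",                  // placeholder topic
		Partition: 0,
	}
	// a "key" attribute is picked up by Push and used as the Kafka message key
	message := &Message{
		Data:       []byte("hello kafka"),
		Attributes: map[string]interface{}{keyAttribute: "event-1"},
	}
	_, err = client.Push(ctx, dest, message)
	return err
}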