// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
	"encoding/json"
	"fmt"
	"net/url"
	"time"

	"github.com/golang/glog"
	"github.com/optiopay/kafka"
	"github.com/optiopay/kafka/proto"
)
const (
	brokerClientID           = "kafka-sink"
	brokerDialTimeout        = 10 * time.Second
	brokerDialRetryLimit     = 1
	brokerDialRetryWait      = 0
	brokerAllowTopicCreation = true
	brokerLeaderRetryLimit   = 1
	brokerLeaderRetryWait    = 0
	metricsTopic             = "heapster-metrics"
	eventsTopic              = "heapster-events"
)

// Keys recognized in the sink URL's query string.
const (
	TimeSeriesTopic = "timeseriestopic"
	EventsTopic     = "eventstopic"
	compression     = "compression"
)
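
// A sink URL carrying these options might look like the following (the broker
// address and topic name are hypothetical, shown only to illustrate the keys
// defined above):
//
//	kafka:?brokers=localhost:9092&timeseriestopic=custom-metrics&compression=gzip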

// KafkaClient is the interface the Kafka sink exposes to its callers.
type KafkaClient interface {
	Name() string
	Stop()
	ProduceKafkaMessage(msgData interface{}) error
}

type kafkaSink struct {
	producer  kafka.DistributingProducer
	dataTopic string
}

// ProduceKafkaMessage marshals msgData to JSON and publishes it to the sink's topic.
func (sink *kafkaSink) ProduceKafkaMessage(msgData interface{}) error {
	start := time.Now()
	msgJson, err := json.Marshal(msgData)
	if err != nil {
		return fmt.Errorf("failed to transform the items to json: %s", err)
	}

	message := &proto.Message{Value: msgJson}
	_, err = sink.producer.Distribute(sink.dataTopic, message)
	if err != nil {
		return fmt.Errorf("failed to produce message to %s: %s", sink.dataTopic, err)
	}
	end := time.Now()
	glog.V(4).Infof("Exported %d bytes to kafka in %s", len(msgJson), end.Sub(start))
	return nil
}
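
// As a sketch, a caller may publish any json.Marshal-able value; the payload
// below is a hypothetical example, not a format this sink prescribes:
//
//	err := sink.ProduceKafkaMessage(map[string]interface{}{
//		"metric": "cpu/usage",
//		"value":  42,
//	})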

func (sink *kafkaSink) Name() string {
	return "Apache Kafka Sink"
}

func (sink *kafkaSink) Stop() {
	// nothing needs to be done.
}

// setupProducer returns a round-robin producer connected to the given Kafka brokers.
func setupProducer(sinkBrokerHosts []string, topic string, brokerConf kafka.BrokerConf, compression proto.Compression) (kafka.DistributingProducer, error) {
	glog.V(3).Infof("attempting to setup kafka sink")
	broker, err := kafka.Dial(sinkBrokerHosts, brokerConf)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to kafka cluster: %s", err)
	}
	defer broker.Close()

	// create the kafka producer.
	conf := kafka.NewProducerConf()
	conf.RequiredAcks = proto.RequiredAcksLocal
	conf.Compression = compression
	producer := broker.Producer(conf)

	// create a RoundRobinProducer that distributes messages across the topic's
	// partitions; fall back to a single partition if the count cannot be determined.
	count, err := broker.PartitionCount(topic)
	if err != nil {
		count = 1
		glog.Warningf("Failed to get partition count of topic %q: %s", topic, err)
	}
	sinkProducer := kafka.NewRoundRobinProducer(producer, count)
	glog.V(3).Infof("kafka sink setup successfully")
	return sinkProducer, nil
}

func getTopic(opts map[string][]string, topicType string) (string, error) {
	var topic string
	switch topicType {
	case TimeSeriesTopic:
		topic = metricsTopic
	case EventsTopic:
		topic = eventsTopic
	default:
		return "", fmt.Errorf("topic type %q is illegal", topicType)
	}

	// A topic supplied in the sink URL overrides the default.
	if len(opts[topicType]) > 0 {
		topic = opts[topicType][0]
	}
	return topic, nil
}
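
// For illustration (hypothetical option values), topic resolution works like this:
//
//	getTopic(map[string][]string{}, TimeSeriesTopic)                                 // "heapster-metrics" (default)
//	getTopic(map[string][]string{TimeSeriesTopic: {"my-metrics"}}, TimeSeriesTopic)  // "my-metrics" (override)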

func getCompression(opts map[string][]string) (proto.Compression, error) {
	if len(opts[compression]) == 0 {
		return proto.CompressionNone, nil
	}
	comp := opts[compression][0]
	switch comp {
	case "none":
		return proto.CompressionNone, nil
	case "gzip":
		return proto.CompressionGzip, nil
	default:
		return proto.CompressionNone, fmt.Errorf("compression %q is illegal; use none or gzip", comp)
	}
}
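
// Similarly (hypothetical option values):
//
//	getCompression(map[string][]string{})                       // proto.CompressionNone
//	getCompression(map[string][]string{compression: {"gzip"}})  // proto.CompressionGzip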

// NewKafkaClient builds a KafkaClient from a sink URL of the form
// kafka:?brokers=<host:port>[&brokers=...][&<topic key>=<topic>][&compression=<none|gzip>].
func NewKafkaClient(uri *url.URL, topicType string) (KafkaClient, error) {
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to parse url's query string: %s", err)
	}
	glog.V(3).Infof("kafka sink option: %v", opts)

	topic, err := getTopic(opts, topicType)
	if err != nil {
		return nil, err
	}

	compression, err := getCompression(opts)
	if err != nil {
		return nil, err
	}

	var kafkaBrokers []string
	if len(opts["brokers"]) < 1 {
		return nil, fmt.Errorf("there is no broker assigned for connecting to kafka")
	}
	kafkaBrokers = append(kafkaBrokers, opts["brokers"]...)
	glog.V(2).Infof("initializing kafka sink with brokers - %v", kafkaBrokers)

	// configure the broker connection.
	brokerConf := kafka.NewBrokerConf(brokerClientID)
	brokerConf.DialTimeout = brokerDialTimeout
	brokerConf.DialRetryLimit = brokerDialRetryLimit
	brokerConf.DialRetryWait = brokerDialRetryWait
	brokerConf.LeaderRetryLimit = brokerLeaderRetryLimit
	brokerConf.LeaderRetryWait = brokerLeaderRetryWait
	brokerConf.AllowTopicCreation = brokerAllowTopicCreation
	brokerConf.Logger = &GologAdapterLogger{}

	// set up the producer for the kafka brokers.
	sinkProducer, err := setupProducer(kafkaBrokers, topic, brokerConf, compression)
	if err != nil {
		return nil, fmt.Errorf("failed to setup producer: %v", err)
	}

	return &kafkaSink{
		producer:  sinkProducer,
		dataTopic: topic,
	}, nil
}
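
// A minimal usage sketch (the broker address, topic, and payload below are
// illustrative assumptions, not values defined in this file):
//
//	uri, err := url.Parse("kafka:?brokers=localhost:9092&timeseriestopic=custom-metrics&compression=gzip")
//	if err != nil {
//		// handle the parse error
//	}
//	client, err := NewKafkaClient(uri, TimeSeriesTopic)
//	if err != nil {
//		// handle the configuration/connection error
//	}
//	defer client.Stop()
//	err = client.ProduceKafkaMessage(map[string]string{"metric": "cpu/usage"})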