forked from ISADBA/go-queue
-
Notifications
You must be signed in to change notification settings - Fork 0
/
pusher.go
160 lines (140 loc) · 3.64 KB
/
pusher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
package kq
import (
"context"
"crypto/tls"
"strconv"
"time"
"github.com/segmentio/kafka-go"
"github.com/segmentio/kafka-go/sasl"
"github.com/segmentio/kafka-go/sasl/plain"
"github.com/segmentio/kafka-go/sasl/scram"
"github.com/zeromicro/go-zero/core/executors"
"github.com/zeromicro/go-zero/core/logx"
)
type (
	// PushOption customizes the batching behavior of a Pusher by
	// mutating its chunkOptions before the chunk executor is built.
	PushOption func(options *chunkOptions)

	// Pusher publishes string payloads to a single Kafka topic,
	// batching messages through a chunk executor before writing.
	Pusher struct {
		// produer is the underlying kafka writer.
		// NOTE(review): name is a typo for "producer"; kept because it is
		// referenced by every method of this type.
		produer *kafka.Writer
		// topic is the destination topic, also reported by Name().
		topic string
		// executor batches messages; Push falls back to a direct
		// synchronous write when it is nil.
		executor *executors.ChunkExecutor
	}

	// chunkOptions collects the tunables set by PushOption functions.
	chunkOptions struct {
		// chunkSize is the flush threshold in bytes (see newOptions,
		// which maps it to executors.WithChunkBytes).
		chunkSize int
		// flushInterval is the maximum time a batch may wait before
		// being flushed.
		flushInterval time.Duration
	}
)
// NewPusher creates a Pusher for the topic in config and starts a chunk
// executor that batches pushed messages before writing them to Kafka.
//
// Authentication is derived from config:
//   - empty Username/Password: no auth, LeastBytes balancer, snappy compression;
//   - Username/Password set, empty Mechanism: SASL/PLAIN;
//   - Mechanism "SCRAM-SHA-512" or "SCRAM-SHA-256": SASL/SCRAM over TLS.
//
// It panics on an invalid SCRAM setup or an unsupported Mechanism value,
// since a misconfigured producer cannot do any useful work.
func NewPusher(config KqConf, opts ...PushOption) *Pusher {
	pusher := &Pusher{
		produer: newWriter(config),
		topic:   config.Topic,
	}

	// Batch pushed messages; write failures are logged rather than returned
	// because the flush happens asynchronously, after Push has returned.
	pusher.executor = executors.NewChunkExecutor(func(tasks []interface{}) {
		chunk := make([]kafka.Message, len(tasks))
		for i := range tasks {
			chunk[i] = tasks[i].(kafka.Message)
		}
		if err := pusher.produer.WriteMessages(context.Background(), chunk...); err != nil {
			logx.Error(err)
		}
	}, newOptions(opts)...)

	return pusher
}

// newWriter builds the kafka writer for config, wiring up SASL when
// credentials are present.
func newWriter(config KqConf) *kafka.Writer {
	if config.Username == "" || config.Password == "" {
		// No authentication configured.
		return &kafka.Writer{
			Addr:        kafka.TCP(config.Brokers...),
			Topic:       config.Topic,
			Balancer:    &kafka.LeastBytes{},
			Compression: kafka.Snappy,
		}
	}

	mechanism, useTLS := newMechanism(config)
	// Transports are responsible for managing connection pools and other
	// resources, it's generally best to create a few of these and share
	// them across your application.
	sharedTransport := &kafka.Transport{
		SASL: mechanism,
	}
	if useTLS {
		sharedTransport.TLS = &tls.Config{}
	}

	return &kafka.Writer{
		Addr:      kafka.TCP(config.Brokers...),
		Topic:     config.Topic,
		Balancer:  &kafka.Hash{},
		Transport: sharedTransport,
	}
}

// newMechanism maps config.Mechanism to a SASL mechanism and reports
// whether the transport should use TLS (true for the SCRAM variants).
// It panics on an unsupported mechanism instead of silently producing a
// nil SASL configuration.
func newMechanism(config KqConf) (sasl.Mechanism, bool) {
	switch config.Mechanism {
	case "":
		// SASL plaintext authentication.
		return plain.Mechanism{
			Username: config.Username,
			Password: config.Password,
		}, false
	case "SCRAM-SHA-512":
		mechanism, err := scram.Mechanism(scram.SHA512, config.Username, config.Password)
		if err != nil {
			panic(err)
		}
		return mechanism, true
	case "SCRAM-SHA-256":
		mechanism, err := scram.Mechanism(scram.SHA256, config.Username, config.Password)
		if err != nil {
			panic(err)
		}
		return mechanism, true
	default:
		// Previously this fell through with a nil mechanism, yielding a
		// writer that could never authenticate; fail fast instead.
		panic("kq: unsupported SASL mechanism: " + config.Mechanism)
	}
}
// Close flushes any pending batched messages and closes the underlying
// kafka writer, returning the writer's close error if any.
func (p *Pusher) Close() error {
	if ex := p.executor; ex != nil {
		ex.Flush()
	}

	return p.produer.Close()
}
// Name reports the Kafka topic this pusher writes to, serving as the
// pusher's identifier.
func (p *Pusher) Name() string {
	topic := p.topic

	return topic
}
// Push queues the payload v for delivery to the configured topic.
//
// The message key is the current UnixNano timestamp rendered in decimal,
// which spreads keyed partitioning over time. When the chunk executor is
// available the message is buffered for batched delivery (the returned
// error only reflects enqueueing); otherwise it is written synchronously.
func (p *Pusher) Push(v string) error {
	msg := kafka.Message{
		Key:   []byte(strconv.FormatInt(time.Now().UnixNano(), 10)),
		Value: []byte(v),
	}

	// Prefer batched delivery; len(v) lets the executor track chunk bytes.
	if p.executor != nil {
		return p.executor.Add(msg, len(v))
	}

	return p.produer.WriteMessages(context.Background(), msg)
}
// WithChunkSize returns a PushOption that sets the byte threshold at
// which a pending batch of messages is flushed.
func WithChunkSize(chunkSize int) PushOption {
	return func(o *chunkOptions) { o.chunkSize = chunkSize }
}
// WithFlushInterval returns a PushOption that bounds how long a batch of
// messages may wait before being flushed, regardless of its size.
func WithFlushInterval(interval time.Duration) PushOption {
	return func(o *chunkOptions) { o.flushInterval = interval }
}
// newOptions applies the given PushOptions and translates the resulting
// settings into executors.ChunkOption values, skipping anything left at
// its zero value.
func newOptions(opts []PushOption) []executors.ChunkOption {
	var cfg chunkOptions
	for _, apply := range opts {
		apply(&cfg)
	}

	var chunkOpts []executors.ChunkOption
	if cfg.chunkSize > 0 {
		chunkOpts = append(chunkOpts, executors.WithChunkBytes(cfg.chunkSize))
	}
	if cfg.flushInterval > 0 {
		chunkOpts = append(chunkOpts, executors.WithFlushInterval(cfg.flushInterval))
	}

	return chunkOpts
}