/
retrieve.go
191 lines (166 loc) · 4.56 KB
/
retrieve.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
package bucket_api
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
//"net/http"
"encoding/gob"
"os"
"os/signal"
"strings"
"syscall"
log "github.com/sirupsen/logrus"
// "github.com/aws/aws-sdk-go/aws"
// "github.com/aws/aws-sdk-go/service/S3"
// "github.com/aws/aws-sdk-go/aws/session"
"github.com/confluentinc/confluent-kafka-go/kafka"
//"github.com/segmentio/kafka-go"
)
// KafkaMsg is the unit that is gob-encoded between the Kafka consumer and
// the uploader. It mixes per-message fields (Topic, Partition, Key, Value,
// Offset) with consumer-configuration fields (Brokers, Topics, ClientId);
// within this file only Brokers, Topics and Value are actually read.
type KafkaMsg struct {
	Topic      string   // topic the message was read from
	Partition  int      // partition within Topic
	Key, Value []byte   // message key and payload (Value carries the video bytes)
	Offset     int      // offset within the partition
	Brokers    []string // bootstrap broker addresses (configuration, not per-message)
	Topics     []string // topics to subscribe to (configuration, not per-message)
	ClientId   string   // consumer group / client identifier
}
// retrieve bundles the two ends of an in-process io.Pipe, intended to
// stream decoded bytes from a producer goroutine to a consumer.
type retrieve struct {
	pipeReader *io.PipeReader // read end
	pipeWriter *io.PipeWriter // write end
}
func DecodeAndUpload(reader *bufio.Reader, context context.Context) error {
// Calling Pipe method
ret := retrieve{}
ret.pipeReader, ret.pipeWriter = io.Pipe()
go func() {
defer ret.pipeWriter.Close()
//remember to cancel the context also
decoder := gob.NewDecoder(reader)
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, syscall.SIGINT, syscall.SIGTERM)
for {
select {
case <-interruptChan:
log.Info("DecodeAndSend got an interrupt")
break
case <-context.Done():
log.Info("DecodeAndSend got an interrupt")
break
default:
msg := KafkaMsg{}
err := decoder.Decode(&msg)
if err != nil {
log.Info("DecodeAndSend Got an error while decoding msg, %v", err)
break
}
// ret.pipeWriter.Write([]byte(msg))
// log.Info("DecodeAndSend: Msg written to pipe, msg length %v", len(msg.Value))
buffer := new(bytes.Buffer)
buffer.ReadFrom(ret.pipeReader)
res := buffer.Bytes()
// Upload Files
err = uploadFile(res)
if err != nil {
log.Fatal(err)
}
}
}
decoder = nil
}()
return nil
}
// DownlaodVideoFromKafka consumes messages from Kafka, gob-encodes each
// payload into an in-process pipe, and hands the stream to DecodeAndUpload,
// which uploads the decoded payloads.
//
// NOTE(review): the name keeps its original (misspelled) form so existing
// callers continue to compile; consider adding a correctly-spelled wrapper.
func DownlaodVideoFromKafka() {
	ctx := context.Background()

	kafkaConfig := KafkaMsg{}
	// assumes broker URLs arrive as a comma-separated list — TODO: wire in
	// real configuration instead of these placeholders.
	kafkaBrokerUrl := ""
	kafkaConfig.Brokers = strings.Split(kafkaBrokerUrl, ",")
	kafkaConfig.Topics = []string{}

	consumer, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "host1:9092,host2:9092",
		"group.id":          "foo",
		"auto.offset.reset": "smallest"})
	if err != nil {
		log.Fatalf("Error in creating a consumer: %v", err)
	}
	// Close only after the poll loop below finishes. The original called
	// Close() immediately, shutting the consumer down underneath the
	// polling goroutine.
	defer consumer.Close()

	pipeReader, pipeWriter := io.Pipe()
	defer pipeWriter.Close()
	vidReader := bufio.NewReader(pipeReader)
	encoder := gob.NewEncoder(pipeWriter)

	// Decode-and-upload side runs concurrently, fed through the pipe.
	go func() {
		if err := DecodeAndUpload(vidReader, ctx); err != nil {
			log.Infof("Could not download and send video, %v", err)
		}
	}()

	// The original discarded this error.
	if err = consumer.SubscribeTopics(kafkaConfig.Topics, nil); err != nil {
		log.Fatalf("Error subscribing to topics: %v", err)
	}

	// Poll synchronously in this goroutine so the deferred Close fires only
	// once the loop ends. `run` starts true — the original declared
	// `var run bool` (false), so its loop body never executed.
	run := true
	for run {
		// 100ms timeout instead of Poll(0), which busy-spins the CPU.
		ev := consumer.Poll(100)
		if ev == nil {
			continue
		}
		switch e := ev.(type) {
		case *kafka.Message:
			fmt.Printf("%% Message on %s:\n%s\n",
				e.TopicPartition, string(e.Value))
			if encErr := encoder.Encode(e.Value); encErr != nil {
				// A bare `break` here would only leave the switch,
				// not the for loop — clear the flag instead.
				log.Errorf("Error while encoding with gob: %v", encErr)
				run = false
			}
		case kafka.PartitionEOF:
			fmt.Printf("%% Reached %v\n", e)
		case kafka.Error:
			fmt.Fprintf(os.Stderr, "%% Error: %v\n", e)
			run = false
		default:
			fmt.Printf("Ignored %v\n", e)
		}
	}
}