forked from aliyun/aliyun-log-go-sdk
/
log_accumulator.go
125 lines (112 loc) · 5.1 KB
/
log_accumulator.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
package producer
import (
"errors"
"github.com/aliyun/aliyun-log-go-sdk"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"strings"
"sync"
"sync/atomic"
)
// LogAccumulator buffers incoming logs into per-key ProducerBatch values
// until a batch is full enough to be handed off to the IO layer.
type LogAccumulator struct {
	// lock serializes addLogToProducerBatch against shutdown.
	lock sync.RWMutex
	// logGroupData maps a composite key (see getKeyString) to the
	// in-progress batch for that key. //map[string]*ProducerBatch,
	logGroupData sync.Map
	producerConfig *ProducerConfig
	ioWorker       *IoWorker
	// shutDownFlag, once true, rejects any further writes.
	shutDownFlag bool
	logger       log.Logger
	// threadPool receives completed batches for asynchronous sending.
	threadPool *IoThreadPool
}
// initLogAccumulator constructs a LogAccumulator wired to the given
// configuration, IO worker, logger and thread pool. The accumulator
// starts in the running state (shutDownFlag false).
func initLogAccumulator(config *ProducerConfig, ioWorker *IoWorker, logger log.Logger, threadPool *IoThreadPool) *LogAccumulator {
	accumulator := new(LogAccumulator)
	accumulator.producerConfig = config
	accumulator.ioWorker = ioWorker
	accumulator.shutDownFlag = false
	accumulator.logger = logger
	accumulator.threadPool = threadPool
	return accumulator
}
// addOrSendProducerBatch appends log (a *sls.Log or []*sls.Log) to
// producerBatch when the batch still has room, and flushes the batch to the
// IO layer once it crosses the configured thresholds. The caller has already
// credited the payload's size to producerBatch.totalDataSize.
//
// Three cases, checked in order:
//  1. batch size already exceeds MaxBatchSize but is still under 5242880
//     bytes (5 MB — presumably a service-side request limit; confirm) and
//     under MaxBatchCount: add the log and callback, then send immediately.
//  2. batch is within both MaxBatchSize and MaxBatchCount: just add; the
//     batch stays buffered for a later flush.
//  3. otherwise the batch has no room: flush it as-is and start a new batch
//     containing only this log.
//
// NOTE(review): in case 3 the payload's size was already added to the OLD
// batch's totalDataSize even though the log lands in the NEW batch — looks
// like a minor accounting skew inherited from upstream; confirm intent.
func (logAccumulator *LogAccumulator) addOrSendProducerBatch(key, project, logstore, logTopic, logSource, shardHash string, producerBatch *ProducerBatch, log interface{}, callback CallBack) {
	// +1 accounts for the log about to be appended.
	totalDataCount := producerBatch.getLogGroupCount() + 1
	if int64(producerBatch.totalDataSize) > logAccumulator.producerConfig.MaxBatchSize && producerBatch.totalDataSize < 5242880 && totalDataCount <= logAccumulator.producerConfig.MaxBatchCount {
		producerBatch.addLogToLogGroup(log)
		if callback != nil {
			producerBatch.addProducerBatchCallBack(callback)
		}
		logAccumulator.sendToServer(key, producerBatch)
	} else if int64(producerBatch.totalDataSize) <= logAccumulator.producerConfig.MaxBatchSize && totalDataCount <= logAccumulator.producerConfig.MaxBatchCount {
		producerBatch.addLogToLogGroup(log)
		if callback != nil {
			producerBatch.addProducerBatchCallBack(callback)
		}
	} else {
		logAccumulator.sendToServer(key, producerBatch)
		logAccumulator.createNewProducerBatch(log, callback, key, project, logstore, logTopic, logSource, shardHash)
	}
}
// addLogToProducerBatch buffers a single log (*sls.Log) or a batch of logs
// ([]*sls.Log) under the key derived from project/logstore/topic/shard/source.
// If a batch already exists for that key, the payload's size is credited to
// it and addOrSendProducerBatch decides whether to flush; otherwise a new
// batch is created. Returns an error when the producer has been shut down or
// when logData is neither *sls.Log nor []*sls.Log.
func (logAccumulator *LogAccumulator) addLogToProducerBatch(project, logstore, shardHash, logTopic, logSource string,
	logData interface{}, callback CallBack) error {
	logAccumulator.lock.Lock()
	defer logAccumulator.lock.Unlock()
	if logAccumulator.shutDownFlag {
		level.Warn(logAccumulator.logger).Log("msg", "Producer has started and shut down and cannot write to new logs")
		return errors.New("Producer has started and shut down and cannot write to new logs")
	}

	// Compute the payload size up front; this also validates logData's type
	// and lets the two accepted shapes share one buffering path below.
	var logSize int64
	switch payload := logData.(type) {
	case *sls.Log:
		logSize = int64(GetLogSizeCalculate(payload))
	case []*sls.Log:
		logSize = int64(GetLogListSize(payload))
	default:
		level.Error(logAccumulator.logger).Log("msg", "Invalid logType")
		return errors.New("Invalid logType")
	}

	key := logAccumulator.getKeyString(project, logstore, logTopic, shardHash, logSource)
	if data, ok := logAccumulator.logGroupData.Load(key); ok {
		producerBatch := data.(*ProducerBatch)
		// Track both the per-batch size and the producer-wide total.
		atomic.AddInt64(&producerBatch.totalDataSize, logSize)
		atomic.AddInt64(&producerLogGroupSize, logSize)
		logAccumulator.addOrSendProducerBatch(key, project, logstore, logTopic, logSource, shardHash, producerBatch, logData, callback)
	} else {
		logAccumulator.createNewProducerBatch(logData, callback, key, project, logstore, logTopic, logSource, shardHash)
	}
	return nil
}
// createNewProducerBatch builds a fresh ProducerBatch seeded with the given
// payload (a *sls.Log or a []*sls.Log) and registers it in logGroupData under
// key. Any other payload type is silently ignored, matching the validation
// already performed by the caller.
func (logAccumulator *LogAccumulator) createNewProducerBatch(logType interface{}, callback CallBack, key, project, logstore, logTopic, logSource, shardHash string) {
	level.Debug(logAccumulator.logger).Log("msg", "Create a new ProducerBatch")
	switch payload := logType.(type) {
	case *sls.Log:
		logAccumulator.logGroupData.Store(key, initProducerBatch(payload, callback, project, logstore, logTopic, logSource, shardHash, logAccumulator.producerConfig))
	case []*sls.Log:
		logAccumulator.logGroupData.Store(key, initProducerBatch(payload, callback, project, logstore, logTopic, logSource, shardHash, logAccumulator.producerConfig))
	}
}
// sendToServer hands the batch to the IO thread pool and removes it from the
// accumulator map so the next log for the same key starts a fresh batch.
// ioLock serializes the hand-off with the deletion.
func (logAccumulator *LogAccumulator) sendToServer(key string, producerBatch *ProducerBatch) {
	ioLock.Lock()
	defer ioLock.Unlock()
	level.Debug(logAccumulator.logger).Log("msg", "Send producerBatch to IoWorker from logAccumulator")
	logAccumulator.threadPool.addTask(producerBatch)
	logAccumulator.logGroupData.Delete(key)
}
// getKeyString builds the logGroupData map key by joining the identifying
// fields with Delimiter, in the fixed order
// project|logstore|logTopic|shardHash|logSource.
func (logAccumulator *LogAccumulator) getKeyString(project, logstore, logTopic, shardHash, logSource string) string {
	parts := []string{project, logstore, logTopic, shardHash, logSource}
	return strings.Join(parts, Delimiter)
}