-
Notifications
You must be signed in to change notification settings - Fork 0
/
datastore.go
130 lines (108 loc) · 2.56 KB
/
datastore.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
package kafkamock
import (
"sync"
"time"
)
type (
	// kafkaDataStore is the root of the mock broker's in-memory state:
	// a set of topics keyed by name. mu guards the Topics map.
	kafkaDataStore struct {
		mu     sync.Mutex
		Topics map[string]*kafkaTopic
	}

	// kafkaTopic holds one topic's partitions, keyed by partition index.
	// mu guards the Partitions map.
	kafkaTopic struct {
		mu         sync.Mutex
		Partitions map[int32]*kafkaPartition
	}

	// kafkaPartition is a single partition's record log plus per-consumer-group
	// commit state. mu guards all fields below.
	kafkaPartition struct {
		mu        sync.Mutex
		Index     int32 // partition number within the topic
		ErrorCode int16 // NOTE(review): presumably the protocol error code reported in responses — confirm against handlers
		Timestamp int64
		Offset    int64
		Records   []*kafkaRecord // append-only record log (see postRecord)
		// GroupCommittedOffsets maps consumer group ID -> committed offset;
		// a group is registered lazily on first read (see groupCommittedOffset).
		GroupCommittedOffsets map[string]int64
		Metadata              NullableString // NullableString is declared elsewhere in the package
	}

	// kafkaRecord is one produced record.
	kafkaRecord struct {
		Attributes int8
		Timestamp  int64 // milliseconds since the Unix epoch (set via time.UnixMilli in postRecord)
		Key        []byte
		Value      []byte
		Headers    []kafkaRecordHeader
	}

	// kafkaRecordHeader is a single key/value record header.
	kafkaRecordHeader struct {
		HeaderKey   string
		HeaderValue []byte
	}
)
// newKafkaDataStore builds an empty in-memory store with no topics.
func newKafkaDataStore() *kafkaDataStore {
	ds := new(kafkaDataStore)
	ds.Topics = make(map[string]*kafkaTopic)
	return ds
}
// createTopic returns the topic with the given name, creating an empty one
// (no partitions) when it does not exist yet. Safe for concurrent use.
func (ds *kafkaDataStore) createTopic(name string) *kafkaTopic {
	ds.mu.Lock()
	defer ds.mu.Unlock()

	if t, ok := ds.Topics[name]; ok {
		return t
	}
	t := &kafkaTopic{Partitions: make(map[int32]*kafkaPartition)}
	ds.Topics[name] = t
	return t
}
// getTopic looks up a topic by name; it returns nil when the topic
// has never been created. Safe for concurrent use.
func (ds *kafkaDataStore) getTopic(name string) *kafkaTopic {
	ds.mu.Lock()
	t := ds.Topics[name]
	ds.mu.Unlock()
	return t
}
// createPartition returns the partition with the given index, creating an
// empty one (no records, no committed offsets) when absent. Safe for
// concurrent use.
func (kp *kafkaTopic) createPartition(number int32) *kafkaPartition {
	kp.mu.Lock()
	defer kp.mu.Unlock()

	if p, ok := kp.Partitions[number]; ok {
		return p
	}
	p := &kafkaPartition{
		Index:                 number,
		Records:               make([]*kafkaRecord, 0),
		GroupCommittedOffsets: make(map[string]int64),
	}
	kp.Partitions[number] = p
	return p
}
// getPartition looks up a partition by index; it returns nil when the
// partition has never been created. Safe for concurrent use.
func (kp *kafkaTopic) getPartition(number int32) *kafkaPartition {
	kp.mu.Lock()
	p := kp.Partitions[number]
	kp.mu.Unlock()
	return p
}
// lock acquires the partition's mutex, exposing it so callers can perform
// multi-step reads/writes atomically. Must be paired with unlock.
func (kp *kafkaPartition) lock() {
	kp.mu.Lock()
}
// unlock releases the partition's mutex previously acquired via lock.
func (kp *kafkaPartition) unlock() {
	kp.mu.Unlock()
}
// postRecord appends one record to the partition's log. The timestamp is
// stored as milliseconds since the Unix epoch; headers are flattened from a
// map into a slice of kafkaRecordHeader (order is map-iteration order, i.e.
// unspecified). Safe for concurrent use.
func (kp *kafkaPartition) postRecord(attribs int8, ts time.Time, key, value []byte, headers map[string][]byte) {
	// Flatten the header map before taking the lock; this work does not
	// touch shared state.
	hs := make([]kafkaRecordHeader, 0, len(headers))
	for name, data := range headers {
		hs = append(hs, kafkaRecordHeader{HeaderKey: name, HeaderValue: data})
	}

	kp.mu.Lock()
	defer kp.mu.Unlock()
	kp.Records = append(kp.Records, &kafkaRecord{
		Attributes: attribs,
		Timestamp:  ts.UnixMilli(),
		Key:        key,
		Value:      value,
		Headers:    hs,
	})
}
// groupCommittedOffset reports the committed offset for the given consumer
// group. A group seen for the first time is registered in the map with
// offset 0, and 0 is returned. Safe for concurrent use.
func (kp *kafkaPartition) groupCommittedOffset(group string) int64 {
	kp.mu.Lock()
	defer kp.mu.Unlock()

	if off, ok := kp.GroupCommittedOffsets[group]; ok {
		return off
	}
	// First access by this group: record it at the zero offset so the
	// group's presence becomes visible in the map.
	kp.GroupCommittedOffsets[group] = 0
	return 0
}