-
Notifications
You must be signed in to change notification settings - Fork 510
/
streaming_block.go
152 lines (123 loc) · 3.94 KB
/
streaming_block.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
package v2
import (
"bytes"
"context"
"fmt"
"github.com/google/uuid"
"github.com/grafana/tempo/tempodb/backend"
"github.com/grafana/tempo/tempodb/encoding/common"
)
// StreamingBlock is a block that is built up one object at a time and whose
// buffered data is periodically flushed ("streamed") to the backend rather
// than being written all at once.
type StreamingBlock struct {
	meta  *backend.BlockMeta         // combined meta for the block being written; Size is refreshed in BlockMeta()
	bloom *common.ShardedBloomFilter // bloom filter fed with every appended object id
	// bufferedObjects counts objects appended since the last FlushBuffer.
	bufferedObjects int
	// appendBuffer accumulates page data written by the appender's DataWriter
	// until it is flushed to the backend.
	appendBuffer *bytes.Buffer
	appender     Appender
	cfg          *common.BlockConfig
}
// NewStreamingBlock creates a new streaming block. Objects are appended one
// at a time and flushed to the backend incrementally. The new block's meta is
// seeded from the input metas: all of them must share a single DataEncoding,
// and the new block's time range is the union of the inputs' ranges.
// estimatedObjects sizes the bloom filter and the buffered appender.
func NewStreamingBlock(cfg *common.BlockConfig, id uuid.UUID, tenantID string, metas []*backend.BlockMeta, estimatedObjects int) (*StreamingBlock, error) {
	if len(metas) == 0 {
		return nil, fmt.Errorf("empty block meta list")
	}

	// Blocks with different data encodings cannot be combined.
	dataEncoding := metas[0].DataEncoding
	for _, meta := range metas {
		if meta.DataEncoding != dataEncoding {
			return nil, fmt.Errorf("two blocks of different data encodings can not be streamed together: %s: %s", dataEncoding, meta.DataEncoding)
		}
	}

	// Start with times from input metas: earliest start to latest end.
	newMeta := backend.NewBlockMeta(tenantID, id, VersionString, cfg.Encoding, dataEncoding)
	newMeta.StartTime = metas[0].StartTime
	newMeta.EndTime = metas[0].EndTime
	for _, m := range metas[1:] {
		if m.StartTime.Before(newMeta.StartTime) {
			newMeta.StartTime = m.StartTime
		}
		if m.EndTime.After(newMeta.EndTime) {
			newMeta.EndTime = m.EndTime
		}
	}

	c := &StreamingBlock{
		meta:  newMeta,
		bloom: common.NewBloom(cfg.BloomFP, uint(cfg.BloomShardSizeBytes), uint(estimatedObjects)),
		cfg:   cfg,
	}

	c.appendBuffer = &bytes.Buffer{}
	dataWriter, err := NewDataWriter(c.appendBuffer, cfg.Encoding)
	if err != nil {
		return nil, fmt.Errorf("failed to create page writer: %w", err)
	}

	c.appender, err = NewBufferedAppender(dataWriter, cfg.IndexDownsampleBytes, estimatedObjects)
	if err != nil {
		// fixed typo: message previously read "failed to created appender"
		return nil, fmt.Errorf("failed to create appender: %w", err)
	}

	return c, nil
}
// AddObject appends a single object to the block, records its id in the
// bloom filter, and updates the block meta and buffered-object count.
func (c *StreamingBlock) AddObject(id common.ID, object []byte) error {
	if err := c.appender.Append(id, object); err != nil {
		return err
	}

	// Pass zero start/end: the streaming block derives its time range by
	// combining the input BlockMetas instead. See BlockMeta().
	c.meta.ObjectAdded(id, 0, 0)
	c.bloom.Add(id)
	c.bufferedObjects++

	return nil
}
// CurrentBufferLength returns the number of bytes currently sitting in the
// append buffer, i.e. data that has not yet been flushed to the backend.
func (c *StreamingBlock) CurrentBufferLength() int {
	return c.appendBuffer.Len()
}
// CurrentBufferedObjects returns how many objects have been appended since
// the last flush.
func (c *StreamingBlock) CurrentBufferedObjects() int {
	return c.bufferedObjects
}
// Length reports the appender's length (total objects appended to the block).
func (c *StreamingBlock) Length() int {
	return c.appender.Length()
}
// FlushBuffer flushes any buffered object data to the backend through the
// supplied append tracker, then resets the buffer and the buffered-object
// count. It returns the (possibly updated) tracker and the number of bytes
// that were flushed.
func (c *StreamingBlock) FlushBuffer(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (backend.AppendTracker, int, error) {
	// Nothing has been appended yet: nothing to flush.
	if c.appender.Length() == 0 {
		return tracker, 0, nil
	}

	meta := c.BlockMeta()
	newTracker, err := appendBlockData(ctx, w, meta, tracker, c.appendBuffer.Bytes())
	if err != nil {
		return nil, 0, err
	}

	flushed := c.appendBuffer.Len()
	c.appendBuffer.Reset()
	c.bufferedObjects = 0

	return newTracker, flushed, nil
}
// Complete finishes the block: it completes the appender, flushes any
// remaining buffered data, closes the backend append, then writes the index,
// bloom filter and block meta. It returns the number of bytes moved by the
// final flush.
func (c *StreamingBlock) Complete(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (int, error) {
	if err := c.appender.Complete(); err != nil {
		return 0, err
	}

	// one final flush
	tracker, bytesFlushed, err := c.FlushBuffer(ctx, tracker, w)
	if err != nil {
		return 0, err
	}

	// close data file
	if err := w.CloseAppend(ctx, tracker); err != nil {
		return 0, err
	}

	records := c.appender.Records()
	meta := c.BlockMeta()

	// Build the index over the appender's records.
	indexBytes, err := NewIndexWriter(c.cfg.IndexPageSizeBytes).Write(records)
	if err != nil {
		return 0, err
	}

	meta.TotalRecords = uint32(len(records)) // casting
	meta.IndexPageSize = uint32(c.cfg.IndexPageSizeBytes)
	meta.BloomShardCount = uint16(c.bloom.GetShardCount())

	return bytesFlushed, writeBlockMeta(ctx, w, meta, indexBytes, c.bloom)
}
// BlockMeta returns the block's meta, refreshing its Size from the
// appender's current data length first.
func (c *StreamingBlock) BlockMeta() *backend.BlockMeta {
	c.meta.Size = c.appender.DataLength()
	return c.meta
}