forked from timescale/tsbs
-
Notifications
You must be signed in to change notification settings - Fork 2
/
scan.go
67 lines (55 loc) · 1.27 KB
/
scan.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
package akumuli
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"sync"
"github.com/benchant/tsbs/pkg/data"
"github.com/benchant/tsbs/pkg/data/usecases/common"
"github.com/benchant/tsbs/pkg/targets"
)
// fileDataSource streams pre-serialized akumuli points from a buffered
// reader (typically a data file), yielding one length-prefixed frame
// per NextItem call.
type fileDataSource struct {
	reader *bufio.Reader // source of serialized frames; assumed positioned at a frame boundary
}
// NextItem reads the next serialized point from the stream and returns
// it as a LoadedPoint whose Data is the raw frame bytes.
//
// Frame layout (established by the reads below): bytes 0-4 carry a
// little-endian series id (consumed later by pointIndexer), bytes 4-6
// carry the total frame length in little-endian order, and the frame —
// header included — spans that many bytes. A zero-value LoadedPoint
// signals end of input.
func (d *fileDataSource) NextItem() data.LoadedPoint {
	hdr, err := d.reader.Peek(6)
	if err != nil {
		// io.EOF is the normal end of stream. Any other error (or a
		// short peek) must also stop us here: the original code only
		// checked io.EOF and would panic indexing hdr[4:6] otherwise.
		return data.LoadedPoint{}
	}
	nbytes := binary.LittleEndian.Uint16(hdr[4:6])
	body := make([]byte, nbytes)
	if _, err := io.ReadFull(d.reader, body); err != nil {
		// Covers both io.EOF and io.ErrUnexpectedEOF (truncated final
		// frame). The original compared only against io.EOF, letting a
		// partially-filled buffer reach the loader on truncation.
		return data.LoadedPoint{}
	}
	return data.NewLoadedPoint(body)
}
// Headers returns nil: this data source does not read a generated-data
// header section before the point stream.
// NOTE(review): the previous comment said "Cassandra doesn't serialize
// headers" — apparently copied from the cassandra target; this is the
// akumuli package.
func (d *fileDataSource) Headers() *common.GeneratedDataHeaders { return nil }
// pointIndexer routes points across nchan worker channels, keyed by the
// series id embedded in each point's frame header (see GetIndex).
type pointIndexer struct {
	nchan uint // number of worker channels; must be > 0 or GetIndex divides by zero
}
// GetIndex picks the worker channel for a point. The first four bytes
// of the frame hold a little-endian series id; reducing it modulo the
// channel count keeps every point of a given series on the same worker.
func (i *pointIndexer) GetIndex(p data.LoadedPoint) uint {
	frame := p.Data.([]byte)
	seriesID := binary.LittleEndian.Uint32(frame[:4])
	return uint(seriesID) % i.nchan
}
// batch accumulates serialized point frames in one contiguous buffer so
// the loader can flush many points with a single write.
type batch struct {
	buf  *bytes.Buffer // concatenated raw frames; pooled via factory
	rows uint          // count of frames appended so far
}
// Len reports how many points have been appended to this batch.
func (b *batch) Len() uint { return b.rows }
// Append adds one serialized point by concatenating its raw frame onto
// the batch buffer and bumping the row counter.
func (b *batch) Append(item data.LoadedPoint) {
	frame := item.Data.([]byte)
	b.buf.Write(frame)
	b.rows++
}
// factory creates batches, recycling their backing buffers through a
// shared sync.Pool to cut per-batch allocations.
type factory struct {
	bufPool *sync.Pool // pool of *bytes.Buffer reused across batches
}
// New hands out an empty batch backed by a pooled bytes.Buffer.
//
// The buffer is Reset on the way out so a fresh batch never starts with
// leftover bytes from its previous use: the original relied on the
// release path resetting before Put, which is not visible here. Reset
// is idempotent, so this is safe even if that path does reset.
func (f *factory) New() targets.Batch {
	buf := f.bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	return &batch{buf: buf}
}