/
bigtable_scanner.go
74 lines (59 loc) · 1.77 KB
/
bigtable_scanner.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
package gcp
import (
"context"
"fmt"
"cloud.google.com/go/bigtable"
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/sirupsen/logrus"
chunkTool "github.com/grafana/cortextool/pkg/chunk"
)
// bigtableScanner is a chunkTool.Scanner backed by a Google Cloud
// Bigtable client; rows are scanned per table/user via Scan.
type bigtableScanner struct {
	client *bigtable.Client
}
// NewBigtableScanner opens a Bigtable client for the given project and
// instance and returns it wrapped as a chunkTool.Scanner. The error from
// client construction is returned unmodified.
func NewBigtableScanner(ctx context.Context, project, instance string) (chunkTool.Scanner, error) {
	c, err := bigtable.NewClient(ctx, project, instance)
	if err != nil {
		return nil, err
	}
	scanner := &bigtableScanner{client: c}
	return scanner, nil
}
// Scan forwards metrics to a golang channel, forwarded chunks must have the same
// user ID. Rows whose keys share the prefix "<user>/<req.Prefix>" are read from
// req.Table; each row key is parsed as an external chunk key, time-filtered via
// req.CheckTime, decoded, and — if filterFunc accepts it — sent on out.
// Returns a wrapped error if the row stream fails or a row cannot be processed.
func (s *bigtableScanner) Scan(ctx context.Context, req chunkTool.ScanRequest, filterFunc chunkTool.FilterFunc, out chan chunk.Chunk) error {
	var processingErr error

	table := s.client.Open(req.Table)
	decodeContext := chunk.NewDecodeContext()

	rr := bigtable.PrefixRange(req.User + "/" + req.Prefix)

	// Read through rows and forward slices of chunks with the same metrics
	// fingerprint
	err := table.ReadRows(ctx, rr, func(row bigtable.Row) bool {
		c, err := chunk.ParseExternalKey(req.User, row.Key())
		if err != nil {
			processingErr = err
			return false
		}
		if !req.CheckTime(c.From, c.Through) {
			logrus.Debugln("skipping chunk updated at timestamp outside filters range")
			return true
		}
		// Guard against rows with no cells in the expected column family:
		// indexing row[columnFamily][0] unconditionally would panic here.
		items := row[columnFamily]
		if len(items) == 0 {
			processingErr = fmt.Errorf("row %q has no data in column family %q", row.Key(), columnFamily)
			return false
		}
		err = c.Decode(decodeContext, items[0].Value)
		if err != nil {
			processingErr = err
			return false
		}
		if filterFunc(c) {
			// Respect cancellation while sending so a stalled receiver
			// cannot block the scan past the context's lifetime.
			select {
			case out <- c:
			case <-ctx.Done():
				processingErr = ctx.Err()
				return false
			}
		}
		return true
	})
	// Wrap with %w so callers can inspect the underlying error via errors.Is/As.
	if err != nil {
		return fmt.Errorf("stream canceled, err: %w, table: %v, user: %v", err, req.Table, req.User)
	}
	if processingErr != nil {
		return fmt.Errorf("stream canceled, err: %w, table: %v, user: %v", processingErr, req.Table, req.User)
	}
	return nil
}