// engine.go (forked from influxdata/influxdb)

package influxql
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"math"
"sort"
"time"
)
// DB represents an interface for creating transactions.
type DB interface {
Begin() (Tx, error)
}
const (
	// MaxGroupByPoints is the maximum number of points a group by statement
	// may return; exceeding it is an error. Most likely the user specified a
	// group by interval without time boundaries.
MaxGroupByPoints = 100000
// Since time is always selected, the column count when selecting only a single other value will be 2
SelectColumnCountWithOneValue = 2
// IgnoredChunkSize is what gets passed into Mapper.Begin for aggregate queries as they don't chunk points out
IgnoredChunkSize = 0
)
// Tx represents a transaction.
// The Tx must be opened before being used.
type Tx interface {
// Create MapReduceJobs for the given select statement. One MRJob will be created per unique tagset that matches the query
CreateMapReduceJobs(stmt *SelectStatement, tagKeys []string) ([]*MapReduceJob, error)
}
type MapReduceJob struct {
MeasurementName string
TagSet *TagSet
Mappers []Mapper // the mappers to hit all shards for this MRJob
TMin int64 // minimum time specified in the query
TMax int64 // maximum time specified in the query
key []byte // a key that identifies the MRJob so it can be sorted
interval int64 // the group by interval of the query
stmt *SelectStatement // the select statement this job was created for
chunkSize int // the number of points to buffer in raw queries before returning a chunked response
}
func (m *MapReduceJob) Open() error {
for _, mm := range m.Mappers {
if err := mm.Open(); err != nil {
m.Close()
return err
}
}
return nil
}
func (m *MapReduceJob) Close() {
for _, mm := range m.Mappers {
mm.Close()
}
}
func (m *MapReduceJob) Key() []byte {
if m.key == nil {
m.key = append([]byte(m.MeasurementName), m.TagSet.Key...)
}
return m.key
}
func (m *MapReduceJob) Execute(out chan *Row, filterEmptyResults bool) {
if err := m.Open(); err != nil {
out <- &Row{Err: err}
m.Close()
return
}
defer m.Close()
// if it's a raw query or a non-nested derivative we handle processing differently
if m.stmt.IsRawQuery || m.stmt.IsSimpleDerivative() {
m.processRawQuery(out, filterEmptyResults)
return
}
// get the aggregates and the associated reduce functions
aggregates := m.stmt.FunctionCalls()
reduceFuncs := make([]ReduceFunc, len(aggregates))
for i, c := range aggregates {
reduceFunc, err := InitializeReduceFunc(c)
if err != nil {
out <- &Row{Err: err}
return
}
reduceFuncs[i] = reduceFunc
}
// we'll have a fixed number of points with times in buckets. Initialize those times and a slice to hold the associated values
var pointCountInResult int
// if the user didn't specify a start time or a group by interval, we're returning a single point that describes the entire range
if m.TMin == 0 || m.interval == 0 {
// they want a single aggregate point for the entire time range
m.interval = m.TMax - m.TMin
pointCountInResult = 1
} else {
intervalTop := m.TMax/m.interval*m.interval + m.interval
intervalBottom := m.TMin / m.interval * m.interval
pointCountInResult = int((intervalTop - intervalBottom) / m.interval)
}
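	// Worked example (illustrative numbers): with TMin = 5s, TMax = 23s, and a
	// 10s interval (all in nanoseconds), intervalTop = 23/10*10 + 10 = 30s,
	// intervalBottom = 5/10*10 = 0s, and pointCountInResult = (30-0)/10 = 3
	// buckets: [0s,10s), [10s,20s), [20s,30s).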
// For group by time queries, limit the number of data points returned by the limit and offset
// raw query limits are handled elsewhere
if m.stmt.Limit > 0 || m.stmt.Offset > 0 {
// ensure that the offset isn't higher than the number of points we'd get
if m.stmt.Offset > pointCountInResult {
return
}
		// take the lesser of the precomputed number of group by buckets that
		// will be in the result or the limit passed in by the user
if m.stmt.Limit < pointCountInResult {
pointCountInResult = m.stmt.Limit
}
}
// If we are exceeding our MaxGroupByPoints and we aren't a raw query, error out
if pointCountInResult > MaxGroupByPoints {
out <- &Row{
Err: errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?"),
}
return
}
// initialize the times of the aggregate points
resultValues := make([][]interface{}, pointCountInResult)
// ensure that the start time for the results is on the start of the window
startTimeBucket := m.TMin
if m.interval > 0 {
startTimeBucket = startTimeBucket / m.interval * m.interval
}
	for i := range resultValues {
var t int64
if m.stmt.Offset > 0 {
t = startTimeBucket + (int64(i+1) * m.interval * int64(m.stmt.Offset))
} else {
t = startTimeBucket + (int64(i+1) * m.interval) - m.interval
}
// If we start getting out of our max time range, then truncate values and return
if t > m.TMax {
resultValues = resultValues[:i]
break
}
// we always include time so we need one more column than we have aggregates
vals := make([]interface{}, 0, len(aggregates)+1)
resultValues[i] = append(vals, time.Unix(0, t).UTC())
}
	// If the specified start time is earlier than the start implied by the
	// offset, reset it to the later time so we don't scan over data that won't
	// show up in the result.
if m.stmt.Offset > 0 {
m.TMin = resultValues[0][0].(time.Time).UnixNano()
}
// now loop through the aggregate functions and populate everything
for i, c := range aggregates {
if err := m.processAggregate(c, reduceFuncs[i], resultValues); err != nil {
out <- &Row{
Name: m.MeasurementName,
Tags: m.TagSet.Tags,
Err: err,
}
return
}
}
// filter out empty results
if filterEmptyResults && m.resultsEmpty(resultValues) {
return
}
// put together the row to return
columnNames := make([]string, len(m.stmt.Fields)+1)
columnNames[0] = "time"
for i, f := range m.stmt.Fields {
columnNames[i+1] = f.Name()
}
// processes the result values if there's any math in there
resultValues = m.processResults(resultValues)
// handle any fill options
resultValues = m.processFill(resultValues)
// process derivatives
resultValues = m.processDerivative(resultValues)
row := &Row{
Name: m.MeasurementName,
Tags: m.TagSet.Tags,
Columns: columnNames,
Values: resultValues,
}
	// and send the row out
out <- row
}
// processRawQuery will handle running the mappers and then reducing their output
// for queries that pull back raw data values without computing any kind of aggregates.
func (m *MapReduceJob) processRawQuery(out chan *Row, filterEmptyResults bool) {
// initialize the mappers
for _, mm := range m.Mappers {
if err := mm.Begin(nil, m.TMin, m.chunkSize); err != nil {
out <- &Row{Err: err}
return
}
}
mapperOutputs := make([][]*rawQueryMapOutput, len(m.Mappers))
// markers for which mappers have been completely emptied
mapperComplete := make([]bool, len(m.Mappers))
	// For limit and offset we track how many values we've skipped for the
	// offset and how many we've already sent for the limit. We track the sent
	// count because results may go out in chunks: with a limit of 10k and a
	// chunk size of 1k, the limit spans multiple chunks.
valuesSent := 0
valuesOffset := 0
valuesToReturn := make([]*rawQueryMapOutput, 0)
var lastValueFromPreviousChunk *rawQueryMapOutput
// loop until we've emptied out all the mappers and sent everything out
for {
// collect up to the limit for each mapper
for j, mm := range m.Mappers {
// only pull from mappers that potentially have more data and whose last output has been completely sent out.
if mapperOutputs[j] != nil || mapperComplete[j] {
continue
}
res, err := mm.NextInterval()
if err != nil {
out <- &Row{Err: err}
return
}
if res != nil {
mapperOutputs[j] = res.([]*rawQueryMapOutput)
} else { // if we got a nil from the mapper it means that we've emptied all data from it
mapperComplete[j] = true
}
}
// process the mapper outputs. we can send out everything up to the min of the last time in the mappers
min := int64(math.MaxInt64)
for _, o := range mapperOutputs {
// some of the mappers could empty out before others so ignore them because they'll be nil
if o == nil {
continue
}
// find the min of the last point in each mapper
t := o[len(o)-1].Time
if t < min {
min = t
}
}
// now empty out all the mapper outputs up to the min time
var values []*rawQueryMapOutput
for j, o := range mapperOutputs {
// find the index of the point up to the min
ind := len(o)
for i, mo := range o {
if mo.Time > min {
ind = i
break
}
}
// add up to the index to the values
values = append(values, o[:ind]...)
// clear out previously sent mapper output data
mapperOutputs[j] = mapperOutputs[j][ind:]
// if we emptied out all the values, set this output to nil so that the mapper will get run again on the next loop
if len(mapperOutputs[j]) == 0 {
mapperOutputs[j] = nil
}
}
// if we didn't pull out any values, we're done here
if values == nil {
break
}
// sort the values by time first so we can then handle offset and limit
sort.Sort(rawOutputs(values))
// get rid of any points that need to be offset
if valuesOffset < m.stmt.Offset {
offset := m.stmt.Offset - valuesOffset
// if offset is bigger than the number of values we have, move to the next batch from the mappers
if offset > len(values) {
valuesOffset += len(values)
continue
}
values = values[offset:]
valuesOffset += offset
}
// ensure we don't send more than the limit
if valuesSent < m.stmt.Limit {
limit := m.stmt.Limit - valuesSent
if len(values) > limit {
values = values[:limit]
}
valuesSent += len(values)
}
valuesToReturn = append(valuesToReturn, values...)
// hit the chunk size? Send out what has been accumulated, but keep
// processing.
if len(valuesToReturn) >= m.chunkSize {
lastValueFromPreviousChunk = valuesToReturn[len(valuesToReturn)-1]
valuesToReturn = m.processRawQueryDerivative(lastValueFromPreviousChunk, valuesToReturn)
row := m.processRawResults(valuesToReturn)
// perform post-processing, such as math.
row.Values = m.processResults(row.Values)
out <- row
valuesToReturn = make([]*rawQueryMapOutput, 0)
}
// stop processing if we've hit the limit
if m.stmt.Limit != 0 && valuesSent >= m.stmt.Limit {
break
}
}
if len(valuesToReturn) == 0 {
if !filterEmptyResults {
out <- m.processRawResults(nil)
}
} else {
valuesToReturn = m.processRawQueryDerivative(lastValueFromPreviousChunk, valuesToReturn)
row := m.processRawResults(valuesToReturn)
// perform post-processing, such as math.
row.Values = m.processResults(row.Values)
out <- row
}
}
// derivativeInterval returns the time interval for the one (and only) derivative func
func (m *MapReduceJob) derivativeInterval() time.Duration {
if len(m.stmt.FunctionCalls()[0].Args) == 2 {
return m.stmt.FunctionCalls()[0].Args[1].(*DurationLiteral).Val
}
if m.stmt.groupByInterval > 0 {
return m.stmt.groupByInterval
}
return time.Second
}
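// For example: derivative(mean(value), 10s) yields 10s; derivative(mean(value))
// with GROUP BY time(1m) yields 1m; with neither, it falls back to 1s.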
func (m *MapReduceJob) isNonNegativeDerivative() bool {
return m.stmt.FunctionCalls()[0].Name == "non_negative_derivative"
}
func (m *MapReduceJob) processRawQueryDerivative(lastValueFromPreviousChunk *rawQueryMapOutput, valuesToReturn []*rawQueryMapOutput) []*rawQueryMapOutput {
// If we're called and do not have a derivative aggregate function, then return what was passed in
if !m.stmt.HasDerivative() {
return valuesToReturn
}
if len(valuesToReturn) == 0 {
return valuesToReturn
}
// If we only have 1 value, then the value did not change, so return
// a single row with 0.0
if len(valuesToReturn) == 1 {
return []*rawQueryMapOutput{
&rawQueryMapOutput{
Time: valuesToReturn[0].Time,
Values: 0.0,
},
}
}
if lastValueFromPreviousChunk == nil {
lastValueFromPreviousChunk = valuesToReturn[0]
}
// Determines whether to drop negative differences
isNonNegative := m.isNonNegativeDerivative()
derivativeValues := []*rawQueryMapOutput{}
for i := 1; i < len(valuesToReturn); i++ {
v := valuesToReturn[i]
// Calculate the derivative of successive points by dividing the difference
// of each value by the elapsed time normalized to the interval
diff := i64tof64(v.Values) - i64tof64(lastValueFromPreviousChunk.Values)
elapsed := v.Time - lastValueFromPreviousChunk.Time
value := 0.0
if elapsed > 0 {
value = diff / (float64(elapsed) / float64(m.derivativeInterval()))
}
lastValueFromPreviousChunk = v
// Drop negative values for non-negative derivatives
if isNonNegative && diff < 0 {
continue
}
derivativeValues = append(derivativeValues, &rawQueryMapOutput{
Time: v.Time,
Values: value,
})
}
return derivativeValues
}
// processDerivative returns the derivatives of the results
func (m *MapReduceJob) processDerivative(results [][]interface{}) [][]interface{} {
// Return early if we're not supposed to process the derivatives
if !m.stmt.HasDerivative() {
return results
}
// Return early if we can't calculate derivatives
if len(results) == 0 {
return results
}
// If we only have 1 value, then the value did not change, so return
// a single row w/ 0.0
if len(results) == 1 {
return [][]interface{}{
[]interface{}{results[0][0], 0.0},
}
}
// Determines whether to drop negative differences
isNonNegative := m.isNonNegativeDerivative()
// Otherwise calculate the derivatives as the difference between consecutive
// points divided by the elapsed time. Then normalize to the requested
// interval.
derivatives := [][]interface{}{}
for i := 1; i < len(results); i++ {
prev := results[i-1]
cur := results[i]
if cur[1] == nil || prev[1] == nil {
continue
}
elapsed := cur[0].(time.Time).Sub(prev[0].(time.Time))
diff := i64tof64(cur[1]) - i64tof64(prev[1])
value := 0.0
if elapsed > 0 {
value = float64(diff) / (float64(elapsed) / float64(m.derivativeInterval()))
}
// Drop negative values for non-negative derivatives
if isNonNegative && diff < 0 {
continue
}
val := []interface{}{
cur[0],
value,
}
derivatives = append(derivatives, val)
}
return derivatives
}
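// Worked example (illustrative numbers): consecutive points (t=0s, 10) and
// (t=10s, 30) with a 1s derivative interval give diff = 30 - 10 = 20 and
// elapsed = 10s, so value = 20 / (10s / 1s) = 2.0, i.e. a growth of 2 per second.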
// processResults will apply any math that was specified in the select statement against the passed in results
func (m *MapReduceJob) processResults(results [][]interface{}) [][]interface{} {
hasMath := false
for _, f := range m.stmt.Fields {
if _, ok := f.Expr.(*BinaryExpr); ok {
hasMath = true
} else if _, ok := f.Expr.(*ParenExpr); ok {
hasMath = true
}
}
if !hasMath {
return results
}
processors := make([]processor, len(m.stmt.Fields))
startIndex := 1
for i, f := range m.stmt.Fields {
processors[i], startIndex = getProcessor(f.Expr, startIndex)
}
mathResults := make([][]interface{}, len(results))
	for i := range mathResults {
mathResults[i] = make([]interface{}, len(m.stmt.Fields)+1)
// put the time in
mathResults[i][0] = results[i][0]
for j, p := range processors {
mathResults[i][j+1] = p(results[i])
}
}
return mathResults
}
// processFill applies the query's fill options to the results, returning new
// results (or the same if no fill modifications are needed).
func (m *MapReduceJob) processFill(results [][]interface{}) [][]interface{} {
// don't do anything if we're supposed to leave the nulls
if m.stmt.Fill == NullFill {
return results
}
if m.stmt.Fill == NoFill {
// remove any rows that have even one nil value. This one is tricky because they could have multiple
// aggregates, but this option means that any row that has even one nil gets purged.
newResults := make([][]interface{}, 0, len(results))
for _, vals := range results {
hasNil := false
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
hasNil = true
break
}
}
if !hasNil {
newResults = append(newResults, vals)
}
}
return newResults
}
// they're either filling with previous values or a specific number
for i, vals := range results {
// start at 1 because the first value is always time
for j := 1; j < len(vals); j++ {
if vals[j] == nil {
switch m.stmt.Fill {
case PreviousFill:
if i != 0 {
vals[j] = results[i-1][j]
}
case NumberFill:
vals[j] = m.stmt.FillValue
}
}
}
}
return results
}
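// Fill example (illustrative): given the rows [t1, nil], [t2, 5.0], [t3, nil],
// fill(none) keeps only [t2, 5.0]; fill(previous) yields [t1, nil], [t2, 5.0],
// [t3, 5.0] (t1 has no earlier row to copy from); fill(0) yields [t1, 0],
// [t2, 5.0], [t3, 0]; the default fill(null) leaves the nils in place.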
func getProcessor(expr Expr, startIndex int) (processor, int) {
switch expr := expr.(type) {
case *VarRef:
return newEchoProcessor(startIndex), startIndex + 1
case *Call:
return newEchoProcessor(startIndex), startIndex + 1
case *BinaryExpr:
return getBinaryProcessor(expr, startIndex)
case *ParenExpr:
return getProcessor(expr.Expr, startIndex)
case *NumberLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *StringLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *BooleanLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *TimeLiteral:
return newLiteralProcessor(expr.Val), startIndex
case *DurationLiteral:
return newLiteralProcessor(expr.Val), startIndex
}
panic("unreachable")
}
type processor func(values []interface{}) interface{}
func newEchoProcessor(index int) processor {
return func(values []interface{}) interface{} {
return values[index]
}
}
func newLiteralProcessor(val interface{}) processor {
return func(values []interface{}) interface{} {
return val
}
}
func getBinaryProcessor(expr *BinaryExpr, startIndex int) (processor, int) {
lhs, index := getProcessor(expr.LHS, startIndex)
rhs, index := getProcessor(expr.RHS, index)
return newBinaryExprEvaluator(expr.Op, lhs, rhs), index
}
func newBinaryExprEvaluator(op Token, lhs, rhs processor) processor {
	switch op {
	case ADD:
		return func(values []interface{}) interface{} {
			l, lok := lhs(values).(float64)
			r, rok := rhs(values).(float64)
			if !lok || !rok {
				return nil
			}
			return l + r
		}
	case SUB:
		return func(values []interface{}) interface{} {
			l, lok := lhs(values).(float64)
			r, rok := rhs(values).(float64)
			if !lok || !rok {
				return nil
			}
			return l - r
		}
	case MUL:
		return func(values []interface{}) interface{} {
			l, lok := lhs(values).(float64)
			r, rok := rhs(values).(float64)
			if !lok || !rok {
				return nil
			}
			return l * r
		}
	case DIV:
		return func(values []interface{}) interface{} {
			l, lok := lhs(values).(float64)
			r, rok := rhs(values).(float64)
			// guard against division by zero
			if !lok || !rok || r == 0 {
				return nil
			}
			return l / r
		}
	default:
		// we shouldn't get here, but return nil values if we do
		return func(values []interface{}) interface{} {
			return nil
		}
	}
}
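// exampleEvaluateProduct is an illustrative sketch, not part of the original
// file: it wires processors together the way getProcessor would for the
// expression `value * 2`, where values[0] is the time column and values[1]
// holds the selected field.
func exampleEvaluateProduct(values []interface{}) interface{} {
	lhs := newEchoProcessor(1)      // echo the field at column index 1
	rhs := newLiteralProcessor(2.0) // the constant 2
	eval := newBinaryExprEvaluator(MUL, lhs, rhs)
	// e.g. values = []interface{}{t, 21.0} evaluates to 42.0
	return eval(values)
}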
// resultsEmpty will return true if all the result values are empty or contain only nulls
func (m *MapReduceJob) resultsEmpty(resultValues [][]interface{}) bool {
for _, vals := range resultValues {
// start the loop at 1 because we want to skip over the time value
for i := 1; i < len(vals); i++ {
if vals[i] != nil {
return false
}
}
}
return true
}
// processRawResults will handle converting the reduce results from a raw query into a Row
func (m *MapReduceJob) processRawResults(values []*rawQueryMapOutput) *Row {
selectNames := m.stmt.NamesInSelect()
// ensure that time is in the select names and in the first position
hasTime := false
for i, n := range selectNames {
if n == "time" {
// Swap time to the first argument for names
if i != 0 {
selectNames[0], selectNames[i] = selectNames[i], selectNames[0]
}
hasTime = true
break
}
}
// time should always be in the list of names they get back
if !hasTime {
selectNames = append([]string{"time"}, selectNames...)
}
// since selectNames can contain tags, we need to strip them out
selectFields := make([]string, 0, len(selectNames))
for _, n := range selectNames {
if _, found := m.TagSet.Tags[n]; !found {
selectFields = append(selectFields, n)
}
}
row := &Row{
Name: m.MeasurementName,
Tags: m.TagSet.Tags,
Columns: selectFields,
}
// return an empty row if there are no results
if len(values) == 0 {
return row
}
// if they've selected only a single value we have to handle things a little differently
singleValue := len(selectFields) == SelectColumnCountWithOneValue
// the results will have all of the raw mapper results, convert into the row
for _, v := range values {
vals := make([]interface{}, len(selectFields))
if singleValue {
vals[0] = time.Unix(0, v.Time).UTC()
vals[1] = v.Values.(interface{})
} else {
fields := v.Values.(map[string]interface{})
// time is always the first value
vals[0] = time.Unix(0, v.Time).UTC()
// populate the other values
for i := 1; i < len(selectFields); i++ {
vals[i] = fields[selectFields[i]]
}
}
row.Values = append(row.Values, vals)
}
return row
}
func (m *MapReduceJob) processAggregate(c *Call, reduceFunc ReduceFunc, resultValues [][]interface{}) error {
mapperOutputs := make([]interface{}, len(m.Mappers))
	// initialize the mappers
for _, mm := range m.Mappers {
// for aggregate queries, we use the chunk size to determine how many times NextInterval should be called.
// This is the number of buckets that we need to fill.
if err := mm.Begin(c, m.TMin, len(resultValues)); err != nil {
return err
}
}
// populate the result values for each interval of time
	for i := range resultValues {
// collect the results from each mapper
for j, mm := range m.Mappers {
res, err := mm.NextInterval()
if err != nil {
return err
}
mapperOutputs[j] = res
}
resultValues[i] = append(resultValues[i], reduceFunc(mapperOutputs))
}
return nil
}
type MapReduceJobs []*MapReduceJob
func (a MapReduceJobs) Len() int { return len(a) }
func (a MapReduceJobs) Less(i, j int) bool { return bytes.Compare(a[i].Key(), a[j].Key()) == -1 }
func (a MapReduceJobs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Mapper will run through a map function. A single mapper will be created
// for each shard for each tagset that must be hit to satisfy a query.
// Mappers can either point to a local shard or could point to a remote server.
type Mapper interface {
	// Open will open the necessary resources to begin the map job. Could be
	// connections to remote servers or hitting the local bolt store.
Open() error
// Close will close the mapper (either the bolt transaction or the request)
Close()
	// Begin will set up the mapper to run the map function for a given
	// aggregate call starting at the passed in time. For raw data queries the
	// mapper will yield no more than limit points.
Begin(aggregate *Call, startingTime int64, limit int) error
	// NextInterval returns the next interval of data from the mapper, in time
	// order. This is a forward-only operation from the start time passed into
	// Begin; it returns nil when there is no more data to be read. Interval
	// periods can differ based on time boundaries (months, daylight savings,
	// etc.) of the query.
NextInterval() (interface{}, error)
}
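// nopMapper is an illustrative stub, not part of the original file; it shows
// the minimal lifecycle the engine drives: Open, Begin, NextInterval until a
// nil result, then Close.
type nopMapper struct{}

func (nopMapper) Open() error { return nil }
func (nopMapper) Close()      {}
func (nopMapper) Begin(aggregate *Call, startingTime int64, limit int) error {
	return nil
}
func (nopMapper) NextInterval() (interface{}, error) {
	return nil, nil // nil result signals the mapper is exhausted
}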
type TagSet struct {
Tags map[string]string
Filters []Expr
SeriesKeys []string
Key []byte
}
func (t *TagSet) AddFilter(key string, filter Expr) {
t.SeriesKeys = append(t.SeriesKeys, key)
t.Filters = append(t.Filters, filter)
}
// Planner represents an object for creating execution plans.
type Planner struct {
DB DB
// Returns the current time. Defaults to time.Now().
Now func() time.Time
}
// NewPlanner returns a new instance of Planner.
func NewPlanner(db DB) *Planner {
return &Planner{
DB: db,
Now: time.Now,
}
}
// Plan creates an execution plan for the given SelectStatement and returns an Executor.
func (p *Planner) Plan(stmt *SelectStatement, chunkSize int) (*Executor, error) {
now := p.Now().UTC()
// Replace instances of "now()" with the current time.
stmt.Condition = Reduce(stmt.Condition, &NowValuer{Now: now})
// Begin an unopened transaction.
tx, err := p.DB.Begin()
if err != nil {
return nil, err
}
// Determine group by tag keys.
interval, tags, err := stmt.Dimensions.Normalize()
if err != nil {
return nil, err
}
	// TODO: handle queries that select from multiple measurements. This assumes that we're only selecting from a single one
jobs, err := tx.CreateMapReduceJobs(stmt, tags)
if err != nil {
return nil, err
}
// LIMIT and OFFSET the unique series
if stmt.SLimit > 0 || stmt.SOffset > 0 {
if stmt.SOffset > len(jobs) {
jobs = nil
} else {
if stmt.SOffset+stmt.SLimit > len(jobs) {
stmt.SLimit = len(jobs) - stmt.SOffset
}
jobs = jobs[stmt.SOffset : stmt.SOffset+stmt.SLimit]
}
}
for _, j := range jobs {
j.interval = interval.Nanoseconds()
j.stmt = stmt
j.chunkSize = chunkSize
}
return &Executor{tx: tx, stmt: stmt, jobs: jobs, interval: interval.Nanoseconds()}, nil
}
// Executor executes all of a statement's MapReduceJobs and combines their
// reduced results into rows.
type Executor struct {
tx Tx // transaction
stmt *SelectStatement // original statement
jobs []*MapReduceJob // one job per unique tag set that will return in the query
interval int64 // the group by interval of the query in nanoseconds
}
// Execute begins execution of the query and returns a channel to receive rows.
func (e *Executor) Execute() <-chan *Row {
// Create output channel and stream data in a separate goroutine.
	out := make(chan *Row)
go e.execute(out)
return out
}
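// exampleRunQuery is an illustrative sketch, not part of the original file:
// it shows how a caller is expected to drive the engine, assuming db is some
// concrete DB implementation and stmt an already parsed *SelectStatement.
func exampleRunQuery(db DB, stmt *SelectStatement) ([]*Row, error) {
	planner := NewPlanner(db)
	// chunk raw results every 1000 points (an arbitrary example size)
	executor, err := planner.Plan(stmt, 1000)
	if err != nil {
		return nil, err
	}
	var rows []*Row
	// Execute streams rows on a channel; drain it until it closes.
	for row := range executor.Execute() {
		if row.Err != nil {
			return nil, row.Err
		}
		rows = append(rows, row)
	}
	return rows, nil
}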
func (e *Executor) close() {
for _, j := range e.jobs {
j.Close()
}
}
// execute runs in a separate goroutine and streams data from processors.
func (e *Executor) execute(out chan *Row) {
	// Ensure the MRJobs close after execution.
defer e.close()
// If we have multiple tag sets we'll want to filter out the empty ones
filterEmptyResults := len(e.jobs) > 1
// Execute each MRJob serially
for _, j := range e.jobs {
j.Execute(out, filterEmptyResults)
}
// Mark the end of the output channel.
close(out)
}
func i64tof64(v interface{}) float64 {
	switch v := v.(type) {
	case int64:
		return float64(v)
	case float64:
		return v
	}
	panic(fmt.Sprintf("expected either int64 or float64, got %v", v))
}
// Row represents a single row returned from the execution of a statement.
type Row struct {
Name string `json:"name,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Columns []string `json:"columns,omitempty"`
Values [][]interface{} `json:"values,omitempty"`
Err error `json:"err,omitempty"`
}
// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
h := fnv.New64a()
keys := r.tagsKeys()
for _, k := range keys {
h.Write([]byte(k))
h.Write([]byte(r.Tags[k]))
}
return h.Sum64()
}
// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
	a := make([]string, 0, len(r.Tags))
for k := range r.Tags {
a = append(a, k)
}
sort.Strings(a)
return a
}
// Rows represents a list of rows that can be sorted consistently by name/tag.
type Rows []*Row
func (p Rows) Len() int { return len(p) }
func (p Rows) Less(i, j int) bool {
// Sort by name first.
if p[i].Name != p[j].Name {
return p[i].Name < p[j].Name
}
// Sort by tag set hash. Tags don't have a meaningful sort order so we
// just compute a hash and sort by that instead. This allows the tests
// to receive rows in a predictable order every time.
return p[i].tagsHash() < p[j].tagsHash()
}
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }