package tsdb
// All aggregate and query functions are defined in this file along with any intermediate data objects they need to process.
// Query functions are represented as two discrete functions: Map and Reduce. These roughly follow the MapReduce
// paradigm popularized by Google and Hadoop.
//
// When adding an aggregate function, define a mapper and a reducer, then register them in the switch statements in the
// initializeMapFunc and initializeReduceFunc functions below.
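//
// As a concrete illustration of the flow: for count(value), each mapper runs
// MapCount over the points in its group-by interval and emits a float64
// count, and the reducer (ReduceSum, per the switch in initializeReduceFunc)
// adds the per-mapper counts together into the final value.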
import (
"container/heap"
"encoding/json"
"fmt"
"math"
"math/rand"
"reflect"
"sort"
// "github.com/davecgh/go-spew/spew"
"github.com/influxdb/influxdb/influxql"
)
// MapInput represents a collection of values to be processed by the mapper.
type MapInput struct {
TMin int64
Items []MapItem
}
// MapItem represents a single item in a collection that's processed by the mapper.
type MapItem struct {
Timestamp int64
Value interface{}
// TODO(benbjohnson):
// Move fields and tags up to MapInput. Currently the engine combines
// multiple series together during processing. This needs to be fixed so
// that each map function only operates on a single series at a time instead.
Fields map[string]interface{}
Tags map[string]string
}
type MapItems []MapItem
func (a MapItems) Len() int { return len(a) }
func (a MapItems) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp }
func (a MapItems) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// mapFunc represents a function used for mapping over a sequential series of data.
// Each MapInput represents a single group by interval.
type mapFunc func(*MapInput) interface{}
// reduceFunc represents a function used for reducing mapper output.
type reduceFunc func([]interface{}) interface{}
// UnmarshalFunc represents a function that can take bytes from a mapper on a
// remote server and unmarshal them into an interface the reducer can use.
type UnmarshalFunc func([]byte) (interface{}, error)
// initializeMapFunc takes an aggregate call from the query and returns the mapFunc
func initializeMapFunc(c *influxql.Call) (mapFunc, error) {
// see if it's a query for raw data
if c == nil {
return MapRawQuery, nil
}
// Retrieve map function by name.
switch c.Name {
case "count":
if _, ok := c.Args[0].(*influxql.Distinct); ok {
return MapCountDistinct, nil
}
if c, ok := c.Args[0].(*influxql.Call); ok {
if c.Name == "distinct" {
return MapCountDistinct, nil
}
}
return MapCount, nil
case "distinct":
return MapDistinct, nil
case "sum":
return MapSum, nil
case "mean":
return MapMean, nil
case "median":
return MapStddev, nil
case "min":
return func(input *MapInput) interface{} {
return MapMin(input, c.Fields()[0])
}, nil
case "max":
return func(input *MapInput) interface{} {
return MapMax(input, c.Fields()[0])
}, nil
case "spread":
return MapSpread, nil
case "stddev":
return MapStddev, nil
case "first":
return func(input *MapInput) interface{} {
return MapFirst(input, c.Fields()[0])
}, nil
case "last":
return func(input *MapInput) interface{} {
return MapLast(input, c.Fields()[0])
}, nil
case "top", "bottom":
// Capture information from the call that the Map function will require.
// As with percentile below, the limit argument is validated during query
// parsing, so the type assertion is safe to ignore here.
lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
limit := int(lit.Val)
fields := topCallArgs(c)
return func(input *MapInput) interface{} {
return MapTopBottom(input, limit, fields, len(c.Args), c.Name)
}, nil
case "percentile":
return MapEcho, nil
case "derivative", "non_negative_derivative":
// If the arg is another aggregate e.g. derivative(mean(value)), then
// use the map func for that nested aggregate
if fn, ok := c.Args[0].(*influxql.Call); ok {
return initializeMapFunc(fn)
}
return MapRawQuery, nil
default:
return nil, fmt.Errorf("function not found: %q", c.Name)
}
}
// initializeReduceFunc takes an aggregate call from the query and returns the reduceFunc
func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) {
// Retrieve reduce function by name.
switch c.Name {
case "count":
if _, ok := c.Args[0].(*influxql.Distinct); ok {
return ReduceCountDistinct, nil
}
if c, ok := c.Args[0].(*influxql.Call); ok {
if c.Name == "distinct" {
return ReduceCountDistinct, nil
}
}
return ReduceSum, nil
case "distinct":
return ReduceDistinct, nil
case "sum":
return ReduceSum, nil
case "mean":
return ReduceMean, nil
case "median":
return ReduceMedian, nil
case "min":
return ReduceMin, nil
case "max":
return ReduceMax, nil
case "spread":
return ReduceSpread, nil
case "stddev":
return ReduceStddev, nil
case "first":
return ReduceFirst, nil
case "last":
return ReduceLast, nil
case "top", "bottom":
return func(values []interface{}) interface{} {
lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
limit := int(lit.Val)
fields := topCallArgs(c)
return ReduceTopBottom(values, limit, fields, c.Name)
}, nil
case "percentile":
return func(values []interface{}) interface{} {
// Checks that this arg exists and is a valid type are done in the parsing validation
// and have test coverage there
lit, _ := c.Args[1].(*influxql.NumberLiteral)
percentile := lit.Val
return ReducePercentile(values, percentile)
}, nil
case "derivative", "non_negative_derivative":
// If the arg is another aggregate e.g. derivative(mean(value)), then
// use the map func for that nested aggregate
if fn, ok := c.Args[0].(*influxql.Call); ok {
return initializeReduceFunc(fn)
}
return nil, fmt.Errorf("expected function argument to %s", c.Name)
default:
return nil, fmt.Errorf("function not found: %q", c.Name)
}
}
func InitializeUnmarshaller(c *influxql.Call) (UnmarshalFunc, error) {
// if c is nil it's a raw data query
if c == nil {
return func(b []byte) (interface{}, error) {
a := make([]*rawQueryMapOutput, 0)
err := json.Unmarshal(b, &a)
return a, err
}, nil
}
// Retrieve marshal function by name
switch c.Name {
case "mean":
return func(b []byte) (interface{}, error) {
var o meanMapOutput
err := json.Unmarshal(b, &o)
return &o, err
}, nil
case "min", "max":
return func(b []byte) (interface{}, error) {
if string(b) == "null" {
return nil, nil
}
var o minMaxMapOut
err := json.Unmarshal(b, &o)
return &o, err
}, nil
case "top", "bottom":
return func(b []byte) (interface{}, error) {
var o PositionPoints
err := json.Unmarshal(b, &o)
return o, err
}, nil
case "spread":
return func(b []byte) (interface{}, error) {
var o spreadMapOutput
err := json.Unmarshal(b, &o)
return &o, err
}, nil
case "distinct":
return func(b []byte) (interface{}, error) {
var val InterfaceValues
err := json.Unmarshal(b, &val)
return val, err
}, nil
case "first":
return func(b []byte) (interface{}, error) {
var o firstLastMapOutput
err := json.Unmarshal(b, &o)
return &o, err
}, nil
case "last":
return func(b []byte) (interface{}, error) {
var o firstLastMapOutput
err := json.Unmarshal(b, &o)
return &o, err
}, nil
case "stddev":
return func(b []byte) (interface{}, error) {
val := make([]float64, 0)
err := json.Unmarshal(b, &val)
return val, err
}, nil
case "median":
return func(b []byte) (interface{}, error) {
a := make([]float64, 0)
err := json.Unmarshal(b, &a)
return a, err
}, nil
default:
return func(b []byte) (interface{}, error) {
var val interface{}
err := json.Unmarshal(b, &val)
return val, err
}, nil
}
}
// MapCount computes the number of values in an iterator.
func MapCount(input *MapInput) interface{} {
n := float64(0)
for range input.Items {
n++
}
return n
}
type InterfaceValues []interface{}
func (d InterfaceValues) Len() int { return len(d) }
func (d InterfaceValues) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
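// Less orders primarily by value; when two values compare equal, the tie is
// broken by type so mixed-type results sort deterministically.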
func (d InterfaceValues) Less(i, j int) bool {
cmpt, a, b := typeCompare(d[i], d[j])
cmpv := valueCompare(a, b)
if cmpv == 0 {
return cmpt < 0
}
return cmpv < 0
}
// MapDistinct computes the unique values in an iterator.
func MapDistinct(input *MapInput) interface{} {
m := make(map[interface{}]struct{})
for _, item := range input.Items {
m[item.Value] = struct{}{}
}
if len(m) == 0 {
return nil
}
results := make(InterfaceValues, len(m))
var i int
for value := range m {
results[i] = value
i++
}
return results
}
// ReduceDistinct finds the unique values for each key.
func ReduceDistinct(values []interface{}) interface{} {
var index = make(map[interface{}]struct{})
// index distinct values from each mapper
for _, v := range values {
if v == nil {
continue
}
d, ok := v.(InterfaceValues)
if !ok {
msg := fmt.Sprintf("expected InterfaceValues, got: %T", v)
panic(msg)
}
for _, distinctValue := range d {
index[distinctValue] = struct{}{}
}
}
// convert map keys to an array
results := make(InterfaceValues, len(index))
var i int
for k := range index {
results[i] = k
i++
}
if len(results) > 0 {
sort.Sort(results)
return results
}
return nil
}
// MapCountDistinct computes the unique count of values in an iterator.
func MapCountDistinct(input *MapInput) interface{} {
var index = make(map[interface{}]struct{})
for _, item := range input.Items {
index[item.Value] = struct{}{}
}
if len(index) == 0 {
return nil
}
return index
}
// ReduceCountDistinct finds the unique counts of values.
func ReduceCountDistinct(values []interface{}) interface{} {
var index = make(map[interface{}]struct{})
// index distinct values from each mapper
for _, v := range values {
if v == nil {
continue
}
d, ok := v.(map[interface{}]struct{})
if !ok {
msg := fmt.Sprintf("expected map[interface{}]struct{}, got: %T", v)
panic(msg)
}
for distinctCountValue := range d {
index[distinctCountValue] = struct{}{}
}
}
return len(index)
}
type NumberType int8
const (
Float64Type NumberType = iota
Int64Type
)
// MapSum computes the summation of values in an iterator.
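// Note that int64 inputs are accumulated in a float64 and only cast back to
// int64 at the end, so integer sums with magnitudes above 2^53 may lose
// precision.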
func MapSum(input *MapInput) interface{} {
if len(input.Items) == 0 {
return nil
}
n := float64(0)
var resultType NumberType
for _, item := range input.Items {
switch v := item.Value.(type) {
case float64:
n += v
case int64:
n += float64(v)
resultType = Int64Type
}
}
switch resultType {
case Float64Type:
return n
case Int64Type:
return int64(n)
default:
return nil
}
}
// ReduceSum computes the sum of values for each key.
func ReduceSum(values []interface{}) interface{} {
var n float64
count := 0
var resultType NumberType
for _, v := range values {
if v == nil {
continue
}
count++
switch n1 := v.(type) {
case float64:
n += n1
case int64:
n += float64(n1)
resultType = Int64Type
}
}
if count > 0 {
switch resultType {
case Float64Type:
return n
case Int64Type:
return int64(n)
}
}
return nil
}
// MapMean computes the count and sum of values in an iterator to be combined by the reducer.
func MapMean(input *MapInput) interface{} {
if len(input.Items) == 0 {
return nil
}
out := &meanMapOutput{}
for _, item := range input.Items {
out.Count++
switch v := item.Value.(type) {
case float64:
out.Total += v
case int64:
out.Total += float64(v)
out.ResultType = Int64Type
}
}
return out
}
type meanMapOutput struct {
Count int
Total float64
ResultType NumberType
}
// ReduceMean computes the mean of values for each key.
func ReduceMean(values []interface{}) interface{} {
var total float64
var count int
for _, v := range values {
if v, _ := v.(*meanMapOutput); v != nil {
count += v.Count
total += v.Total
}
}
if count == 0 {
return nil
}
return total / float64(count)
}
// ReduceMedian computes the median of values
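// For example, given the (hypothetical) values [1, 2, 3, 4], the two middle
// values 2 and 3 are extracted via getSortedRange and the median is
// 2 + (3-2)/2 = 2.5.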
func ReduceMedian(values []interface{}) interface{} {
var data []float64
// Collect all the data points
for _, value := range values {
if value == nil {
continue
}
data = append(data, value.([]float64)...)
}
length := len(data)
if length < 2 {
if length == 0 {
return nil
}
return data[0]
}
middle := length / 2
var sortedRange []float64
if length%2 == 0 {
sortedRange = getSortedRange(data, middle-1, 2)
var low, high = sortedRange[0], sortedRange[1]
return low + (high-low)/2
}
sortedRange = getSortedRange(data, middle, 1)
return sortedRange[0]
}
// getSortedRange returns a sorted subset of data. By using discardLowerRange and discardUpperRange to get the target
// subset (unsorted) and then just sorting that subset, the work can be reduced from O(N lg N), where N is len(data), to
// O(N + count lg count) for the average case
// - O(N) to discard the unwanted items
// - O(count lg count) to sort the count number of extracted items
// This can be useful for:
// - finding the median: getSortedRange(data, middle, 1)
// - finding the top N: getSortedRange(data, len(data) - N, N)
// - finding the bottom N: getSortedRange(data, 0, N)
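// For example, with hypothetical data, getSortedRange([]float64{9, 1, 7, 3, 5}, 1, 2)
// discards the single lowest value (1), then the two highest of the remaining
// values (7 and 9), and returns the sorted window []float64{3, 5}.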
func getSortedRange(data []float64, start int, count int) []float64 {
out := discardLowerRange(data, start)
k := len(out) - count
if k > 0 {
out = discardUpperRange(out, k)
}
sort.Float64s(out)
return out
}
// discardLowerRange discards the k smallest elements of the data set without sorting all the data. Sorting all of
// the data would take O(N lg N), where N is len(data), but partitioning to find the kth smallest number is O(N) in the
// average case. The remaining N-k unsorted elements are returned - no kind of ordering is guaranteed on these elements.
func discardLowerRange(data []float64, k int) []float64 {
out := make([]float64, len(data)-k)
i := 0
// discard values lower than the desired range
for k > 0 {
lows, pivotValue, highs := partition(data)
lowLength := len(lows)
if lowLength > k {
// keep all the highs and the pivot
out[i] = pivotValue
i++
copy(out[i:], highs)
i += len(highs)
// iterate over the lows again
data = lows
} else {
// discard all the lows
data = highs
k -= lowLength
if k == 0 {
// if discarded enough lows, keep the pivot
out[i] = pivotValue
i++
} else {
// able to discard the pivot too
k--
}
}
}
copy(out[i:], data)
return out
}
// discardUpperRange discards the k largest elements of the data set without sorting all the data. Sorting all of
// the data would take O(N lg N), where N is len(data), but partitioning to find the kth largest number is O(N) in the
// average case. The remaining N-k unsorted elements are returned - no kind of ordering is guaranteed on these elements.
func discardUpperRange(data []float64, k int) []float64 {
out := make([]float64, len(data)-k)
i := 0
// discard values higher than the desired range
for k > 0 {
lows, pivotValue, highs := partition(data)
highLength := len(highs)
if highLength > k {
// keep all the lows and the pivot
out[i] = pivotValue
i++
copy(out[i:], lows)
i += len(lows)
// iterate over the highs again
data = highs
} else {
// discard all the highs
data = lows
k -= highLength
if k == 0 {
// if discarded enough highs, keep the pivot
out[i] = pivotValue
i++
} else {
// able to discard the pivot too
k--
}
}
}
copy(out[i:], data)
return out
}
// partition takes a list of data, chooses a random pivot index and returns a list of elements lower than the
// pivotValue, the pivotValue, and a list of elements higher than the pivotValue. partition mutates data.
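// For example, partition([]float64{4, 1, 3, 5, 2}) could return lows [1 2],
// pivotValue 3, and highs [5 4] if 3 happens to be chosen as the pivot; the
// ordering of the elements within lows and highs is not defined.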
func partition(data []float64) (lows []float64, pivotValue float64, highs []float64) {
length := len(data)
// there are better (more complex) ways to calculate pivotIndex (e.g. median of 3, median of 3 medians) if this
// proves to be inadequate.
pivotIndex := rand.Int() % length
pivotValue = data[pivotIndex]
low, high := 1, length-1
// put the pivot in the first position
data[pivotIndex], data[0] = data[0], data[pivotIndex]
// partition the data around the pivot
for low <= high {
for low <= high && data[low] <= pivotValue {
low++
}
for high >= low && data[high] >= pivotValue {
high--
}
if low < high {
data[low], data[high] = data[high], data[low]
}
}
return data[1:low], pivotValue, data[high+1:]
}
type minMaxMapOut struct {
Time int64
Val float64
Type NumberType
Fields map[string]interface{}
Tags map[string]string
}
// MapMin collects the values to pass to the reducer
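// Values may arrive either as bare numeric values or as a map of field name
// to value; in the map case the named field is extracted, and entries that
// are not numeric are skipped.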
func MapMin(input *MapInput, fieldName string) interface{} {
min := &minMaxMapOut{}
pointsYielded := false
var val float64
for _, item := range input.Items {
switch v := item.Value.(type) {
case float64:
val = v
case int64:
val = float64(v)
min.Type = Int64Type
case map[string]interface{}:
if d, t, ok := decodeValueAndNumberType(v[fieldName]); ok {
val, min.Type = d, t
} else {
continue
}
}
// Initialize min
if !pointsYielded {
min.Time = item.Timestamp
min.Val = val
min.Fields = item.Fields
min.Tags = item.Tags
pointsYielded = true
}
current := min.Val
min.Val = math.Min(min.Val, val)
// Check to see if the value changed; if so, update the fields/tags.
if current != min.Val {
min.Time = item.Timestamp
min.Fields = item.Fields
min.Tags = item.Tags
}
}
if pointsYielded {
return min
}
return nil
}
// ReduceMin computes the min of value.
func ReduceMin(values []interface{}) interface{} {
var curr *minMaxMapOut
for _, value := range values {
v, _ := value.(*minMaxMapOut)
if v == nil {
continue
}
// Replace current if lower value.
if curr == nil || v.Val < curr.Val || (v.Val == curr.Val && v.Time < curr.Time) {
curr = v
}
}
if curr == nil {
return nil
}
switch curr.Type {
case Float64Type:
return PositionPoint{
Time: curr.Time,
Value: curr.Val,
Fields: curr.Fields,
Tags: curr.Tags,
}
case Int64Type:
return PositionPoint{
Time: curr.Time,
Value: int64(curr.Val),
Fields: curr.Fields,
Tags: curr.Tags,
}
default:
return nil
}
}
func decodeValueAndNumberType(v interface{}) (float64, NumberType, bool) {
switch n := v.(type) {
case float64:
return n, Float64Type, true
case int64:
return float64(n), Int64Type, true
default:
return 0, Float64Type, false
}
}
// MapMax collects the values to pass to the reducer
func MapMax(input *MapInput, fieldName string) interface{} {
max := &minMaxMapOut{}
pointsYielded := false
var val float64
for _, item := range input.Items {
switch v := item.Value.(type) {
case float64:
val = v
case int64:
val = float64(v)
max.Type = Int64Type
case map[string]interface{}:
if d, t, ok := decodeValueAndNumberType(v[fieldName]); ok {
val, max.Type = d, t
} else {
continue
}
}
// Initialize max
if !pointsYielded {
max.Time = item.Timestamp
max.Val = val
max.Fields = item.Fields
max.Tags = item.Tags
pointsYielded = true
}
current := max.Val
max.Val = math.Max(max.Val, val)
// Check to see if the value changed; if so, update the fields/tags.
if current != max.Val {
max.Time = item.Timestamp
max.Fields = item.Fields
max.Tags = item.Tags
}
}
if pointsYielded {
return max
}
return nil
}
// ReduceMax computes the max of value.
func ReduceMax(values []interface{}) interface{} {
var curr *minMaxMapOut
for _, value := range values {
v, _ := value.(*minMaxMapOut)
if v == nil {
continue
}
// Replace current if higher value.
if curr == nil || v.Val > curr.Val || (v.Val == curr.Val && v.Time < curr.Time) {
curr = v
}
}
if curr == nil {
return nil
}
switch curr.Type {
case Float64Type:
return PositionPoint{
Time: curr.Time,
Value: curr.Val,
Fields: curr.Fields,
Tags: curr.Tags,
}
case Int64Type:
return PositionPoint{
Time: curr.Time,
Value: int64(curr.Val),
Fields: curr.Fields,
Tags: curr.Tags,
}
default:
return nil
}
}
type spreadMapOutput struct {
Min, Max float64
Type NumberType
}
// MapSpread collects the values to pass to the reducer
func MapSpread(input *MapInput) interface{} {
out := &spreadMapOutput{}
pointsYielded := false
var val float64
for _, item := range input.Items {
switch v := item.Value.(type) {
case float64:
val = v
case int64:
val = float64(v)
out.Type = Int64Type
}
// Initialize
if !pointsYielded {
out.Max = val
out.Min = val
pointsYielded = true
}
out.Max = math.Max(out.Max, val)
out.Min = math.Min(out.Min, val)
}
if pointsYielded {
return out
}
return nil
}
// ReduceSpread computes the spread of values.
func ReduceSpread(values []interface{}) interface{} {
result := &spreadMapOutput{}
pointsYielded := false
for _, v := range values {
if v == nil {
continue
}
val := v.(*spreadMapOutput)
// Initialize
if !pointsYielded {
result.Max = val.Max
result.Min = val.Min
result.Type = val.Type
pointsYielded = true
}
result.Max = math.Max(result.Max, val.Max)
result.Min = math.Min(result.Min, val.Min)
}
if pointsYielded {
switch result.Type {
case Float64Type:
return result.Max - result.Min
case Int64Type:
return int64(result.Max - result.Min)
}
}
return nil
}
// MapStddev collects the values to pass to the reducer
func MapStddev(input *MapInput) interface{} {
var a []float64
for _, item := range input.Items {
switch v := item.Value.(type) {
case float64:
a = append(a, v)
case int64:
a = append(a, float64(v))
}
}
return a
}
// ReduceStddev computes the stddev of values.
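// The mean is computed incrementally in a single pass, and the variance uses
// the n-1 divisor, so the result is the sample standard deviation.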
func ReduceStddev(values []interface{}) interface{} {
var data []float64
// Collect all the data points
for _, value := range values {
if value == nil {
continue
}
data = append(data, value.([]float64)...)
}
// The standard deviation is undefined for fewer than two points, so return nil.
if len(data) < 2 {
return nil
}
// Get the mean
var mean float64
var count int
for _, v := range data {
count++
mean += (v - mean) / float64(count)
}
// Get the variance
var variance float64
for _, v := range data {
dif := v - mean
sq := math.Pow(dif, 2)
variance += sq
}
variance = variance / float64(count-1)
stddev := math.Sqrt(variance)
return stddev
}
type firstLastMapOutput struct {
Time int64
Value interface{}
Fields map[string]interface{}
Tags map[string]string
}
// MapFirst collects the values to pass to the reducer
// This function assumes time ordered input
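// When several points share the earliest timestamp, the point with the
// greatest value (and its fields and tags) wins the tie.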
func MapFirst(input *MapInput, fieldName string) interface{} {
if len(input.Items) == 0 {
return nil
}
k, v := input.Items[0].Timestamp, input.Items[0].Value
tags := input.Items[0].Tags
fields := input.Items[0].Fields
if n, ok := v.(map[string]interface{}); ok {
v = n[fieldName]
}
// Find greatest value at same timestamp.
for _, item := range input.Items[1:] {
nextk, nextv := item.Timestamp, item.Value
if nextk != k {
break
}
if n, ok := nextv.(map[string]interface{}); ok {
nextv = n[fieldName]
}
if greaterThan(nextv, v) {
fields = item.Fields
tags = item.Tags
v = nextv
}
}
return &firstLastMapOutput{Time: k, Value: v, Fields: fields, Tags: tags}
}
// ReduceFirst computes the first of value.
func ReduceFirst(values []interface{}) interface{} {
out := &firstLastMapOutput{}
pointsYielded := false
for _, v := range values {
if v == nil {
continue
}
val := v.(*firstLastMapOutput)
// Initialize first
if !pointsYielded {
out.Time = val.Time
out.Value = val.Value
out.Fields = val.Fields