// tabify.js
import _ from 'lodash';
import { TabbedAggResponseWriterProvider } from 'ui/agg_response/tabify/_response_writer';
import { AggResponseBucketsProvider } from 'ui/agg_response/tabify/_buckets';
export function AggResponseTabifyProvider(Private, Notifier) {
  const TabbedAggResponseWriter = Private(TabbedAggResponseWriterProvider);
  const Buckets = Private(AggResponseBucketsProvider);
  const notify = new Notifier({ location: 'agg_response/tabify' });
  // convert an elasticsearch aggregation response into tabular form
  function tabifyAggResponse(vis, esResponse, respOpts) {
    const write = new TabbedAggResponseWriter(vis, respOpts);

    const topLevelBucket = _.assign({}, esResponse.aggregations, {
      doc_count: esResponse.hits.total
    });

    collectBucket(write, topLevelBucket, '', 1);
    return write.response();
  }
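
  // For illustration only: a minimal sketch of the elasticsearch response
  // shape tabifyAggResponse expects. The agg id '2' and the key/count
  // values below are hypothetical, not part of this module. The top-level
  // bucket is the `aggregations` object plus a synthesized doc_count taken
  // from hits.total:
  //
  //   esResponse = {
  //     hits: { total: 100 },
  //     aggregations: {
  //       '2': { buckets: [{ key: 'a', doc_count: 60 }, { key: 'b', doc_count: 40 }] }
  //     }
  //   }
  //   // => topLevelBucket = { '2': { buckets: [...] }, doc_count: 100 }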
  /**
   * Reads an aggregation from a bucket, which *might* be found at key (if
   * the response came in object form), recurses down the aggregation tree,
   * and passes the read values to the ResponseWriter.
   *
   * @param {TabbedAggResponseWriter} write - the response writer to fill
   * @param {object} bucket - a bucket from the aggResponse
   * @param {undefined|string} key - the key where the bucket was found
   * @param {number} aggScale - scaling factor accumulated from metric scales
   * @returns {undefined}
   */
  function collectBucket(write, bucket, key, aggScale) {
    const agg = write.aggStack.shift();
    const aggInfo = agg.write();
    aggScale *= aggInfo.metricScale || 1;

    switch (agg.schema.group) {
      case 'buckets':
        const buckets = new Buckets(bucket[agg.id]);
        if (buckets.length) {
          const splitting = write.canSplit && agg.schema.name === 'split';
          if (splitting) {
            write.split(agg, buckets, function forEachBucket(subBucket, key) {
              collectBucket(write, subBucket, agg.getKey(subBucket, key), aggScale);
            });
          } else {
            buckets.forEach(function (subBucket, key) {
              write.cell(agg, agg.getKey(subBucket, key), function () {
                collectBucket(write, subBucket, agg.getKey(subBucket, key), aggScale);
              });
            });
          }
        } else if (write.partialRows && write.metricsForAllBuckets && write.minimalColumns) {
          // we don't have any buckets, but we do have metrics at this
          // level, so pass empty cells for the remaining bucket aggs and
          // jump back in for the metrics
          write.aggStack.unshift(agg);
          passEmptyBuckets(write, bucket, key, aggScale);
          write.aggStack.shift();
        } else {
          // we don't have any buckets and the data isn't hierarchical,
          // so there are no metrics; just try to write the row
          write.row();
        }
        break;
      case 'metrics':
        let value = agg.getValue(bucket);
        // since the aggregation could produce a non-integer value (such as
        // a max date), only do the scaling calculation when it is needed
        if (aggScale !== 1) {
          value *= aggScale;
        }
        write.cell(agg, value, function () {
          if (!write.aggStack.length) {
            // row complete
            write.row();
          } else {
            // process the next agg at this same level
            collectBucket(write, bucket, key, aggScale);
          }
        });
        break;
    }

    write.aggStack.unshift(agg);
  }
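
  // For illustration only: how collectBucket walks the agg stack, given a
  // hypothetical stack of [terms, count]. Each terms sub-bucket opens a
  // cell, collectBucket recurses to write the count metric, and write.row()
  // fires once the stack is empty. The shift()/unshift() pair around the
  // switch restores the stack so sibling buckets replay the same aggs:
  //
  //   aggStack: [terms, count]
  //   cell(terms, 'a') -> cell(count, 60) -> row()   // stack now empty
  //   cell(terms, 'b') -> cell(count, 40) -> row()   // stack was restored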
  // write empty values for each bucket agg, then write
  // the metrics from the initial bucket using collectBucket()
  function passEmptyBuckets(write, bucket, key, aggScale) {
    const agg = write.aggStack.shift();

    switch (agg.schema.group) {
      case 'metrics':
        // pass control back to collectBucket()
        write.aggStack.unshift(agg);
        collectBucket(write, bucket, key, aggScale);
        return;

      case 'buckets':
        write.cell(agg, '', function () {
          passEmptyBuckets(write, bucket, key, aggScale);
        });
    }

    write.aggStack.unshift(agg);
  }
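
  // For illustration only: with partialRows enabled and a hypothetical
  // stack of [terms, count], an empty terms bucket still yields a row;
  // passEmptyBuckets writes '' for the terms cell, then hands control back
  // to collectBucket so the count metric from the current bucket is kept.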
  return notify.timed('tabify agg response', tabifyAggResponse);
}
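
// Illustrative usage only: a sketch assuming the old Kibana `Private`
// dependency injection, plus a `vis` and raw `esResponse` obtained
// elsewhere; the respOpts flag shown is an assumption based on the
// `write.partialRows` check above, not guaranteed by this file:
//
//   const tabifyAggResponse = Private(AggResponseTabifyProvider);
//   const tabbed = tabifyAggResponse(vis, esResponse, { partialRows: true });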