#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import six
import json

try:
    # MutableSequence moved to collections.abc in Python 3.3 and was removed
    # from the collections top level in Python 3.10.
    from collections.abc import MutableSequence
except ImportError:  # Python 2
    from collections import MutableSequence

from pydruid.utils.aggregators import build_aggregators
from pydruid.utils.filters import Filter
from pydruid.utils.having import Having
from pydruid.utils.dimensions import build_dimension
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.query_utils import UnicodeWriter

try:
    import pandas
except ImportError:
    print('Warning: unable to import pandas. The export_pandas method will not work.')
    pandas = None  # checked in export_pandas before use


class Query(MutableSequence):
"""
Query objects are produced by PyDruid clients and can be used for exporting query results into TSV files or
pandas.DataFrame objects for subsequent analysis. They also hold information about the issued query.
Query acts as a wrapper over raw result list of dictionaries.
:ivar str result_json: JSON object representing a query result. Initial value: None
:ivar list result: Query result parsed into a list of dicts. Initial value: None
:ivar str query_type: Name of most recently run query, e.g., topN. Initial value: None
:ivar dict query_dict: JSON object representing the query. Initial value: None
"""
def __init__(self, query_dict, query_type):
super(Query, self).__init__()
self.query_dict = query_dict
self.query_type = query_type
self.result = None
self.result_json = None

    def parse(self, data):
        """
        Parse the raw JSON response body of a query into self.result.
        """
        if data:
            self.result_json = data
            self.result = json.loads(self.result_json)
        else:
            raise IOError('Error parsing result: {0} for {1} query'.format(
                self.result_json, self.query_type))
def export_tsv(self, dest_path):
"""
Export the current query result to a tsv file.
:param str dest_path: file to write query results to
:raise NotImplementedError:
Example
.. code-block:: python
:linenos:
>>> top = client.topn(
datasource='twitterstream',
granularity='all',
intervals='2013-10-04/pt1h',
aggregations={"count": doublesum("count")},
dimension='user_name',
filter = Dimension('user_lang') == 'en',
metric='count',
threshold=2
)
>>> top.export_tsv('top.tsv')
>>> !cat top.tsv
>>> count user_name timestamp
7.0 user_1 2013-10-04T00:00:00.000Z
6.0 user_2 2013-10-04T00:00:00.000Z
"""
        if six.PY3:
            f = open(dest_path, 'w', newline='', encoding='utf-8')
        else:
            f = open(dest_path, 'wb')
        w = UnicodeWriter(f)

        if not self.result:
            # Nothing to export; avoid indexing into an empty result below.
            f.close()
            return

        if self.query_type == "timeseries":
            header = list(self.result[0]['result'].keys())
            header.append('timestamp')
        elif self.query_type == 'topN':
            header = list(self.result[0]['result'][0].keys())
            header.append('timestamp')
        elif self.query_type == "groupBy":
            header = list(self.result[0]['event'].keys())
            header.append('timestamp')
            header.append('version')
        else:
            f.close()
            raise NotImplementedError(
                'TSV export not implemented for query type: {0}'.format(self.query_type))
        w.writerow(header)

        if self.query_type == "topN" or self.query_type == "timeseries":
            for item in self.result:
                timestamp = item['timestamp']
                result = item['result']
                if isinstance(result, list):  # topN
                    for line in result:
                        w.writerow(list(line.values()) + [timestamp])
                else:  # timeseries
                    w.writerow(list(result.values()) + [timestamp])
        elif self.query_type == "groupBy":
            for item in self.result:
                timestamp = item['timestamp']
                version = item['version']
                w.writerow(
                    list(item['event'].values()) + [timestamp, version])
        f.close()

def export_pandas(self):
"""
Export the current query result to a Pandas DataFrame object.
:return: The DataFrame representing the query result
:rtype: DataFrame
:raise NotImplementedError:
Example
.. code-block:: python
:linenos:
>>> top = client.topn(
datasource='twitterstream',
granularity='all',
intervals='2013-10-04/pt1h',
aggregations={"count": doublesum("count")},
dimension='user_name',
filter = Dimension('user_lang') == 'en',
metric='count',
threshold=2
)
>>> df = top.export_pandas()
>>> print df
>>> count timestamp user_name
0 7 2013-10-04T00:00:00.000Z user_1
1 6 2013-10-04T00:00:00.000Z user_2
"""
        if pandas is None:
            raise ImportError('pandas is required for export_pandas')
        if self.result:
            if self.query_type == "timeseries":
                nres = [list(v['result'].items()) + [('timestamp', v['timestamp'])]
                        for v in self.result]
                nres = [dict(v) for v in nres]
            elif self.query_type == "topN":
                nres = []
                for item in self.result:
                    timestamp = item['timestamp']
                    results = item['result']
                    tres = [dict(list(res.items()) + [('timestamp', timestamp)])
                            for res in results]
                    nres += tres
            elif self.query_type == "groupBy":
                nres = [list(v['event'].items()) + [('timestamp', v['timestamp'])]
                        for v in self.result]
                nres = [dict(v) for v in nres]
            else:
                raise NotImplementedError(
                    'Pandas export not implemented for query type: {0}'.format(self.query_type))
            df = pandas.DataFrame(nres)
            return df

    def __str__(self):
        # self.result is a list; __str__ must return a string.
        return str(self.result)

def __len__(self):
return len(self.result)
def __delitem__(self, index):
del self.result[index]
def insert(self, index, value):
self.result.insert(index, value)
def __setitem__(self, index, value):
self.result[index] = value
def __getitem__(self, index):
return self.result[index]


class QueryBuilder(object):
def __init__(self):
self.last_query = None
@staticmethod
def validate_query(query_type, valid_parts, args):
"""
        Validate the query parts so that only allowed objects are sent.

        Each query type can have an optional 'context' object attached, which is used to set
        certain query context settings, e.g., timeout or priority. Since every query may carry
        this object, it is appended to the valid parts here rather than listed by each caller.
:param string query_type: a type of query
:param list valid_parts: a list of valid object names
:param dict args: the dict of args to be sent
:raise ValueError: if an invalid object is given
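
        Example (an illustrative sketch; the datasource and argument values are made
        up for demonstration):

        .. code-block:: python

            >>> QueryBuilder.validate_query(
                    'timeBoundary', ['datasource'],
                    {'datasource': 'twitterstream', 'context': {'timeout': 1000}})
            >>> QueryBuilder.validate_query(
                    'timeBoundary', ['datasource'], {'granularity': 'all'})
            ValueError: Query component: granularity is not valid for query type: timeBoundary. ...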
"""
        valid_parts = valid_parts[:] + ['context']
        for key, val in six.iteritems(args):
            if key not in valid_parts:
                raise ValueError(
                    'Query component: {0} is not valid for query type: {1}. '
                    'The list of valid components is: \n {2}'
                    .format(key, query_type, valid_parts))

def build_query(self, query_type, args):
"""
        Build a query from the given query type and arguments.
:param string query_type: a type of query
:param dict args: the dict of args to be sent
:return: the resulting query
:rtype: Query
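
        Example (an illustrative sketch; note how snake_case arguments are translated
        into Druid's camelCase query fields):

        .. code-block:: python

            >>> builder = QueryBuilder()
            >>> query = builder.build_query('timeBoundary', {'datasource': 'twitterstream'})
            >>> query.query_dict
            {'queryType': 'timeBoundary', 'dataSource': 'twitterstream'}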
"""
query_dict = {'queryType': query_type}
for key, val in six.iteritems(args):
if key == 'aggregations':
query_dict[key] = build_aggregators(val)
elif key == 'post_aggregations':
query_dict['postAggregations'] = Postaggregator.build_post_aggregators(val)
elif key == 'datasource':
query_dict['dataSource'] = val
elif key == 'paging_spec':
query_dict['pagingSpec'] = val
elif key == 'limit_spec':
query_dict['limitSpec'] = val
elif key == "filter":
query_dict[key] = Filter.build_filter(val)
elif key == "having":
query_dict[key] = Having.build_having(val)
elif key == 'dimension':
query_dict[key] = build_dimension(val)
elif key == 'dimensions':
query_dict[key] = [build_dimension(v) for v in val]
else:
query_dict[key] = val
self.last_query = Query(query_dict, query_type)
return self.last_query
def topn(self, args):
"""
A TopN query returns a set of the values in a given dimension, sorted by a specified metric. Conceptually, a
topN can be thought of as an approximate GroupByQuery over a single dimension with an Ordering spec. TopNs are
faster and more resource efficient than GroupBy for this use case.
:param dict args: dict of arguments
:return: topn query
:rtype: Query
"""
query_type = 'topN'
valid_parts = [
'datasource', 'granularity', 'filter', 'aggregations',
'post_aggregations', 'intervals', 'dimension', 'threshold',
'metric'
]
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)
def timeseries(self, args):
"""
A timeseries query returns the values of the requested metrics (in aggregate) for each timestamp.
:param dict args: dict of args
:return: timeseries query
:rtype: Query
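
        Example (an illustrative sketch; ``client``, the datasource, and the aggregator
        names are assumptions, following the topn example above):

        .. code-block:: python

            >>> ts = client.timeseries(
                    datasource='twitterstream',
                    granularity='hour',
                    intervals='2013-10-04/pt4h',
                    aggregations={"count": doublesum("count")}
                )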
"""
query_type = 'timeseries'
valid_parts = [
'datasource', 'granularity', 'filter', 'aggregations', 'descending',
'post_aggregations', 'intervals'
]
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)
def groupby(self, args):
"""
        A group-by query groups a result set (the requested aggregate metrics) by the specified dimension(s).
:param dict args: dict of args
:return: group by query
:rtype: Query
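
        Example (an illustrative sketch; the dimension and aggregator names are
        assumptions, following the topn example above):

        .. code-block:: python

            >>> group = client.groupby(
                    datasource='twitterstream',
                    granularity='hour',
                    intervals='2013-10-04/pt1h',
                    dimensions=['user_name', 'user_lang'],
                    aggregations={"count": doublesum("count")}
                )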
"""
query_type = 'groupBy'
valid_parts = [
'datasource', 'granularity', 'filter', 'aggregations',
'having', 'post_aggregations', 'intervals', 'dimensions',
'limit_spec',
]
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)
def segment_metadata(self, args):
"""
        A segment metadata query returns per-segment information about:

        * Column type
        * Estimated size in bytes
        * Estimated size in bytes of each column
        * Interval the segment covers
        * Segment ID
:param dict args: dict of args
:return: segment metadata query
:rtype: Query
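
        Example (an illustrative sketch; ``client`` and the datasource are assumptions,
        following the topn example above):

        .. code-block:: python

            >>> meta = client.segment_metadata(
                    datasource='twitterstream',
                    intervals='2013-10-04/pt1h'
                )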
"""
query_type = 'segmentMetadata'
valid_parts = ['datasource', 'intervals', 'analysisTypes']
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)
def time_boundary(self, args):
"""
A time boundary query returns the min and max timestamps present in a data source.
:param dict args: dict of args
:return: time boundary query
:rtype: Query
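
        Example (an illustrative sketch; ``client`` and the datasource are assumptions,
        following the topn example above):

        .. code-block:: python

            >>> bound = client.time_boundary(datasource='twitterstream')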
"""
query_type = 'timeBoundary'
valid_parts = ['datasource']
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)
def select(self, args):
"""
A select query returns raw Druid rows and supports pagination.
:param dict args: dict of args
:return: select query
:rtype: Query
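
        Example (an illustrative sketch; the dimension and metric names are assumptions,
        and the paging_spec shown is the standard Druid pagingSpec form with values made
        up for demonstration):

        .. code-block:: python

            >>> sel = client.select(
                    datasource='twitterstream',
                    granularity='all',
                    intervals='2013-10-04/pt1h',
                    dimensions=['user_name'],
                    metrics=['tweets'],
                    paging_spec={'pagingIdentifiers': {}, 'threshold': 5}
                )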
"""
query_type = 'select'
valid_parts = [
'datasource', 'granularity', 'filter', 'dimensions', 'metrics',
'paging_spec', 'intervals'
]
self.validate_query(query_type, valid_parts, args)
return self.build_query(query_type, args)