import time
import logging
import signal

from flask.ext.mail import Message
import redis
from celery import Task
from celery.result import AsyncResult
from celery.utils.log import get_task_logger

from redash import redis_connection, models, statsd_client, settings, utils, mail
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import get_query_runner, InterruptException

logger = get_task_logger(__name__)
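

# Base class for all tasks in this module: opens a database connection before
# each task body runs and closes it once the task returns, so every task
# executes against a fresh connection.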
class BaseTask(Task):
    abstract = True

    def after_return(self, *args, **kwargs):
        models.db.close_db(None)

    def __call__(self, *args, **kwargs):
        models.db.connect_db()
        return super(BaseTask, self).__call__(*args, **kwargs)
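

# Thin wrapper around a Celery AsyncResult that adapts it to the job interface
# the client side expects (see the STATUSES mapping below).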
class QueryTask(object):
    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id
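
    # add_task() deduplicates concurrent submissions of the same query against
    # the same data source: a Redis key (see _job_lock_id) holds the id of the
    # job currently responsible for that (query_hash, data_source) pair. The
    # WATCH/MULTI/EXEC pipeline is redis-py's optimistic locking: if another
    # client touches the watched key before EXEC, the transaction aborts with
    # WatchError and we retry, up to MAX_RETRIES times.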
    @classmethod
    def add_task(cls, query, data_source, scheduled=False, metadata={}):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)
                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def to_dict(self):
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True, signal='SIGINT')

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        # Note the order: the key embeds data_source_id before query_hash,
        # the reverse of this method's argument order.
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)


@celery.task(base=BaseTask)
def refresh_queries():
    # self.status['last_refresh_at'] = time.time()
    # self._save_status()

    logger.info("Refreshing queries...")

    outdated_queries_count = 0
    for query in models.Query.outdated_queries():
        QueryTask.add_task(query.query, query.data_source, scheduled=True,
                           metadata={'Query ID': query.id, 'Username': 'Scheduled'})
        outdated_queries_count += 1

    statsd_client.gauge('manager.outdated_queries', outdated_queries_count)

    logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)

    status = redis_connection.hgetall('redash:status')
    now = time.time()

    redis_connection.hmset('redash:status', {
        'outdated_queries_count': outdated_queries_count,
        'last_refresh_at': now
    })

    statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
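

# Maintenance task; like refresh_queries above, it is meant to run
# periodically (e.g. from the celerybeat schedule).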
@celery.task(base=BaseTask)
def cleanup_tasks():
    # After a cold restart of the workers, some jobs may still have their "lock"
    # key in Redis even though they are never going to run. This task removes
    # those stale locks.
    lock_keys = redis_connection.keys("query_hash_job:*")  # TODO: use set instead of keys command
    if not lock_keys:
        return

    query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]

    logger.info("Found %d locks", len(query_tasks))

    inspect = celery.control.inspect()
    active_tasks = inspect.active()
    if active_tasks is None:
        # inspect.active() returns None when no workers replied to the broadcast
        active_tasks = []
    else:
        active_tasks = active_tasks.values()

    all_tasks = set()
    for task_list in active_tasks:
        for task in task_list:
            all_tasks.add(task['id'])

    logger.info("Active jobs count: %d", len(all_tasks))

    for i, t in enumerate(query_tasks):
        if t.ready():
            # if the locked task is already ready (failed, finished, revoked), we don't need the lock anymore
            logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
            redis_connection.delete(lock_keys[i])

        # if t.celery_status == 'STARTED' and t.id not in all_tasks:
        #     logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
        #     redis_connection.delete(lock_keys[i])


@celery.task(base=BaseTask)
def cleanup_query_results():
    """
    Job to clean up unused query results: results that no query links to anymore,
    and that are older than a week (so they are unlikely to still be open in
    someone's browser).

    Each run deletes at most 100 query results, so a large backlog won't choke
    the database.
    """
    unused_query_results = models.QueryResult.unused().limit(100)
    total_unused_query_results = models.QueryResult.unused().count()
    deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()

    logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))


@celery.task(base=BaseTask)
def refresh_schemas():
    """
    Refreshes the schema of every data source.
    """
    for ds in models.DataSource.all():
        logger.info("Refreshing schema for: {}".format(ds.name))
        ds.get_schema(refresh=True)
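

# Queued by execute_query() (below) for every query whose results were just
# updated: re-evaluates each alert attached to the query and emails the
# alert's subscribers whenever the alert changes state.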
@celery.task(bind=True, base=BaseTask)
def check_alerts_for_query(self, query_id):
    # imported here rather than at module level, presumably to avoid a circular import
    from redash.wsgi import app

    logger.debug("Checking query %d for alerts", query_id)
    query = models.Query.get_by_id(query_id)
    for alert in query.alerts:
        alert.query = query
        new_state = alert.evaluate()
        if new_state != alert.state:
            logger.info("Alert %d new state: %s", alert.id, new_state)
            old_state = alert.state
            alert.update_instance(state=new_state)

            if old_state == models.Alert.UNKNOWN_STATE and new_state == models.Alert.OK_STATE:
                logger.debug("Skipping notification (previous state was unknown and now it's ok).")
                continue

            recipients = [s.email for s in alert.subscribers()]
            logger.debug("Notifying: %s", recipients)
            html = """
            Check <a href="{host}/alerts/{alert_id}">alert</a> / check <a href="{host}/queries/{query_id}">query</a>.
            """.format(host=settings.HOST, alert_id=alert.id, query_id=query.id)

            with app.app_context():
                message = Message(recipients=recipients,
                                  subject="[{1}] {0}".format(alert.name, new_state.upper()),
                                  html=html)
                mail.send(message)
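

# QueryTask.cancel() revokes the Celery task with SIGINT; execute_query installs
# this handler so the signal surfaces inside the worker as an InterruptException,
# which query runners can use to abort the running query cleanly.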
def signal_handler(*args):
    raise InterruptException


@celery.task(bind=True, base=BaseTask, track_started=True)
def execute_query(self, query, data_source_id, metadata):
    signal.signal(signal.SIGINT, signal_handler)
    start_time = time.time()

    logger.info("Loading data source (%d)...", data_source_id)

    # TODO: we should probably cache data sources in Redis
    data_source = models.DataSource.get_by_id(data_source_id)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})

    logger.info("Executing query:\n%s", query)

    query_hash = gen_query_hash(query)
    query_runner = get_query_runner(data_source.type, data_source.options)

    if query_runner.annotate_query():
        metadata['Task ID'] = self.request.id
        metadata['Query Hash'] = query_hash
        metadata['Queue'] = self.request.delivery_info['routing_key']

        # Embed the metadata in a leading SQL comment, so the running query can
        # be traced back to its task (e.g. in the database's process list).
        annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])

        logging.debug(u"Annotation: %s", annotation)

        annotated_query = u"/* {} */ {}".format(annotation, query)
    else:
        annotated_query = query

    with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
        data, error = query_runner.run_query(annotated_query)

    run_time = time.time() - start_time
    logger.info("Query finished... data length=%s, error=%s", data and len(data), error)

    self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})

    # Delete the lock key, so a new job can be enqueued for this query hash.
    redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))

    if not error:
        query_result, updated_query_ids = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, utils.utcnow())
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
    else:
        # Raising marks the task as FAILURE; QueryTask.to_dict() surfaces the
        # exception message as the job's `error`.
        raise Exception(error)

    return query_result.id


@celery.task(base=BaseTask)
def record_event(event):
    models.Event.record(event)