diff --git a/redash/tasks/failure_report.py b/redash/tasks/failure_report.py
index d763cc94b2..0339f3107e 100644
--- a/redash/tasks/failure_report.py
+++ b/redash/tasks/failure_report.py
@@ -1,10 +1,12 @@
-import logging
 import datetime
 import re
 from collections import Counter
 from redash.tasks.general import send_mail
 from redash import redis_connection, settings, models
 from redash.utils import json_dumps, json_loads, base_url, render_template
+from redash.worker import get_job_logger
+
+logger = get_job_logger(__name__)
 
 
 def key(user_id):
@@ -88,7 +90,7 @@ def notify_of_failure(message, query):
 
 
 def track_failure(query, error):
-    logging.debug(error)
+    logger.debug(error)
 
     query.schedule_failures += 1
     query.skip_updated_at = True
diff --git a/redash/tasks/queries/execution.py b/redash/tasks/queries/execution.py
index d7eacb00fb..b79b32d5e7 100644
--- a/redash/tasks/queries/execution.py
+++ b/redash/tasks/queries/execution.py
@@ -1,4 +1,3 @@
-import logging
 import signal
 import time
 import redis
@@ -31,7 +30,7 @@ def enqueue_query(
     query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata={}
 ):
     query_hash = gen_query_hash(query)
-    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
+    logger.info("Inserting job for %s with metadata=%s", query_hash, metadata)
 
     try_count = 0
     job = None
@@ -43,13 +42,13 @@ def enqueue_query(
             pipe.watch(_job_lock_id(query_hash, data_source.id))
             job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
             if job_id:
-                logging.info("[%s] Found existing job: %s", query_hash, job_id)
+                logger.info("[%s] Found existing job: %s", query_hash, job_id)
                 job = Job.fetch(job_id)
 
                 status = job.get_status()
                 if status in [JobStatus.FINISHED, JobStatus.FAILED]:
-                    logging.info(
+                    logger.info(
                         "[%s] job found is ready (%s), removing lock",
                         query_hash,
                         status,
@@ -88,10 +87,10 @@ def enqueue_query(
                     "scheduled": scheduled_query_id is not None,
                     "query_id": metadata.get("Query ID"),
                     "user_id": user_id,
-                }
+                },
             )
 
-            logging.info("[%s] Created new job: %s", query_hash, job.id)
+            logger.info("[%s] Created new job: %s", query_hash, job.id)
             pipe.set(
                 _job_lock_id(query_hash, data_source.id),
                 job.id,
@@ -104,7 +103,7 @@ def enqueue_query(
             continue
 
     if not job:
-        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
+        logger.error("[Manager][%s] Failed adding job for query.", query_hash)
 
     return job
@@ -171,7 +170,7 @@ def run(self):
             error = str(e)
             data = None
 
-            logging.warning("Unexpected error while running query:", exc_info=1)
+            logger.warning("Unexpected error while running query:", exc_info=1)
 
         run_time = time.time() - started_at
diff --git a/redash/tasks/queries/maintenance.py b/redash/tasks/queries/maintenance.py
index 04045cedb8..f7956f2159 100644
--- a/redash/tasks/queries/maintenance.py
+++ b/redash/tasks/queries/maintenance.py
@@ -36,17 +36,17 @@ def refresh_queries():
     with statsd_client.timer("manager.outdated_queries_lookup"):
         for query in models.Query.outdated_queries():
             if settings.FEATURE_DISABLE_REFRESH_QUERIES:
-                logging.info("Disabled refresh queries.")
+                logger.info("Disabled refresh queries.")
             elif query.org.is_disabled:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because org is disabled.", query.id
                 )
             elif query.data_source is None:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because the datasource is none.", query.id
                 )
             elif query.data_source.paused:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because datasource - %s is paused (%s).",
                     query.id,
                     query.data_source.name,
@@ -117,7 +117,7 @@ def cleanup_query_results():
     the database in case of many such results.
     """
 
-    logging.info(
+    logger.info(
        "Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
        settings.QUERY_RESULTS_CLEANUP_COUNT,
        settings.QUERY_RESULTS_CLEANUP_MAX_AGE,
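
Note for reviewers: each touched module swaps stdlib root `logging` calls for a module-level `logger` obtained from `redash.worker.get_job_logger`, so records emitted from task code are attributed to the RQ job instead of the root logger. A minimal sketch of the pattern, assuming `get_job_logger` simply namespaces the logger under RQ's `rq.job` hierarchy (the actual implementation in `redash/worker.py` may attach handlers or formatting beyond this):

```python
import logging


def get_job_logger(name):
    # Assumed sketch: namespace task loggers under RQ's job logger so their
    # records are grouped with, and configured alongside, RQ job output.
    return logging.getLogger("rq.job." + name)


# The module-level pattern this diff adopts in each task module:
logger = get_job_logger(__name__)

# Illustrative values only -- mirrors the call sites changed above.
logger.info("Inserting job for %s with metadata=%s", "deadbeef", {"Query ID": 42})
```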