Use correct logger when enqueuing a query execution. #4614

Merged · 1 commit · Feb 6, 2020
6 changes: 4 additions & 2 deletions redash/tasks/failure_report.py
@@ -1,10 +1,12 @@
-import logging
 import datetime
 import re
 from collections import Counter
 from redash.tasks.general import send_mail
 from redash import redis_connection, settings, models
 from redash.utils import json_dumps, json_loads, base_url, render_template
+from redash.worker import get_job_logger
+
+logger = get_job_logger(__name__)


 def key(user_id):
@@ -88,7 +90,7 @@ def notify_of_failure(message, query):


 def track_failure(query, error):
-    logging.debug(error)
+    logger.debug(error)

     query.schedule_failures += 1
     query.skip_updated_at = True
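The change above swaps calls on the root `logging` module for a module-level `logger` built by `get_job_logger` from `redash.worker`. As a rough sketch of what such a factory can look like (an assumption for illustration, not necessarily Redash's exact implementation), it only needs to namespace the logger so that handlers and levels configured for the RQ worker apply to job logs:

```python
import logging

def get_job_logger(name):
    # Sketch: namespace job loggers (e.g. under "rq.job") so the worker's
    # handlers and levels apply, instead of the record falling through to
    # whatever the root logger happens to be configured with.
    return logging.getLogger("rq.job." + name)

logger = get_job_logger(__name__)
logger.debug("tracking a query failure")  # logged as rq.job.<module>, not root
```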
15 changes: 7 additions & 8 deletions redash/tasks/queries/execution.py
@@ -1,4 +1,3 @@
-import logging
 import signal
 import time
 import redis
@@ -31,7 +30,7 @@ def enqueue_query(
     query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata={}
 ):
     query_hash = gen_query_hash(query)
-    logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
+    logger.info("Inserting job for %s with metadata=%s", query_hash, metadata)
     try_count = 0
     job = None

@@ -43,13 +42,13 @@ def enqueue_query(
             pipe.watch(_job_lock_id(query_hash, data_source.id))
             job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
             if job_id:
-                logging.info("[%s] Found existing job: %s", query_hash, job_id)
+                logger.info("[%s] Found existing job: %s", query_hash, job_id)

                 job = Job.fetch(job_id)

                 status = job.get_status()
                 if status in [JobStatus.FINISHED, JobStatus.FAILED]:
-                    logging.info(
+                    logger.info(
                         "[%s] job found is ready (%s), removing lock",
                         query_hash,
                         status,
@@ -88,10 +87,10 @@ def enqueue_query(
"scheduled": scheduled_query_id is not None,
"query_id": metadata.get("Query ID"),
"user_id": user_id,
}
},
)

logging.info("[%s] Created new job: %s", query_hash, job.id)
logger.info("[%s] Created new job: %s", query_hash, job.id)
pipe.set(
_job_lock_id(query_hash, data_source.id),
job.id,
@@ -104,7 +103,7 @@ def enqueue_query(
             continue

     if not job:
-        logging.error("[Manager][%s] Failed adding job for query.", query_hash)
+        logger.error("[Manager][%s] Failed adding job for query.", query_hash)

     return job

@@ -171,7 +170,7 @@ def run(self):
             error = str(e)

             data = None
-            logging.warning("Unexpected error while running query:", exc_info=1)
+            logger.warning("Unexpected error while running query:", exc_info=1)

         run_time = time.time() - started_at
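For context on the hunks above: `enqueue_query` deduplicates work with a per-query Redis lock that is read under `WATCH`, and the `continue` is the retry path taken when redis-py raises `WatchError` because the lock key changed between the read and the transactional write. A minimal sketch of that optimistic-locking loop, assuming `enqueue` is any callable that creates the job and returns its id (`_job_lock_id` follows the naming in the diff):

```python
import redis

redis_connection = redis.Redis()

def _job_lock_id(query_hash, data_source_id):
    # One lock key per (data source, query) pair, as in the diff.
    return "query_hash_job:%s:%s" % (data_source_id, query_hash)

def enqueue_once(query_hash, data_source_id, enqueue):
    """Sketch of the WATCH/MULTI retry loop used by enqueue_query."""
    while True:
        pipe = redis_connection.pipeline()
        try:
            pipe.watch(_job_lock_id(query_hash, data_source_id))
            job_id = pipe.get(_job_lock_id(query_hash, data_source_id))
            if job_id:
                return job_id  # an equivalent job is already queued

            pipe.multi()
            new_job_id = enqueue()  # e.g. the id from rq's queue.enqueue(...)
            pipe.set(_job_lock_id(query_hash, data_source_id), new_job_id)
            pipe.execute()  # raises WatchError if the lock key was touched
            return new_job_id
        except redis.WatchError:
            continue  # lost the race; re-read the lock and try again
        finally:
            pipe.reset()
```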
10 changes: 5 additions & 5 deletions redash/tasks/queries/maintenance.py
@@ -36,17 +36,17 @@ def refresh_queries():
     with statsd_client.timer("manager.outdated_queries_lookup"):
         for query in models.Query.outdated_queries():
             if settings.FEATURE_DISABLE_REFRESH_QUERIES:
-                logging.info("Disabled refresh queries.")
+                logger.info("Disabled refresh queries.")
             elif query.org.is_disabled:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because org is disabled.", query.id
                 )
             elif query.data_source is None:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because the datasource is none.", query.id
                 )
             elif query.data_source.paused:
-                logging.debug(
+                logger.debug(
                     "Skipping refresh of %s because datasource - %s is paused (%s).",
                     query.id,
                     query.data_source.name,
@@ -117,7 +117,7 @@ def cleanup_query_results():
     the database in case of many such results.
     """

-    logging.info(
+    logger.info(
         "Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
         settings.QUERY_RESULTS_CLEANUP_COUNT,
         settings.QUERY_RESULTS_CLEANUP_MAX_AGE,
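Taken together, all three files now log through a named job logger instead of the root logger. The practical difference is easy to demonstrate: module-level `logging.info(...)` targets the root logger, which by default drops anything below WARNING and knows nothing about worker-specific handlers, while a named logger inherits whatever was configured for its namespace. A small self-contained illustration (the handler and format below are made up for the demo):

```python
import logging
import sys

# Configure only the job-logger namespace, the way a worker process might.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("[job] %(name)s %(levelname)s: %(message)s"))

job_logger = logging.getLogger("rq.job.redash.tasks")
job_logger.addHandler(handler)
job_logger.setLevel(logging.DEBUG)

logging.debug("via the root logger")    # dropped: the root logger defaults to WARNING
job_logger.debug("via the job logger")  # printed with the worker's [job] format
```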