Skip to content

Commit

Permalink
Added sqlparse, replacing my simple string-replace of SQL keywords, and updating the
Browse files Browse the repository at this point in the history
management command and SQL panel.
  • Loading branch information
robhudson committed Sep 23, 2009
1 parent 3917412 commit 6c05fad
Show file tree
Hide file tree
Showing 14 changed files with 2,565 additions and 56 deletions.
1 change: 1 addition & 0 deletions AUTHORS
Expand Up @@ -3,6 +3,7 @@ August 2008.

The following is a list of much appreciated contributors:

Andi Albrecht <albrecht.andi@gmail.com>
David Cramer <dcramer@gmail.com>
Augie Fackler <durin42@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
Expand Down
12 changes: 2 additions & 10 deletions debug_toolbar/management/commands/debugsqlshell.py
Expand Up @@ -4,23 +4,15 @@
from django.core.management.base import NoArgsCommand
from django.db.backends import util

# Optional sqlparse to make the SQL look pretty...
# http://code.google.com/p/python-sqlparse/
try:
import sqlparse
except ImportError:
sqlparse = None
from debug_toolbar.utils import sqlparse

class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
if sqlparse:
print sqlparse.format(raw_sql, reindent=True)
else:
print raw_sql
print sqlparse.format(raw_sql, reindent=True)
print

util.CursorDebugWrapper = PrintQueryWrapper
Expand Down
65 changes: 19 additions & 46 deletions debug_toolbar/panels/sql.py
Expand Up @@ -13,55 +13,16 @@
from django.utils.hashcompat import sha_constructor

from debug_toolbar.panels import DebugPanel
from debug_toolbar.utils import sqlparse

# Figure out some paths
django_path = os.path.realpath(os.path.dirname(django.__file__))
socketserver_path = os.path.realpath(os.path.dirname(SocketServer.__file__))

# TODO:This should be set in the toolbar loader as a default and panels should
# get a copy of the toolbar object with access to its config dictionary
SQL_WARNING_THRESHOLD = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}).get('SQL_WARNING_THRESHOLD', 500)

# Note: This isn't intended to catch ALL possible SQL keywords, just a good
# common set. Note: Subsets are listed last to avoid matching a subset of a
# keyword. This whole thing could be greatly improved but for now this works.
SQL_KEYWORDS = (
    'ALTER', 'AND', 'ASC', 'AS', 'AVG', 'COUNT', 'CREATE', 'DESC', 'DELETE',
    'DISTINCT', 'DROP', 'FROM', 'GROUP BY', 'HAVING', 'INNER JOIN', 'INSERT',
    'IN', 'LEFT OUTER JOIN', 'LIKE', 'LIMIT', 'MAX', 'MIN', 'OFFSET', 'ON',
    'ORDER BY', 'OR', 'SELECT', 'SET', 'STDDEV_POP', 'STDDEV_SAMP', 'SUM',
    'UPDATE', 'VAR_POP', 'VAR_SAMP', 'WHERE',
)
SQL_WARNING_THRESHOLD = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}) \
.get('SQL_WARNING_THRESHOLD', 500)

def tidy_stacktrace(strace):
"""
Expand Down Expand Up @@ -170,8 +131,20 @@ def ms_from_timedelta(td):
"""
return (td.seconds * 1000) + (td.microseconds / 1000.0)

def reformat_sql(sql):
    """Return *sql* with every known SQL keyword wrapped in <strong> tags."""
    markup = '<strong>%s</strong>'
    for keyword in SQL_KEYWORDS:
        sql = sql.replace(keyword, markup % (keyword,))
    return sql
class BoldKeywordFilter(sqlparse.filters.Filter):
    """sqlparse filter that wraps SQL keywords in <strong> tags."""

    def process(self, stack, stream):
        """Re-yield the token stream, bracketing keyword tokens with markup."""
        open_tag = (sqlparse.tokens.Text, '<strong>')
        close_tag = (sqlparse.tokens.Text, '</strong>')
        for token_type, value in stream:
            if token_type in sqlparse.tokens.Keyword:
                yield open_tag
                yield token_type, value
                yield close_tag
            else:
                yield token_type, value

def reformat_sql(sql):
    """Return *sql* as a string with keywords bolded via <strong> tags."""
    pipeline = sqlparse.engine.FilterStack()
    # Bold keywords as the raw token stream goes by...
    pipeline.preprocess.append(BoldKeywordFilter())
    # ...then serialize the processed tokens back into a unicode string.
    pipeline.postprocess.append(sqlparse.filters.SerializerUnicode())
    return ''.join(pipeline.run(sql))
Empty file added debug_toolbar/utils/__init__.py
Empty file.
59 changes: 59 additions & 0 deletions debug_toolbar/utils/sqlparse/__init__.py
@@ -0,0 +1,59 @@
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.

"""Parse SQL statements."""


__version__ = '0.1.1'


import os


class SQLParseError(Exception):
    """Root of the exception hierarchy raised by this package."""


# Setup namespace
from debug_toolbar.utils.sqlparse import engine
from debug_toolbar.utils.sqlparse import filters
from debug_toolbar.utils.sqlparse import formatter


def parse(sql):
    """Parse sql and return a list of statements.

    *sql* is a single string containing one or more SQL statements.
    Returns a tuple of :class:`~sqlparse.sql.Statement` instances.
    """
    pipeline = engine.FilterStack()
    pipeline.full_analyze()
    return tuple(pipeline.run(sql))


def format(sql, **options):
    """Format *sql* according to *options*.

    Available options are documented in :ref:`formatting`.
    Returns the formatted SQL statement as string.
    """
    validated = formatter.validate_options(options)
    pipeline = formatter.build_filter_stack(engine.FilterStack(), validated)
    # Serialize the processed token stream back to unicode at the end.
    pipeline.postprocess.append(filters.SerializerUnicode())
    return ''.join(pipeline.run(sql))


def split(sql):
    """Split *sql* into single statements.

    Returns a list of strings.
    """
    pipeline = engine.FilterStack()
    pipeline.split_statements = True
    return [unicode(statement) for statement in pipeline.run(sql)]

80 changes: 80 additions & 0 deletions debug_toolbar/utils/sqlparse/engine/__init__.py
@@ -0,0 +1,80 @@
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.

"""filter"""

import re

from debug_toolbar.utils.sqlparse import lexer, SQLParseError
from debug_toolbar.utils.sqlparse.engine import grouping
from debug_toolbar.utils.sqlparse.engine.filter import StatementFilter

# XXX remove this when cleanup is complete
Filter = object


class FilterStack(object):
    """A configurable pipeline turning raw SQL text into token streams.

    Filters are collected in three stages:

    - ``preprocess``: applied to the raw token stream before splitting.
    - ``stmtprocess``: applied once per parsed statement (in place).
    - ``postprocess``: applied per statement after its tokens are flattened.

    ``run()`` wires the stages together lazily with generators.
    """

    def __init__(self):
        self.preprocess = []            # filters over the raw token stream
        self.stmtprocess = []           # per-statement, mutate-in-place filters
        self.postprocess = []           # per-statement filters after flattening
        self.split_statements = False   # split the input on ';' boundaries
        self._grouping = False          # group tokens into nested structures

    def _flatten(self, stream):
        # Recursively yield leaf tokens, expanding grouped tokens in order.
        for token in stream:
            if token.is_group():
                for t in self._flatten(token.tokens):
                    yield t
            else:
                yield token

    def enable_grouping(self):
        # Turn on statement grouping for subsequent run() calls.
        self._grouping = True

    def full_analyze(self):
        # Enable the full analysis pipeline (currently just grouping).
        self.enable_grouping()

    def run(self, sql):
        """Tokenize *sql* and pass it through the configured filter stages.

        Returns an iterable of statements, or of raw (ttype, value) tokens
        when no statement-level stage is configured.
        """
        stream = lexer.tokenize(sql)
        # Process token stream
        if self.preprocess:
            for filter_ in self.preprocess:
                stream = filter_.process(self, stream)

        # Splitting is required whenever any per-statement stage (or
        # grouping) is active, not only when explicitly requested.
        if (self.stmtprocess or self.postprocess or self.split_statements
            or self._grouping):
            splitter = StatementFilter()
            stream = splitter.process(self, stream)

        if self._grouping:
            def _group(stream):
                # Group each statement's tokens in place, lazily.
                for stmt in stream:
                    grouping.group(stmt)
                    yield stmt
            stream = _group(stream)

        if self.stmtprocess:
            def _run(stream):
                # NOTE: this stage materializes the stream into a list;
                # stmtprocess filters mutate statements rather than
                # returning replacements.
                ret = []
                for stmt in stream:
                    for filter_ in self.stmtprocess:
                        filter_.process(self, stmt)
                    ret.append(stmt)
                return ret
            stream = _run(stream)

        if self.postprocess:
            def _run(stream):
                for stmt in stream:
                    # Flatten grouped tokens back to a linear list before
                    # post-processing (e.g. serialization).
                    stmt.tokens = list(self._flatten(stmt.tokens))
                    for filter_ in self.postprocess:
                        stmt = filter_.process(self, stmt)
                    yield stmt
            stream = _run(stream)

        return stream

99 changes: 99 additions & 0 deletions debug_toolbar/utils/sqlparse/engine/filter.py
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-

from debug_toolbar.utils.sqlparse import tokens as T
from debug_toolbar.utils.sqlparse.engine.grouping import Statement, Token


class TokenFilter(object):
    """Base class for filters that operate on a raw token stream."""

    def __init__(self, **options):
        # Keep the keyword options around for subclasses to inspect.
        self.options = options

    def process(self, stack, stream):
        """Process token stream."""
        raise NotImplementedError


class StatementFilter(TokenFilter):
    """Splits a flat token stream into individual SQL statements.

    Tracks a nesting ("split") level so that ';' inside nested constructs
    (DECLARE sections, PostgreSQL dollar-quoted bodies, CREATE ... IF/FOR)
    does not terminate the enclosing statement.
    """

    def __init__(self):
        TokenFilter.__init__(self)
        self._in_declare = False    # currently inside a DECLARE section
        self._in_dbldollar = False  # inside a PostgreSQL $...$ quoted body
        self._is_create = False     # current statement started with CREATE

    def _reset(self):
        # Clear per-statement state before starting the next statement.
        self._in_declare = False
        self._in_dbldollar = False
        self._is_create = False

    def _change_splitlevel(self, ttype, value):
        """Return the nesting-level delta (-1, 0 or 1) caused by this token."""
        # PostgreSQL: a $tag$ delimiter toggles dollar-quoting on/off.
        if (ttype == T.Name.Builtin
            and value.startswith('$') and value.endswith('$')):
            if self._in_dbldollar:
                self._in_dbldollar = False
                return -1
            else:
                self._in_dbldollar = True
                return 1
        elif self._in_dbldollar:
            # Everything inside a dollar-quoted body is opaque.
            return 0

        # ANSI
        if ttype is not T.Keyword:
            return 0

        unified = value.upper()

        if unified == 'DECLARE':
            self._in_declare = True
            return 1

        if unified == 'BEGIN':
            # NOTE(review): both branches return 0, so BEGIN never raises
            # the split level even though END lowers it — looks suspicious;
            # confirm against upstream python-sqlparse before changing.
            if self._in_declare:
                return 0
            return 0

        if unified == 'END':
            # Should this respect a preceding BEGIN?
            # In CASE ... WHEN ... END this results in a split level -1.
            return -1

        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
            self._is_create = True

        if unified in ('IF', 'FOR') and self._is_create:
            # CREATE ... IF/FOR opens a body that may itself contain ';'.
            return 1

        # Default
        return 0

    def process(self, stack, stream):
        """Yield Statement objects assembled from (ttype, value) pairs."""
        splitlevel = 0
        stmt = None
        consume_ws = False
        stmt_tokens = []
        for ttype, value in stream:
            # Before appending the token: a finished statement is emitted as
            # soon as the first non-whitespace, non-comment token arrives,
            # so trailing whitespace/comments stay attached to it.
            if (consume_ws and ttype is not T.Whitespace
                and ttype is not T.Comment.Single):
                consume_ws = False
                stmt.tokens = stmt_tokens
                yield stmt
                self._reset()
                stmt = None
                splitlevel = 0
            if stmt is None:
                stmt = Statement()
                stmt_tokens = []
            splitlevel += self._change_splitlevel(ttype, value)
            # Append the token
            stmt_tokens.append(Token(ttype, value))
            # After appending the token: a top-level ';' marks the end of
            # the current statement.
            if (splitlevel <= 0 and ttype is T.Punctuation
                and value == ';'):
                consume_ws = True
        # Flush the final (possibly unterminated) statement.
        if stmt is not None:
            stmt.tokens = stmt_tokens
            yield stmt

0 comments on commit 6c05fad

Please sign in to comment.