forked from SmileyChris/django-debug-toolbar
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added sqlparse, replacing my simple string-replace handling of SQL keywords, and updated the management command and SQL panel.
- Loading branch information
Showing
14 changed files
with
2,565 additions
and
56 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com | ||
# | ||
# This module is part of python-sqlparse and is released under | ||
# the BSD License: http://www.opensource.org/licenses/bsd-license.php. | ||
|
||
"""Parse SQL statements.""" | ||
|
||
|
||
__version__ = '0.1.1' | ||
|
||
|
||
import os | ||
|
||
|
||
class SQLParseError(Exception):
    """Base class for exceptions in this module.

    All errors raised by the sqlparse package derive from this class so
    callers can catch a single exception type.
    """
|
||
|
||
# Setup namespace | ||
from debug_toolbar.utils.sqlparse import engine | ||
from debug_toolbar.utils.sqlparse import filters | ||
from debug_toolbar.utils.sqlparse import formatter | ||
|
||
|
||
def parse(sql):
    """Parse *sql* and return the contained statements.

    *sql* is a single string containing one or more SQL statements.
    Returns a tuple of :class:`~sqlparse.sql.Statement` instances.
    """
    filter_stack = engine.FilterStack()
    # Enable full analysis (token grouping) so statements come back
    # as structured trees, not flat token lists.
    filter_stack.full_analyze()
    return tuple(filter_stack.run(sql))
|
||
|
||
def format(sql, **options):
    """Return *sql* reformatted according to *options*.

    Available options are documented in :ref:`formatting`.
    Returns the formatted SQL statement as a string.
    """
    # Validate user options first, then let the formatter wire the
    # appropriate filters onto a fresh stack.
    options = formatter.validate_options(options)
    filter_stack = formatter.build_filter_stack(engine.FilterStack(), options)
    # Serialize each processed statement back to unicode text.
    filter_stack.postprocess.append(filters.SerializerUnicode())
    return ''.join(filter_stack.run(sql))
|
||
|
||
def split(sql):
    """Split *sql* into single statements.

    Returns a list of strings, one per statement.
    """
    filter_stack = engine.FilterStack()
    # Only statement splitting is needed; no grouping or formatting.
    filter_stack.split_statements = True
    return [unicode(statement) for statement in filter_stack.run(sql)]
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,80 @@ | ||
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com | ||
# | ||
# This module is part of python-sqlparse and is released under | ||
# the BSD License: http://www.opensource.org/licenses/bsd-license.php. | ||
|
||
"""filter""" | ||
|
||
import re | ||
|
||
from debug_toolbar.utils.sqlparse import lexer, SQLParseError | ||
from debug_toolbar.utils.sqlparse.engine import grouping | ||
from debug_toolbar.utils.sqlparse.engine.filter import StatementFilter | ||
|
||
# XXX remove this when cleanup is complete | ||
Filter = object | ||
|
||
|
||
class FilterStack(object):
    """Pipeline that tokenizes SQL and pushes the tokens through
    pre-processing, statement-level and post-processing filters.

    Filters are appended by callers to :attr:`preprocess`,
    :attr:`stmtprocess` and :attr:`postprocess`; :meth:`run` applies
    them in that order.
    """

    def __init__(self):
        self.preprocess = []        # token-stream filters, applied first
        self.stmtprocess = []       # per-statement filters
        self.postprocess = []       # per-statement filters, applied last
        self.split_statements = False
        self._grouping = False      # group tokens into trees when True

    def _flatten(self, stream):
        """Yield leaf tokens of *stream*, descending into groups depth-first."""
        for tok in stream:
            if not tok.is_group():
                yield tok
            else:
                for leaf in self._flatten(tok.tokens):
                    yield leaf

    def enable_grouping(self):
        """Turn on token grouping for subsequent :meth:`run` calls."""
        self._grouping = True

    def full_analyze(self):
        """Enable every analysis step (currently just grouping)."""
        self.enable_grouping()

    def run(self, sql):
        """Tokenize *sql* and return an iterable of processed statements."""
        stream = lexer.tokenize(sql)

        # Raw token-stream filters run before anything else.
        for prefilter in self.preprocess:
            stream = prefilter.process(self, stream)

        # Statement splitting is required by every later stage.
        needs_splitting = (self.stmtprocess or self.postprocess
                           or self.split_statements or self._grouping)
        if needs_splitting:
            stream = StatementFilter().process(self, stream)

        if self._grouping:
            def _grouped(statements):
                for statement in statements:
                    grouping.group(statement)
                    yield statement
            stream = _grouped(stream)

        if self.stmtprocess:
            def _apply_stmt_filters(statements):
                # Statement filters mutate in place; collect results eagerly.
                processed = []
                for statement in statements:
                    for stmt_filter in self.stmtprocess:
                        stmt_filter.process(self, statement)
                    processed.append(statement)
                return processed
            stream = _apply_stmt_filters(stream)

        if self.postprocess:
            def _apply_post_filters(statements):
                for statement in statements:
                    # Post-filters expect a flat token list, not a tree.
                    statement.tokens = list(self._flatten(statement.tokens))
                    for post_filter in self.postprocess:
                        statement = post_filter.process(self, statement)
                    yield statement
            stream = _apply_post_filters(stream)

        return stream
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
# -*- coding: utf-8 -*- | ||
|
||
from debug_toolbar.utils.sqlparse import tokens as T | ||
from debug_toolbar.utils.sqlparse.engine.grouping import Statement, Token | ||
|
||
|
||
class TokenFilter(object):
    """Base class for filters that operate on a raw token stream.

    Keyword arguments given at construction time are stored unchanged
    in :attr:`options` for use by subclasses.
    """

    def __init__(self, **options):
        self.options = options

    def process(self, stack, stream):
        """Process the token *stream*; subclasses must override this."""
        raise NotImplementedError
|
||
|
||
class StatementFilter(TokenFilter):
    """Split a flat (ttype, value) token stream into Statement objects.

    Maintains a nesting level so that statement terminators (``;``)
    inside DECLARE blocks or PostgreSQL dollar-quoted bodies do not end
    the statement prematurely.
    """

    def __init__(self):
        TokenFilter.__init__(self)
        # Inside a DECLARE section of a procedural block.
        self._in_declare = False
        # Inside a PostgreSQL dollar-quoted ($$ ... $$) body.
        self._in_dbldollar = False
        # A CREATE keyword was seen in the current statement.
        self._is_create = False

    def _reset(self):
        """Clear per-statement state before starting a new statement."""
        self._in_declare = False
        self._in_dbldollar = False
        self._is_create = False

    def _change_splitlevel(self, ttype, value):
        """Return the nesting-level delta (-1, 0 or +1) caused by this token."""
        # PostgreSQL: a $tag$ token toggles dollar-quoting; everything
        # between the opening and closing markers keeps the level flat.
        if (ttype == T.Name.Builtin
            and value.startswith('$') and value.endswith('$')):
            if self._in_dbldollar:
                self._in_dbldollar = False
                return -1
            else:
                self._in_dbldollar = True
                return 1
        elif self._in_dbldollar:
            return 0

        # ANSI: only plain keywords can change the nesting level below.
        if ttype is not T.Keyword:
            return 0

        unified = value.upper()

        if unified == 'DECLARE':
            self._in_declare = True
            return 1

        if unified == 'BEGIN':
            # NOTE(review): both branches return 0 — BEGIN never changes
            # the level here. This matches upstream sqlparse 0.1.1
            # (which carries a FIXME on this spot); left as-is.
            if self._in_declare:
                return 0
            return 0

        if unified == 'END':
            # Should this respect a preceding BEGIN?
            # In CASE ... WHEN ... END this results in a split level -1.
            return -1

        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
            self._is_create = True

        # IF/FOR bodies inside CREATE (e.g. CREATE FUNCTION) nest deeper.
        if unified in ('IF', 'FOR') and self._is_create:
            return 1

        # Default
        return 0

    def process(self, stack, stream):
        """Yield Statement instances assembled from the token *stream*.

        *stack* is unused here but kept for the filter interface.
        """
        splitlevel = 0          # current nesting depth
        stmt = None             # statement being assembled
        consume_ws = False      # swallow trailing whitespace after ';'
        stmt_tokens = []
        for ttype, value in stream:
            # Before appending the token: a pending ';' ends the current
            # statement once a non-whitespace, non-comment token arrives.
            if (consume_ws and ttype is not T.Whitespace
                and ttype is not T.Comment.Single):
                consume_ws = False
                stmt.tokens = stmt_tokens
                yield stmt
                self._reset()
                stmt = None
                splitlevel = 0
            if stmt is None:
                stmt = Statement()
                stmt_tokens = []
            splitlevel += self._change_splitlevel(ttype, value)
            # Append the token
            stmt_tokens.append(Token(ttype, value))
            # After appending the token: a top-level ';' marks the end;
            # trailing whitespace/comments stay with this statement.
            if (splitlevel <= 0 and ttype is T.Punctuation
                and value == ';'):
                consume_ws = True
        # Flush the final (possibly unterminated) statement.
        if stmt is not None:
            stmt.tokens = stmt_tokens
            yield stmt
Oops, something went wrong.