Commit: Cleanup module code.
andialbrecht committed Oct 26, 2015
1 parent f7e07b7 commit 1992f6d
Showing 8 changed files with 31 additions and 31 deletions.
sqlparse/engine/__init__.py (2 additions, 2 deletions)

@@ -43,8 +43,8 @@ def run(self, sql, encoding=None):
         for filter_ in self.preprocess:
             stream = filter_.process(self, stream)
 
-        if (self.stmtprocess or self.postprocess or self.split_statements
-                or self._grouping):
+        if self.stmtprocess or self.postprocess or self.split_statements \
+                or self._grouping:
             splitter = StatementFilter()
             stream = splitter.process(self, stream)
 
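A note on the engine API this hunk touches: run() is the pipeline entry point, and split_statements, preprocess, stmtprocess and postprocess are plain attributes on FilterStack, as the condition above shows. A minimal sketch of driving the stack directly, assuming the 0.1.x-era API visible in the hunk (the sample SQL is mine):

from sqlparse.engine import FilterStack

# split_statements triggers the StatementFilter branch in the condition
# cleaned up above; run() then yields one Statement per SQL statement.
stack = FilterStack()
stack.split_statements = True
for statement in stack.run('select 1; select 2;'):
    print(statement)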
sqlparse/engine/filter.py (4 additions, 4 deletions)

@@ -23,8 +23,8 @@ def _reset(self):
     def _change_splitlevel(self, ttype, value):
         "Get the new split level (increase, decrease or remain equal)"
         # PostgreSQL
-        if (ttype == T.Name.Builtin
-                and value.startswith('$') and value.endswith('$')):
+        if ttype == T.Name.Builtin \
+                and value.startswith('$') and value.endswith('$'):
             if self._in_dbldollar:
                 self._in_dbldollar = False
                 return -1

@@ -64,8 +64,8 @@ def _change_splitlevel(self, ttype, value):
             self._is_create = True
             return 0
 
-        if (unified in ('IF', 'FOR')
-                and self._is_create and self._begin_depth > 0):
+        if unified in ('IF', 'FOR') \
+                and self._is_create and self._begin_depth > 0:
             return 1
 
         # Default
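The $...$ branch above is PostgreSQL dollar-quoting support: a semicolon inside $$ ... $$ must not end the statement, which is what _in_dbldollar tracks. A quick check through the public API; the expected count is my assumption, not part of the commit:

import sqlparse

# The semicolons inside the dollar-quoted body should be ignored by the
# splitter, leaving exactly two statements.
sql = (
    'CREATE FUNCTION one() RETURNS int AS $$\n'
    'BEGIN\n'
    '    RETURN 1;\n'
    'END;\n'
    '$$ LANGUAGE plpgsql;\n'
    'SELECT one();'
)
print(len(sqlparse.split(sql)))  # expected: 2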
sqlparse/engine/grouping.py (7 additions, 5 deletions)

@@ -116,7 +116,7 @@ def group_as(tlist):
     def _right_valid(token):
         # Currently limited to DML/DDL. Maybe additional more non SQL reserved
         # keywords should appear here (see issue8).
-        return not token.ttype in (T.DML, T.DDL)
+        return token.ttype not in (T.DML, T.DDL)
 
     def _left_valid(token):
         if token.ttype is T.Keyword and token.value in ('NULL',):

@@ -191,7 +191,8 @@ def _next_token(tl, i):
 
     i1 = tl.token_index(t1, start=i) if t1 else None
     t2_end = None if i1 is None else i1 + 1
-    t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
+    t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis),
+                                   end=t2_end)
 
     if t1 and t2:
         i2 = tl.token_index(t2, start=i)

@@ -219,9 +220,10 @@ def _next_token(tl, i):
         if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
             identifier_tokens = identifier_tokens[:-1]
         if not (len(identifier_tokens) == 1
-                and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis))
-                     or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
-                                                       T.Literal.Number.Float))):
+                and (isinstance(identifier_tokens[0], (sql.Function,
+                                                       sql.Parenthesis))
+                     or identifier_tokens[0].ttype in (
+                         T.Literal.Number.Integer, T.Literal.Number.Float))):
             group = tlist.group_tokens(sql.Identifier, identifier_tokens)
             idx = tlist.token_index(group, start=idx) + 1
         else:
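The first hunk here replaces `not x in y` with `x not in y`. The two compile to the same membership test, but the latter is the PEP 8 spelling (pycodestyle flags the former as E713). A tiny illustration:

from sqlparse import tokens as T

ttype = T.DML
# Identical results; the second form reads as one membership operator.
assert (not ttype in (T.DML, T.DDL)) == (ttype not in (T.DML, T.DDL))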
sqlparse/filters.py (4 additions, 7 deletions)

@@ -7,7 +7,6 @@
 from sqlparse import sql, tokens as T
 from sqlparse.compat import u, text_type
 from sqlparse.engine import FilterStack
-from sqlparse.lexer import tokenize
 from sqlparse.pipeline import Pipeline
 from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
                              String, Whitespace)

@@ -144,8 +143,6 @@ def process(self, stack, stream):
 
         # Found file path to include
         if token_type in String.Symbol:
-            # if token_type in tokens.String.Symbol:
-
             # Get path of file to include
             path = join(self.dirpath, value[1:-1])
 

@@ -251,9 +248,9 @@ def _stripws_identifierlist(self, tlist):
         # Removes newlines before commas, see issue140
         last_nl = None
         for token in tlist.tokens[:]:
-            if (token.ttype is T.Punctuation
-                    and token.value == ','
-                    and last_nl is not None):
+            if token.ttype is T.Punctuation \
+                    and token.value == ',' \
+                    and last_nl is not None:
                 tlist.tokens.remove(last_nl)
             if token.is_whitespace():
                 last_nl = token

@@ -492,7 +489,7 @@ def _process(self, stack, group, stream):
             else:
                 self.line = token.value.splitlines()[-1]
         elif (token.is_group()
-              and not token.__class__ in self.keep_together):
+              and token.__class__ not in self.keep_together):
             token.tokens = self._process(stack, token, token.tokens)
         else:
             val = u(token)
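For context, _stripws_identifierlist belongs to the filter behind the strip_whitespace option, and the comma handling shown above is the fix for issue140 (drop the newline that precedes a comma). A usage sketch; the exact output string is my assumption:

import sqlparse

# The newline before the comma is removed and the remaining whitespace
# is collapsed by StripWhitespaceFilter.
sql = 'select foo\n, bar from mytable'
print(sqlparse.format(sql, strip_whitespace=True))
# expected: something like 'select foo, bar from mytable'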
sqlparse/formatter.py (2 additions, 2 deletions)

@@ -106,8 +106,8 @@ def build_filter_stack(stack, options):
         stack.enable_grouping()
         stack.stmtprocess.append(filters.StripCommentsFilter())
 
-    if (options.get('strip_whitespace', False)
-            or options.get('reindent', False)):
+    if options.get('strip_whitespace', False) \
+            or options.get('reindent', False):
         stack.enable_grouping()
         stack.stmtprocess.append(filters.StripWhitespaceFilter())
 
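build_filter_stack translates the keyword options of sqlparse.format into filters; per the condition above, either strip_whitespace or reindent enables grouping and appends a StripWhitespaceFilter. A typical call, with the output shape hedged from memory rather than taken from this commit:

import sqlparse

print(sqlparse.format('select * from foo where id = 1', reindent=True))
# roughly:
# select *
# from foo
# where id = 1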
sqlparse/lexer.py (1 addition, 1 deletion)

@@ -123,7 +123,7 @@ def _process_state(cls, unprocessed, processed, state):
             for state in tdef2:
                 assert (state in unprocessed or
                         state in ('#pop', '#push')), \
-                    'unknown new state ' + state
+                       'unknown new state ' + state
             new_state = tdef2
         else:
             assert False, 'unknown new state def %r' % tdef2
sqlparse/sql.py (9 additions, 8 deletions)

@@ -232,7 +232,6 @@ def is_group(self):
         return True
 
     def get_sublists(self):
-        # return [x for x in self.tokens if isinstance(x, TokenList)]
         for x in self.tokens:
             if isinstance(x, TokenList):
                 yield x

@@ -347,9 +346,9 @@ def token_next(self, idx, skip_ws=True):
     def token_index(self, token, start=0):
         """Return list index of token."""
         if start > 0:
-            # Performing `index` manually is much faster when starting in the middle
-            # of the list of tokens and expecting to find the token near to the starting
-            # index.
+            # Performing `index` manually is much faster when starting
+            # in the middle of the list of tokens and expecting to find
+            # the token near to the starting index.
             for i in range(start, len(self.tokens)):
                 if self.tokens[i] == token:
                     return i

@@ -471,6 +470,7 @@ def _get_first_name(self, idx=None, reverse=False, keywords=False):
             return tok.get_name()
         return None
 
+
 class Statement(TokenList):
     """Represents a SQL statement."""
 

@@ -570,6 +570,7 @@ class SquareBrackets(TokenList):
     def _groupable_tokens(self):
         return self.tokens[1:-1]
 
+
 class Assignment(TokenList):
     """An assignment like 'var := val;'"""
     __slots__ = ('value', 'ttype', 'tokens')

@@ -672,10 +673,10 @@ def get_parameters(self):
         for t in parenthesis.tokens:
             if isinstance(t, IdentifierList):
                 return t.get_identifiers()
-            elif isinstance(t, Identifier) or \
-                    isinstance(t, Function) or \
-                    t.ttype in T.Literal:
-                return [t,]
+            elif (isinstance(t, Identifier) or
+                  isinstance(t, Function) or
+                  t.ttype in T.Literal):
+                return [t, ]
         return []
 
 
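Two things worth noting in this file: get_sublists() is a generator (the deleted comment preserved its old list-comprehension form), and the token_index comment explains the manual forward scan used when start > 0. A short sketch against the 0.1.x API; token_index(..., start=...) appears in the grouping hunk above, while the sample statement is mine:

import sqlparse

stmt = sqlparse.parse('select a, b from mytable')[0]

# get_sublists() lazily yields grouped children such as IdentifierList.
for sub in stmt.get_sublists():
    print(type(sub).__name__, str(sub))

# With start > 0, token_index scans forward from that index, which is
# fast when the token is known to sit nearby.
tok = stmt.tokens[2]
print(stmt.token_index(tok, start=1))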
sqlparse/utils.py (2 additions, 2 deletions)

@@ -73,7 +73,6 @@ def memoize_generator(func):
     cache = Cache()
 
     def wrapped_func(*args, **kwargs):
-        # params = (args, kwargs)
         params = (args, tuple(sorted(kwargs.items())))
 
         # Look if cached

@@ -120,6 +119,7 @@ def wrapped_func(*args, **kwargs):
 
 LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
 
+
 def split_unquoted_newlines(text):
     """Split a string on all unquoted newlines.
 

@@ -134,4 +134,4 @@ def split_unquoted_newlines(text):
             outputlines.append('')
         else:
             outputlines[-1] += line
-    return outputlines
\ No newline at end of file
+    return outputlines
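The deleted `# params = (args, kwargs)` line in memoize_generator records the naive cache key that would not work: a dict is unhashable and its order must not matter, hence `(args, tuple(sorted(kwargs.items())))`. A standalone sketch of that cache-key pattern (hypothetical names, not sqlparse code):

def make_key(args, kwargs):
    # A dict cannot be a dict key; a sorted tuple of its items is both
    # hashable and independent of keyword argument order.
    return (args, tuple(sorted(kwargs.items())))

cache = {}
cache[make_key(('select 1',), {'encoding': 'utf-8'})] = 'result'
assert make_key(('select 1',), {'encoding': 'utf-8'}) in cache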
