CHANGELOG (4 changes: 3 additions & 1 deletion)
@@ -1,7 +1,9 @@
 Development Version
 -------------------
 
-Nothing, yet.
+Internal Changes
+
+* `is_whitespace` and `is_group` changed into properties
 
 
 Release 0.2.1 (Aug 13, 2016)
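In practice the change just drops the call parentheses. A minimal before/after sketch (hedged: assumes the development version with this patch applied; `SELECT 1` is an arbitrary example statement):

import sqlparse

tokens = sqlparse.parse('SELECT 1')[0].tokens

# 0.2.1 and earlier: is_whitespace was a method.
# non_ws = [t for t in tokens if not t.is_whitespace()]

# With this patch: plain attribute access.
non_ws = [t for t in tokens if not t.is_whitespace]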
examples/extract_table_names.py (2 changes: 1 addition & 1 deletion)
@@ -18,7 +18,7 @@


 def is_subselect(parsed):
-    if not parsed.is_group():
+    if not parsed.is_group:
         return False
     for item in parsed.tokens:
         if item.ttype is DML and item.value.upper() == 'SELECT':
sqlparse/engine/grouping.py (10 changes: 5 additions & 5 deletions)
@@ -21,13 +21,13 @@ def _group_matching(tlist, cls):
     for idx, token in enumerate(list(tlist)):
         tidx = idx - tidx_offset
 
-        if token.is_whitespace():
+        if token.is_whitespace:
             # ~50% of tokens will be whitespace. Checking early for them
             # avoids 3 comparisons, but then adds 1 more comparison for
             # the other ~50% of tokens...
             continue
 
-        if token.is_group() and not isinstance(token, cls):
+        if token.is_group and not isinstance(token, cls):
             # Check inside previously grouped (i.e. parenthesis) if a group
             # of a different type (i.e. case) is inside. Ideally this should
             # check for all open/close tokens at once to avoid recursion
@@ -246,7 +246,7 @@ def group_comments(tlist):
     tidx, token = tlist.token_next_by(t=T.Comment)
     while token:
         eidx, end = tlist.token_not_matching(
-            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace(), idx=tidx)
+            lambda tk: imt(tk, t=T.Comment) or tk.is_whitespace, idx=tidx)
         if end is not None:
             eidx, end = tlist.token_prev(eidx, skip_ws=False)
         tlist.group_tokens(sql.Comment, tidx, eidx)
@@ -372,10 +372,10 @@ def _group(tlist, cls, match,
     for idx, token in enumerate(list(tlist)):
         tidx = idx - tidx_offset
 
-        if token.is_whitespace():
+        if token.is_whitespace:
             continue
 
-        if recurse and token.is_group() and not isinstance(token, cls):
+        if recurse and token.is_group and not isinstance(token, cls):
             _group(token, cls, match, valid_prev, valid_next, post, extend)
 
         if match(token):
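Since `is_whitespace` is consulted for nearly every token in these grouping loops, moving from a method call to a plain attribute also removes one function call per token. A minimal micro-benchmark sketch of that difference (hypothetical classes, not part of the patch):

import timeit

class MethodToken:
    def is_whitespace(self):
        return True

class AttrToken:
    def __init__(self):
        self.is_whitespace = True

m, a = MethodToken(), AttrToken()
print(timeit.timeit(lambda: m.is_whitespace(), number=10**6))  # method call
print(timeit.timeit(lambda: a.is_whitespace, number=10**6))    # attribute read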
sqlparse/filters/aligned_indent.py (2 changes: 1 addition & 1 deletion)
@@ -38,7 +38,7 @@ def nl(self, offset=1):
             self._max_kwd_len + offset + indent + self.offset))
 
     def _process_statement(self, tlist):
-        if tlist.tokens[0].is_whitespace() and self.indent == 0:
+        if tlist.tokens[0].is_whitespace and self.indent == 0:
             tlist.tokens.pop(0)
 
         # process the main query body
sqlparse/filters/others.py (18 changes: 9 additions & 9 deletions)
@@ -23,8 +23,8 @@ def get_next_comment():
         # Replace by whitespace if prev and next exist and if they're not
         # whitespaces. This doesn't apply if prev or next is a parenthesis.
         if (prev_ is None or next_ is None or
-                prev_.is_whitespace() or prev_.match(T.Punctuation, '(') or
-                next_.is_whitespace() or next_.match(T.Punctuation, ')')):
+                prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
+                next_.is_whitespace or next_.match(T.Punctuation, ')')):
             tlist.tokens.remove(token)
         else:
             tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
@@ -48,9 +48,9 @@ def _stripws_default(tlist):
         last_was_ws = False
         is_first_char = True
         for token in tlist.tokens:
-            if token.is_whitespace():
+            if token.is_whitespace:
                 token.value = '' if last_was_ws or is_first_char else ' '
-            last_was_ws = token.is_whitespace()
+            last_was_ws = token.is_whitespace
             is_first_char = False
 
     def _stripws_identifierlist(self, tlist):
@@ -59,25 +59,25 @@ def _stripws_identifierlist(self, tlist):
         for token in list(tlist.tokens):
             if last_nl and token.ttype is T.Punctuation and token.value == ',':
                 tlist.tokens.remove(last_nl)
-            last_nl = token if token.is_whitespace() else None
+            last_nl = token if token.is_whitespace else None
 
             # next_ = tlist.token_next(token, skip_ws=False)
-            # if (next_ and not next_.is_whitespace() and
+            # if (next_ and not next_.is_whitespace and
             #         token.ttype is T.Punctuation and token.value == ','):
             #     tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
         return self._stripws_default(tlist)
 
     def _stripws_parenthesis(self, tlist):
-        if tlist.tokens[1].is_whitespace():
+        if tlist.tokens[1].is_whitespace:
             tlist.tokens.pop(1)
-        if tlist.tokens[-2].is_whitespace():
+        if tlist.tokens[-2].is_whitespace:
             tlist.tokens.pop(-2)
         self._stripws_default(tlist)
 
     def process(self, stmt, depth=0):
         [self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
         self._stripws(stmt)
-        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace():
+        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
             stmt.tokens.pop(-1)
         return stmt

sqlparse/filters/output.py (4 changes: 2 additions & 2 deletions)
@@ -47,7 +47,7 @@ def _process(self, stream, varname, has_nl):
         # Print the tokens on the quote
         for token in stream:
             # Token is a new line separator
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 # Close quote and add a new line
                 yield sql.Token(T.Text, " '")
                 yield sql.Token(T.Whitespace, '\n')
@@ -93,7 +93,7 @@ def _process(self, stream, varname, has_nl):
         # Print the tokens on the quote
         for token in stream:
             # Token is a new line separator
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 # Close quote and add a new line
                 yield sql.Token(T.Text, ' ";')
                 yield sql.Token(T.Whitespace, '\n')
sqlparse/filters/reindent.py (6 changes: 3 additions & 3 deletions)
@@ -23,7 +23,7 @@ def __init__(self, width=2, char=' ', wrap_after=0, n='\n'):

     def _flatten_up_to_token(self, token):
         """Yields all tokens up to token but excluding current."""
-        if token.is_group():
+        if token.is_group:
             token = next(token.flatten())
 
         for t in self._curr_stmt.flatten():
Expand Down Expand Up @@ -65,7 +65,7 @@ def _split_kwds(self, tlist):
             pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
             uprev = text_type(prev_)
 
-            if prev_ and prev_.is_whitespace():
+            if prev_ and prev_.is_whitespace:
                 del tlist.tokens[pidx]
                 tidx -= 1

@@ -80,7 +80,7 @@ def _split_statements(self, tlist):
         tidx, token = tlist.token_next_by(t=ttypes)
         while token:
             pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
-            if prev_ and prev_.is_whitespace():
+            if prev_ and prev_.is_whitespace:
                 del tlist.tokens[pidx]
                 tidx -= 1
             # only break if it's not the first token
sqlparse/filters/right_margin.py (4 changes: 2 additions & 2 deletions)
@@ -23,12 +23,12 @@ def __init__(self, width=79):

     def _process(self, group, stream):
         for token in stream:
-            if token.is_whitespace() and '\n' in token.value:
+            if token.is_whitespace and '\n' in token.value:
                 if token.value.endswith('\n'):
                     self.line = ''
                 else:
                     self.line = token.value.splitlines()[-1]
-            elif token.is_group() and type(token) not in self.keep_together:
+            elif token.is_group and type(token) not in self.keep_together:
                 token.tokens = self._process(token, token.tokens)
             else:
                 val = text_type(token)
sqlparse/sql.py (31 changes: 12 additions & 19 deletions)
@@ -24,14 +24,17 @@ class Token(object):
     the type of the token.
     """
 
-    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
+    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword',
+                 'is_group', 'is_whitespace')
 
     def __init__(self, ttype, value):
         value = text_type(value)
         self.value = value
         self.ttype = ttype
         self.parent = None
+        self.is_group = False
         self.is_keyword = ttype in T.Keyword
+        self.is_whitespace = self.ttype in T.Whitespace
         self.normalized = value.upper() if self.is_keyword else value
 
     def __str__(self):
@@ -96,14 +99,6 @@ def match(self, ttype, values, regex=False):

         return self.normalized in values
 
-    def is_group(self):
-        """Returns ``True`` if this object has children."""
-        return False
-
-    def is_whitespace(self):
-        """Return ``True`` if this token is a whitespace token."""
-        return self.ttype in T.Whitespace
-
     def within(self, group_cls):
         """Returns ``True`` if this token is within *group_cls*.

@@ -145,6 +140,7 @@ def __init__(self, tokens=None):
         self.tokens = tokens or []
         [setattr(token, 'parent', self) for token in tokens]
         super(TokenList, self).__init__(None, text_type(self))
+        self.is_group = True
 
     def __str__(self):
         return ''.join(token.value for token in self.flatten())
@@ -173,7 +169,7 @@ def _pprint_tree(self, max_depth=None, depth=0, f=None):
print("{indent}{idx:2d} {cls} {q}{value}{q}"
.format(**locals()), file=f)

if token.is_group() and (max_depth is None or depth < max_depth):
if token.is_group and (max_depth is None or depth < max_depth):
token._pprint_tree(max_depth, depth + 1, f)

def get_token_at_offset(self, offset):
@@ -191,18 +187,15 @@ def flatten(self):
         This method is recursively called for all child tokens.
         """
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 for item in token.flatten():
                     yield item
             else:
                 yield token
 
-    def is_group(self):
-        return True
-
     def get_sublists(self):
         for token in self.tokens:
-            if token.is_group():
+            if token.is_group:
                 yield token
 
     @property
@@ -241,7 +234,7 @@ def token_first(self, skip_ws=True, skip_cm=False):
         ignored too.
         """
         # this one is inconsistent, using Comment instead of T.Comment...
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs)[1]

@@ -278,7 +271,7 @@ def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
         if idx is None:
             return None, None
         idx += 1  # a lot of calling code currently pre-compensates for this
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace()) or
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
                                 (skip_cm and imt(tk, t=T.Comment, i=Comment)))
         return self._token_matching(funcs, idx, reverse=_reverse)

@@ -296,7 +289,7 @@ def group_tokens(self, grp_cls, start, end, include_end=True,
         end_idx = end + include_end
 
         # will be needed later for new group_clauses
-        # while skip_ws and tokens and tokens[-1].is_whitespace():
+        # while skip_ws and tokens and tokens[-1].is_whitespace:
         #     tokens = tokens[:-1]
 
         if extend and isinstance(start, grp_cls):
@@ -471,7 +464,7 @@ def get_identifiers(self):
         Whitespaces and punctuations are not included in this generator.
         """
         for token in self.tokens:
-            if not (token.is_whitespace() or token.match(T.Punctuation, ',')):
+            if not (token.is_whitespace or token.match(T.Punctuation, ',')):
                 yield token


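Taken together, `Token.__init__` now sets `is_group = False` and derives `is_whitespace` from the ttype, while `TokenList.__init__` overrides `is_group` to `True`. A minimal sketch of the resulting contract (hedged: assumes the patched sqlparse; the query text is arbitrary):

import sqlparse
from sqlparse import sql, tokens as T

stmt = sqlparse.parse('select * from foo')[0]

assert stmt.is_group                     # Statement is a TokenList
assert not stmt.tokens[0].is_whitespace  # the 'select' keyword
assert stmt.tokens[1].is_whitespace      # the space after it

# Both flags are plain instance attributes, set once in __init__:
ws = sql.Token(T.Whitespace, ' ')
assert ws.is_whitespace and not ws.is_group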
tests/test_grouping.py (2 changes: 1 addition & 1 deletion)
@@ -51,7 +51,7 @@ def test_grouping_identifiers():

s = "INSERT INTO `test` VALUES('foo', 'bar');"
parsed = sqlparse.parse(s)[0]
types = [l.ttype for l in parsed.tokens if not l.is_whitespace()]
types = [l.ttype for l in parsed.tokens if not l.is_whitespace]
assert types == [T.DML, T.Keyword, None, T.Keyword, None, T.Punctuation]

s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"