Code cleanup and test coverage.

commit 974222bcb24a5b2bf3a0e5ecd616a2c3855e8342 (parent: ac165c9)
Author: @andialbrecht
.hgignore (1 line changed)
@@ -2,6 +2,7 @@ syntax: glob
docs/build
dist
MANIFEST
+.coverage
extras/appengine/django
extras/appengine/django.zip
extras/appengine/release
CHANGES (1 line changed)
@@ -6,6 +6,7 @@ In Development
* Improved parsing of identifier lists (issue2).
* Recursive recognition of AS (issue4) and CASE.
* Improved support for UPDATE statements.
+ * Code cleanup and better test coverage.
Release 0.1.0
Makefile (3 lines changed)
@@ -12,6 +12,9 @@ help:
test:
$(PYTHON) tests/run_tests.py
+coverage:
+ nosetests --with-coverage --cover-inclusive --cover-package=sqlparse
+
clean:
$(PYTHON) setup.py clean
find . -name '*.pyc' -delete
sqlparse/engine/filter.py (1 line changed)
@@ -17,6 +17,7 @@ def process(self, stack, stream):
class StatementFilter(TokenFilter):
def __init__(self):
+ TokenFilter.__init__(self)
self._in_declare = False
self._in_dbldollar = False
self._is_create = False
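
The one-line change above delegates to the base class initializer. The reason this matters: a subclass that overrides __init__ without calling the parent's never receives the attributes the parent sets up. A minimal sketch with made-up class names (they do not mirror sqlparse's TokenFilter internals):

class BaseFilter(object):
    def __init__(self):
        self.options = {}            # state the base class relies on

class WithoutDelegation(BaseFilter):
    def __init__(self):
        self.enabled = True          # base __init__ never runs

class WithDelegation(BaseFilter):
    def __init__(self):
        BaseFilter.__init__(self)    # delegate first, as the commit does
        self.enabled = True

print(hasattr(WithDelegation(), 'options'))     # True
print(hasattr(WithoutDelegation(), 'options'))  # False -- the gap the fix closes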
sqlparse/formatter.py (38 lines changed)
@@ -120,41 +120,3 @@ def build_filter_stack(stack, options):
return stack
-def format(statement, **options):
- import filters
- lexer = Lexer()
-# lexer.add_filter('whitespace')
- lexer.add_filter(filters.GroupFilter())
- if options.get('reindent', False):
- lexer.add_filter(filters.StripWhitespaceFilter())
- lexer.add_filter(filters.IndentFilter(
- n_indents=options.get('n_indents', 2)))
- if options.get('ltrim', False):
- lexer.add_filter(filters.LTrimFilter())
- keyword_case = options.get('keyword_case', None)
- if keyword_case is not None:
- assert keyword_case in ('lower', 'upper', 'capitalize')
- lexer.add_filter(filters.KeywordCaseFilter(case=keyword_case))
- identifier_case = options.get('identifier_case', None)
- if identifier_case is not None:
- assert identifier_case in ('lower', 'upper', 'capitalize')
- lexer.add_filter(filters.IdentifierCaseFilter(case=identifier_case))
- if options.get('strip_comments', False):
- lexer.add_filter(filters.StripCommentsFilter())
- right_margin = options.get('right_margin', None)
- if right_margin is not None:
- right_margin = int(right_margin)
- assert right_margin > 0
- lexer.add_filter(filters.RightMarginFilter(margin=right_margin))
- lexer.add_filter(filters.UngroupFilter())
- if options.get('output_format', None):
- ofrmt = options['output_format']
- assert ofrmt in ('sql', 'python', 'php')
- if ofrmt == 'python':
- lexer.add_filter(filters.OutputPythonFilter())
- elif ofrmt == 'php':
- lexer.add_filter(filters.OutputPHPFilter())
- tokens = []
- for ttype, value in lexer.get_tokens(unicode(statement)):
- tokens.append((ttype, value))
- return statement.__class__(tokens)
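
With the module-level format() helper removed above, formatting goes through the package-level entry point instead. A minimal sketch, assuming sqlparse.format() in this release accepts the reindent and keyword_case options seen in the removed code:

import sqlparse

raw = "select id, name from users where id = 1"
# keyword_case and reindent are two of the options handled in the removed helper
print(sqlparse.format(raw, keyword_case='upper', reindent=True))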
sqlparse/sql.py (26 lines changed)
@@ -118,19 +118,19 @@ def __str__(self):
def _get_repr_name(self):
return self.__class__.__name__
- def _pprint_tree(self, max_depth=None, depth=0):
- """Pretty-print the object tree."""
- indent = ' '*(depth*2)
- for token in self.tokens:
- if token.is_group():
- pre = ' | '
- else:
- pre = ' | '
- print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
- token._get_repr_value())
- if (token.is_group() and max_depth is not None
- and depth < max_depth):
- token._pprint_tree(max_depth, depth+1)
+ ## def _pprint_tree(self, max_depth=None, depth=0):
+ ## """Pretty-print the object tree."""
+ ## indent = ' '*(depth*2)
+ ## for token in self.tokens:
+ ## if token.is_group():
+ ## pre = ' | '
+ ## else:
+ ## pre = ' | '
+ ## print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
+ ## token._get_repr_value())
+ ## if (token.is_group() and max_depth is not None
+ ## and depth < max_depth):
+ ## token._pprint_tree(max_depth, depth+1)
def flatten(self):
"""Generator yielding ungrouped tokens.
tests/test_grouping.py (12 lines changed)
@@ -123,3 +123,15 @@ def test_alias(self):
p = sqlparse.parse(s)[0]
self.ndiffAssertEqual(s, p.to_unicode())
self.assertEqual(p.tokens[4].get_alias(), 'view')
+
+
+
+class TestStatement(TestCaseBase):
+
+ def test_get_type(self):
+ f = lambda sql: sqlparse.parse(sql)[0]
+ self.assertEqual(f('select * from foo').get_type(), 'SELECT')
+ self.assertEqual(f('update foo').get_type(), 'UPDATE')
+ self.assertEqual(f(' update foo').get_type(), 'UPDATE')
+ self.assertEqual(f('\nupdate foo').get_type(), 'UPDATE')
+ self.assertEqual(f('foo').get_type(), 'UNKNOWN')
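
The new TestStatement case pins down Statement.get_type(): leading whitespace and newlines are ignored, and statements with no recognizable type report 'UNKNOWN'. In application code the typical use is dispatching on the statement kind; a small sketch built only on the behaviour asserted above:

import sqlparse

def is_select(statement_text):
    # get_type() yields e.g. 'SELECT', 'UPDATE', or 'UNKNOWN',
    # as exercised by the test case above.
    return sqlparse.parse(statement_text)[0].get_type() == 'SELECT'

print(is_select('select * from foo'))       # True
print(is_select('update foo set bar = 1'))  # False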
tests/test_tokenize.py (46 lines changed)
@@ -3,7 +3,9 @@
import unittest
import types
+import sqlparse
from sqlparse import lexer
+from sqlparse import sql
from sqlparse.tokens import *
@@ -38,3 +40,47 @@ def test_linebreaks(self): # issue1
sql = 'foo\r\nbar\n'
tokens = lexer.tokenize(sql)
self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
+
+
+class TestToken(unittest.TestCase):
+
+ def test_str(self):
+ token = sql.Token(None, 'FoO')
+ self.assertEqual(str(token), 'FoO')
+
+ def test_repr(self):
+ token = sql.Token(Keyword, 'foo')
+ tst = "<Keyword 'foo' at 0x"
+ self.assertEqual(repr(token)[:len(tst)], tst)
+ token = sql.Token(Keyword, '1234567890')
+ tst = "<Keyword '123456...' at 0x"
+ self.assertEqual(repr(token)[:len(tst)], tst)
+
+ def test_flatten(self):
+ token = sql.Token(Keyword, 'foo')
+ gen = token.flatten()
+ self.assertEqual(type(gen), types.GeneratorType)
+ lgen = list(gen)
+ self.assertEqual(lgen, [token])
+
+
+class TestTokenList(unittest.TestCase):
+
+ def test_token_first(self):
+ p = sqlparse.parse(' select foo')[0]
+ first = p.token_first()
+ self.assertEqual(first.value, 'select')
+ self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
+ self.assertEqual(sql.TokenList([]).token_first(), None)
+
+ def test_token_matching(self):
+ t1 = sql.Token(Keyword, 'foo')
+ t2 = sql.Token(Punctuation, ',')
+ x = sql.TokenList([t1, t2])
+ self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
+ t1)
+ self.assertEqual(x.token_matching(0,
+ [lambda t: t.ttype is Punctuation]),
+ t2)
+ self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
+ None)