diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..ff6415d
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,7 @@
+[report]
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise NotImplementedError
diff --git a/gsheetsdb/convert.py b/gsheetsdb/convert.py
index 4925879..4d4350f 100644
--- a/gsheetsdb/convert.py
+++ b/gsheetsdb/convert.py
@@ -9,9 +9,6 @@
 
 def parse_datetime(v):
     """Parse a string like 'Date(2018,0,1,0,0,0)'"""
-    if v is None:
-        return None
-
     args = [int(number) for number in v[len('Date('):-1].split(',')]
     args[1] += 1  # month is zero indexed in the response
     return datetime.datetime(*args)
@@ -19,18 +16,12 @@ def parse_datetime(v):
 
 def parse_date(v):
     """Parse a string like 'Date(2018,0,1)'"""
-    if v is None:
-        return None
-
     args = [int(number) for number in v[len('Date('):-1].split(',')]
     args[1] += 1  # month is zero indexed in the response
     return datetime.date(*args)
 
 
 def parse_timeofday(v):
-    if v is None:
-        return None
-
     return datetime.time(*v)
 
 
diff --git a/gsheetsdb/processors.py b/gsheetsdb/processors.py
index 57b27b1..da0a69e 100644
--- a/gsheetsdb/processors.py
+++ b/gsheetsdb/processors.py
@@ -50,6 +50,9 @@ def is_subset(json, other):
 
         return True
 
+    elif isinstance(other, list):
+        return json in other
+
     else:
         return json == other
 
diff --git a/gsheetsdb/translator.py b/gsheetsdb/translator.py
index d8fdd30..c6121b5 100644
--- a/gsheetsdb/translator.py
+++ b/gsheetsdb/translator.py
@@ -74,7 +74,10 @@
     return aliases
 
 
-def translate(parsed_query, column_map):
+def translate(parsed_query, column_map=None):
+    if column_map is None:
+        column_map = {}
+
     # HAVING is not supported
     if 'having' in parsed_query:
         raise NotSupportedError('HAVING not supported')
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..e29b9ce
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,6 @@
+[nosetests]
+match=^test
+nocapture=1
+with-coverage=1
+cover-package=gsheetsdb
+cover-erase=1
diff --git a/setup.py b/setup.py
index 5d18379..6332104 100644
--- a/setup.py
+++ b/setup.py
@@ -38,6 +38,7 @@
 ]
 
 development_extras = [
+    'coverage',
     'nose',
     'pipreqs',
     'twine',
diff --git a/tests/context.py b/tests/context.py
index abe3d22..952f428 100644
--- a/tests/context.py
+++ b/tests/context.py
@@ -5,5 +5,6 @@
 
 import gsheetsdb
 from gsheetsdb import exceptions
-from gsheetsdb.processors import CountStar, SubsetMatcher
+from gsheetsdb.convert import convert_rows
+from gsheetsdb.processors import CountStar, is_subset, SubsetMatcher
 from gsheetsdb.translator import extract_column_aliases, translate
diff --git a/tests/test_convert.py b/tests/test_convert.py
new file mode 100644
index 0000000..43a1a92
--- /dev/null
+++ b/tests/test_convert.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+
+from collections import namedtuple
+import datetime
+import unittest
+
+from .context import convert_rows
+
+
+
+class ConvertTestSuite(unittest.TestCase):
+
+    payload = {
+        "version": "0.6",
+        "reqId": "0",
+        "status": "ok",
+        "sig": "1788543417",
+        "table": {
+            "cols": [
+                {
+                    "id": "A",
+                    "label": "datetime",
+                    "type": "datetime",
+                    "pattern": "M/d/yyyy H:mm:ss",
+                },
+                {
+                    "id": "B",
+                    "label": "number",
+                    "type": "number",
+                    "pattern": "General",
+                },
+                {
+                    "id": "C",
+                    "label": "boolean",
+                    "type": "boolean",
+                },
+                {
+                    "id": "D",
+                    "label": "date",
+                    "type": "date",
+                    "pattern": "M/d/yyyy",
+                },{
+                    "id": "E",
+                    "label": "timeofday",
+                    "type": "timeofday",
+                    "pattern": "h:mm:ss am/pm",
+                },{
+                    "id": "F",
+                    "label": "string",
+                    "type": "string",
+                },
+            ],
+            "rows": [
+                {
+                    "c": [
+                        {"v": "Date(2018,8,1,0,0,0)", "f": "9/1/2018 0:00:00"},
+                        {"v": 1.0, "f": "1"},
+                        {"v": True, "f": "TRUE"},
+                        {"v": "Date(2018,0,1)", "f": "1/1/2018"},
+                        {"v": [17,0,0,0], "f": "5:00:00 PM"},
+                        {"v": "test"},
+                    ],
+                },
+                {
+                    "c": [
+                        None,
+                        {"v": 1.0, "f": "1"},
+                        {"v": True, "f": "TRUE"},
+                        None,
+                        None,
+                        {"v": "test"},
+                    ],
+                },
+            ],
+        },
+    }
+
+    def test_convert(self):
+        cols = self.payload['table']['cols']
+        rows = self.payload['table']['rows']
+        result = convert_rows(cols, rows)
+        Row = namedtuple('Row', 'datetime number boolean date timeofday string')
+        expected = [
+            Row(
+                datetime=datetime.datetime(2018, 9, 1, 0, 0),
+                number=1.0,
+                boolean=True,
+                date=datetime.date(2018, 1, 1),
+                timeofday=datetime.time(17, 0),
+                string='test',
+            ),
+            Row(
+                datetime=None,
+                number=1.0,
+                boolean=True,
+                date=None,
+                timeofday=None,
+                string='test',
+            ),
+        ]
+        self.assertEquals(result, expected)
diff --git a/tests/test_processing.py b/tests/test_processing.py
index 19b1153..6b0c2ba 100644
--- a/tests/test_processing.py
+++ b/tests/test_processing.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-from .context import CountStar, SubsetMatcher
+from .context import CountStar, is_subset, SubsetMatcher
 
 import unittest
 
@@ -58,6 +58,37 @@ def test_count_star(self):
         }
         self.assertEquals(result, expected)
 
+    def test_count_star_no_results(self):
+        sql = 'SELECT COUNT(*) AS total FROM "http://example.com"'
+        parsed_query = parse(sql)
+        column_map = {'country': 'A', 'cnt': 'B'}
+
+        processor = CountStar()
+        processor.pre_process(parsed_query, column_map)
+
+        payload = {
+            'status': 'ok',
+            'table': {
+                'cols': [
+                    {'id': 'count-A', 'label': 'count country', 'type': 'number'},
+                    {'id': 'count-B', 'label': 'count cnt', 'type': 'number'},
+                ],
+                'rows': [],
+            },
+        }
+        aliases = ['__CountStar__country', '__CountStar__cnt']
+        result = processor.post_process(payload, aliases)
+        expected = {
+            'status': 'ok',
+            'table': {
+                'cols': [
+                    {'id': 'count-star', 'label': 'total', 'type': 'number'},
+                ],
+                'rows': [{'c': [{'v': 0}]}],
+            },
+        }
+        self.assertEquals(result, expected)
+
     def test_count_star_with_groupby(self):
         sql = 'SELECT country, COUNT(*) FROM "http://example.com" GROUP BY country'
         parsed_query = parse(sql)
@@ -133,3 +164,15 @@ def test_subset_matcher(self):
         parsed_query = parse(
             'SELECT country, COUNT(*) FROM "http://example.com" GROUP BY country')
         self.assertTrue(pattern.match(parsed_query))
+
+    def test_is_subset(self):
+        json = [1, 2, 3]
+
+        other = [1, 2, 3, 4]
+        self.assertTrue(is_subset(json, other))
+
+        other = 1
+        self.assertFalse(is_subset(json, other))
+
+        other = [1, 3, 4]
+        self.assertFalse(is_subset(json, other))
diff --git a/tests/test_translation.py b/tests/test_translation.py
index c2b6c43..ba555f5 100644
--- a/tests/test_translation.py
+++ b/tests/test_translation.py
@@ -54,6 +54,11 @@ def test_having(self):
         with self.assertRaises(exceptions.NotSupportedError):
             result = translate(parse(sql), {'country': 'A', 'cnt': 'B'})
 
+    def test_subquery(self):
+        sql = 'SELECT * from XYZZY, ABC'
+        with self.assertRaises(exceptions.NotSupportedError):
+            result = translate(parse(sql))
+
     def test_orderby(self):
         sql = '''
             SELECT
@@ -94,12 +99,24 @@ def test_multiple_aliases(self):
         result = translate(parse(sql), {'country': 'A', 'cnt': 'B'})
         self.assertEquals(result, expected)
 
+    def test_unalias_orderby(self):
+        sql = 'SELECT cnt AS value FROM "http://example.com" ORDER BY value'
+        expected = 'SELECT B ORDER BY B'
+        result = translate(parse(sql), {'cnt': 'B'})
+        self.assertEquals(result, expected)
+
     def test_column_aliases(self):
         sql = 'SELECT SUM(cnt) AS total FROM "http://example.com"'
         expected = ['total']
         result = extract_column_aliases(parse(sql))
         self.assertEquals(result, expected)
 
+    def test_column_aliases_star(self):
+        sql = 'SELECT * FROM "http://example.com"'
+        expected = [None]
+        result = extract_column_aliases(parse(sql))
+        self.assertEquals(result, expected)
+
     def test_column_aliases_multiple(self):
         sql = 'SELECT SUM(cnt) AS total, country, gender AS dim1 FROM "http://example.com"'
         expected = ['total', None, 'dim1']