From 895e109ca169e240e0f26e087b93c3b47df2232a Mon Sep 17 00:00:00 2001
From: Dick Marinus
Date: Sat, 17 Sep 2016 10:10:26 +0200
Subject: [PATCH] Support python-sqlparse 0.2

---
 mycli/packages/completion_engine.py | 9 +++++----
 setup.py                            | 2 +-
 tests/test_sqlexecute.py            | 6 +++---
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/mycli/packages/completion_engine.py b/mycli/packages/completion_engine.py
index 91a22c90..774bb1ef 100644
--- a/mycli/packages/completion_engine.py
+++ b/mycli/packages/completion_engine.py
@@ -2,6 +2,7 @@
 import sys
 import sqlparse
 from sqlparse.sql import Comparison, Identifier, Where
+from sqlparse.compat import text_type
 from .parseutils import last_word, extract_tables, find_prev_keyword
 from .special import parse_special_command
 
@@ -56,7 +57,7 @@ def suggest_type(full_text, text_before_cursor):
     stmt_start, stmt_end = 0, 0
 
     for statement in parsed:
-        stmt_len = len(statement.to_unicode())
+        stmt_len = len(text_type(statement))
         stmt_start, stmt_end = stmt_end, stmt_end + stmt_len
 
         if stmt_end >= current_pos:
@@ -79,7 +80,7 @@ def suggest_type(full_text, text_before_cursor):
     if tok1 and tok1.value == '\\':
         return suggest_special(text_before_cursor)
 
-    last_token = statement and statement.token_prev(len(statement.tokens)) or ''
+    last_token = statement and statement.token_prev(len(statement.tokens))[1] or ''
 
     return suggest_based_on_last_token(last_token, text_before_cursor,
                                        full_text, identifier)
@@ -157,7 +158,7 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier
 
         # Check for a subquery expression (cases 3 & 4)
         where = p.tokens[-1]
-        prev_tok = where.token_prev(len(where.tokens) - 1)
+        idx, prev_tok = where.token_prev(len(where.tokens) - 1)
 
         if isinstance(prev_tok, Comparison):
            # e.g. "SELECT foo FROM bar WHERE foo = ANY("
@@ -170,7 +171,7 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier
             return column_suggestions
 
         # Get the token before the parens
-        prev_tok = p.token_prev(len(p.tokens) - 1)
+        idx, prev_tok = p.token_prev(len(p.tokens) - 1)
         if prev_tok and prev_tok.value and prev_tok.value.lower() == 'using':
             # tbl1 INNER JOIN tbl2 USING (col1, col2)
             tables = extract_tables(full_text)
diff --git a/setup.py b/setup.py
index f21bdb4c..31f2fde4 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
     'Pygments >= 2.0',  # Pygments has to be Capitalcased. WTF?
     'prompt_toolkit>=1.0.0,<1.1.0',
     'PyMySQL >= 0.6.2',
-    'sqlparse == 0.1.19',
+    'sqlparse >= 0.2.0',
     'configobj >= 5.0.6',
 ]
 
diff --git a/tests/test_sqlexecute.py b/tests/test_sqlexecute.py
index 69b9a6d2..d9ed6253 100644
--- a/tests/test_sqlexecute.py
+++ b/tests/test_sqlexecute.py
@@ -90,10 +90,10 @@ def test_invalid_column_name(executor):
 @dbtest
 def test_unicode_support_in_output(executor):
     run(executor, "create table unicodechars(t text)")
-    run(executor, "insert into unicodechars (t) values ('é')")
+    run(executor, u"insert into unicodechars (t) values ('é')")
 
     # See issue #24, this raises an exception without proper handling
-    assert u'é' in run(executor, "select * from unicodechars", join=True)
+    assert u'é' in run(executor, u"select * from unicodechars", join=True)
 
 @dbtest
 def test_expanded_output(executor):
@@ -247,7 +247,7 @@ def test_cd_command_current_dir(executor):
 
 @dbtest
 def test_unicode_support(executor):
-    assert u'日本語' in run(executor, "SELECT '日本語' AS japanese;", join=True)
+    assert u'日本語' in run(executor, u"SELECT '日本語' AS japanese;", join=True)
 
 @dbtest
 def test_favorite_query_multiline_statement(executor):