Permalink
Browse files

Update Python, astroid, and pylint versions.

Use Python 3.7 now.
Major changes to typing module required corresponding changes here,
including:
  - Changing generic implementation (_GenericAlias, __origin__)
  - Changing lookup for callable attributes requires change to
    (dunder) __polymorphic_tvars__
  - _ForwardRef becomes ForwardRef
  • Loading branch information...
david-yz-liu committed Aug 10, 2018
1 parent a2ce3a7 commit f3286c69cc3c53b313c3db33188cebb677667fdf
Showing with 277 additions and 232 deletions.
  1. +6 −4 .travis.yml
  2. +7 −8 python_ta/parser/__init__.py
  3. +4 −4 python_ta/patches/messages.py
  4. +1 −4 python_ta/reporters/node_printers.py
  5. +1 −2 python_ta/reporters/plain_reporter.py
  6. +28 −22 python_ta/transforms/setendings.py
  7. +39 −40 python_ta/transforms/type_inference_visitor.py
  8. +76 −53 python_ta/typecheck/base.py
  9. +0 −1 python_ta/typecheck/errors.py
  10. +26 −14 python_ta/typecheck/type_store.py
  11. +13 −9 python_ta/typecheck/typeshed/README.md
  12. +5 −0 python_ta/typecheck/typeshed/builtins.pyi
  13. +6 −6 sample_usage/draw_tnodes.py
  14. +2 −2 setup.py
  15. +3 −3 tests/custom_hypothesis_support.py
  16. +1 −1 tests/test_csc108_docstring.py
  17. +21 −22 tests/test_type_constraints/test_tnode_structure.py
  18. +6 −6 tests/test_type_constraints/test_unify.py
  19. +6 −5 tests/test_type_inference/test_annassign.py
  20. +0 −1 tests/test_type_inference/test_assign_tuple.py
  21. +3 −4 tests/test_type_inference/test_attribute.py
  22. +3 −2 tests/test_type_inference/test_binops.py
  23. +1 −1 tests/test_type_inference/test_const.py
  24. +4 −3 tests/test_type_inference/test_dict.py
  25. +0 −2 tests/test_type_inference/test_for.py
  26. +1 −1 tests/test_type_inference/test_function_annotation.py
  27. +3 −3 tests/test_type_inference/test_function_def_inference.py
  28. +5 −5 tests/test_type_inference/test_initializer.py
  29. +2 −1 tests/test_type_inference/test_lambda.py
  30. +1 −1 tests/test_type_inference/test_setcomp.py
  31. +2 −1 tests/test_type_inference/test_starred.py
  32. +1 −1 tests/test_type_inference/test_subscript.py
@@ -1,10 +1,12 @@
language: python
python:
- "3.6"
- "3.7"
dist: xenial
sudo: true
install:
- sudo apt install graphviz
- pip install funcparserlib colorama jinja2 pycodestyle hypothesis pygments graphviz
- pip install 'astroid==1.6.5' 'pylint==1.9'
- pip install .
- sudo apt-get install graphviz
- pip install graphviz
script:
- nosetests
notifications:
@@ -1,7 +1,6 @@
import keyword
import token
from tokenize import generate_tokens
import tokenize
from funcparserlib.parser import some, many, skip, maybe, \
with_forward_decls, NoParseError, Parser
@@ -31,9 +30,9 @@ def name_(string):
INDENT = some(lambda tok: tok.type == token.INDENT)
DEDENT = some(lambda tok: tok.type == token.DEDENT)
NEWLINE = skip(
some(lambda tok: tok.type == tokenize.NL or tok.type == token.NEWLINE))
some(lambda tok: tok.type == token.NL or tok.type == token.NEWLINE))
ENDMARKER = skip(some(lambda tok: tok.type == token.ENDMARKER))
COMMENT = some(lambda tok: tok.type == tokenize.COMMENT)
COMMENT = some(lambda tok: tok.type == token.COMMENT)
NUMBER = some(lambda tok: tok.type == token.NUMBER)
NAME = some(lambda tok: tok.type == token.NAME)
IDENTIFIER = some(
@@ -48,8 +47,8 @@ def name_(string):
AND = skip(name_('and'))
AS = skip(name_('as'))
ASSERT = skip(name_('assert'))
AWAIT = skip(some(lambda tok: tok.type == token.AWAIT))
ASYNC = skip(some(lambda tok: tok.type == token.ASYNC))
AWAIT = skip(name_('await'))
ASYNC = skip(name_('async'))
BREAK = skip(name_('break'))
CLASS = skip(name_('class'))
CONTINUE = skip(name_('continue'))
@@ -371,11 +370,11 @@ def _try_p(tokens, s):
def parse_file(filename):
with open(filename) as f:
tokens = [t for t in generate_tokens(f.readline)
if t.type != tokenize.COMMENT and t.type != tokenize.NL]
if t.type != token.COMMENT and t.type != token.NL]
try:
file_input.parse(tokens)
except CustomParseError as err:
token = tokens[err.orig_error.state.pos]
lineno = token.start[0]
tok = tokens[err.orig_error.state.pos]
lineno = tok.start[0]
print('Syntax error at line {}. Details:'.format(lineno))
print(' ' + err.msg)
@@ -9,10 +9,10 @@ def patch_messages():
"""Patch MessagesHandlerMixIn to pass the node to reporter."""
old_add_message = MessagesHandlerMixIn.add_message
def new_add_message(self, msg_descr, line=None, node=None, args=None,
confidence=UNDEFINED):
old_add_message(self, msg_descr, line, node, args, confidence)
msg_info = self.msgs_store.check_message_id(msg_descr)
def new_add_message(self, msg_id, line=None, node=None, args=None,
confidence=UNDEFINED, col_offset=None):
old_add_message(self, msg_id, line, node, args, confidence, col_offset)
msg_info = self.msgs_store.get_message_definition(msg_id)
self.reporter.handle_node(msg_info, node)
MessagesHandlerMixIn.add_message = new_add_message
@@ -43,10 +43,7 @@ def render_missing_docstring(msg, source_lines=None):
yield from render_context(1, 3, source_lines)
elif isinstance(msg.node, astroid.ClassDef) or isinstance(msg.node, astroid.FunctionDef):
start = msg.node.fromlineno
if isinstance(msg.node, astroid.ClassDef):
end = msg.node.body[0].fromlineno
else:
end = msg.node.args.end_lineno + 1
end = msg.node.body[0].fromlineno
yield from render_context(start, end, source_lines)
# Calculate indentation
body = source_lines[end-1]
@@ -66,8 +66,7 @@
'trailing-newlines'}
# Messages without a source code line to highlight
no_hl = {'always-returning-in-a-loop',
'invalid-name'}
no_hl = {'invalid-name'}
# the "Invalid module name" subsection of "invalid-name" belongs here
@@ -140,7 +140,7 @@ def _is_within_close_bracket(s, index, node):
"""Fix to include right ']'."""
if index >= len(s) - 1:
return False
return s[index] == ']' or s[index + 1] == ']'
return s[index + 1] == ']'
def _is_within_open_bracket(s, index, node):
@@ -261,30 +261,28 @@ def fix_slice(source_code):
-- Step 2) use other transforms to then expand outwards to the '[' or ']'
"""
def _find_colon(node):
if node.last_child():
return
if _get_last_child(node):
set_from_last_child(node)
return node
if not hasattr(node, 'end_lineno'):
set_without_children(node)
line_i = node.parent.fromlineno - 1 # 1-based
char_i = node.parent.col_offset # 0-based
# Search for the first ":" after ending position of parent's value node.
if node.parent.value:
line_i = node.parent.value.fromlineno - 1 # convert 1 to 0 index.
char_i = node.parent.value.end_col_offset
line_i = node.parent.value.end_lineno - 1 # convert 1 to 0 index.
char_i = node.parent.value.end_col_offset + 1
# Search the remaining source code for the ":" char.
while source_code[line_i][char_i] != ':':
while char_i < len(source_code[line_i]) and source_code[line_i][char_i] != ':':
if char_i == len(source_code[line_i]) - 1 or source_code[line_i][char_i] is '#':
char_i = 0
line_i += 1
else:
char_i += 1
node.fromlineno = line_i + 1
node.end_col_offset = char_i
node.col_offset = char_i
node.fromlineno, node.col_offset = line_i + 1, char_i
node.end_lineno, node.end_col_offset = line_i + 1, char_i
return node
return _find_colon
@@ -314,13 +312,15 @@ def fix_start_attributes(node):
node.fromlineno = statement.fromlineno
if node.col_offset is None:
node.col_offset = statement.col_offset
return node
def _set_start_from_first_child(node):
"""Set the start attributes of this node from its first child."""
first_child = next(node.get_children())
node.fromlineno = first_child.fromlineno
node.col_offset = first_child.col_offset
return node
def set_from_last_child(node):
@@ -333,7 +333,7 @@ def set_from_last_child(node):
last_child = _get_last_child(node)
if not last_child:
set_without_children(node)
return
return node
elif not hasattr(last_child, 'end_lineno'): # Newly added for Slice() node.
set_without_children(last_child)
@@ -344,6 +344,7 @@ def set_from_last_child(node):
.format(last_child, node)
node.end_lineno, node.end_col_offset = last_child.end_lineno, last_child.end_col_offset
return node
def set_without_children(node):
@@ -362,6 +363,7 @@ def set_without_children(node):
# whitespace possibilities that may not be reflected in it!
if not hasattr(node, 'end_col_offset'):
node.end_col_offset = node.col_offset + len(node.as_string())
return node
def set_arguments(node):
@@ -374,7 +376,7 @@ def set_arguments(node):
else: # node does not have children.
# TODO: this should be replaced with the string parsing strategy
node.end_lineno, node.end_col_offset = node.fromlineno, node.col_offset
return node
def _get_last_child(node):
"""Returns the last child node, or None.
@@ -419,9 +421,9 @@ def set_endings_from_source(node):
break # skip over comment lines
if pred(source_code[lineno], j, node):
node.end_col_offset = j + 1
return
return node
elif only_consumables and source_code[lineno][j] not in CONSUMABLES:
return
return node
# If that doesn't work, search remaining lines
for i in range(lineno + 1, len(source_code)):
@@ -431,10 +433,11 @@ def set_endings_from_source(node):
break # skip over comment lines
if pred(source_code[i], j, node):
node.end_col_offset, node.end_lineno = j + 1, i + 1
return
return node
# only consume inert characters.
elif source_code[i][j] not in CONSUMABLES:
return
return node
return node
return set_endings_from_source
@@ -459,18 +462,19 @@ def set_start_from_source(node):
for j in range(min(len(source_code[lineno]) - 1, col_offset), -1, -1):
if pred(source_code[lineno], j, node):
node.col_offset = j
return
return node
# If that doesn't work, search remaining lines
for i in range(lineno - 1, -1, -1):
# Search each character, right-to-left
for j in range(len(source_code[i]) - 1, -1, -1):
if pred(source_code[i], j, node):
node.end_col_offset, node.end_lineno = j, i + 1
return
return node
# only consume inert characters.
elif source_code[i][j] not in CONSUMABLES:
return
return node
return node
return set_start_from_source
@@ -555,6 +559,8 @@ def h(node):
if isinstance(node.parent, astroid.Call) and len(node.parent.args) == 1:
node.fromlineno, node.col_offset, node.end_lineno, node.end_col_offset = prev
return node
return h
Oops, something went wrong.

0 comments on commit f3286c6

Please sign in to comment.