Drop support for dead pythons #67

Closed · wants to merge 1 commit
2 changes: 0 additions & 2 deletions .travis.yml

```diff
@@ -3,8 +3,6 @@ language: python
 sudo: false
 
 python:
-  - 2.7
-  - 3.5
   - 3.6
   - 3.7
   - 3.8-dev
```
6 changes: 2 additions & 4 deletions asttokens/asttokens.py

```diff
@@ -17,8 +17,6 @@
 import token
 import tokenize
 import io
-import six
-from six.moves import xrange  # pylint: disable=redefined-builtin
 from .line_numbers import LineNumbers
 from .util import Token, match_token, is_non_coding_token
 from .mark_tokens import MarkTokens
@@ -49,7 +47,7 @@ def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
     # Decode source after parsing to let Python 2 handle coding declarations.
     # (If the encoding was not utf-8 compatible, then even if it parses correctly,
     # we'll fail with a unicode error here.)
-    if isinstance(source_text, six.binary_type):
+    if isinstance(source_text, bytes):
       source_text = source_text.decode('utf8')
 
     self._text = source_text
```
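For context, `six.binary_type` is simply `bytes` on Python 3, so the accepted inputs are unchanged: `ASTTokens` still takes either `str` or `bytes` and decodes `bytes` as UTF-8. A minimal sketch of the preserved behavior, assuming the public `text` property:

```python
import asttokens

# str and bytes sources should produce the same decoded text.
for source in ["x = 1\n", b"x = 1\n"]:
    atok = asttokens.ASTTokens(source, parse=True)
    assert atok.text == "x = 1\n"  # bytes input is decoded as UTF-8
```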
```diff
@@ -170,7 +168,7 @@ def token_range(self, first_token, last_token, include_extra=False):
     Yields all tokens in order from first_token through and including last_token. If
     include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
     """
-    for i in xrange(first_token.index, last_token.index + 1):
+    for i in range(first_token.index, last_token.index + 1):
       if include_extra or not is_non_coding_token(self._tokens[i].type):
         yield self._tokens[i]
 
```
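Behavior is unchanged here too, since Python 3's `range` is lazy like `xrange` was and `token_range` remains a generator. A short usage sketch, assuming the `first_token`/`last_token` attributes that `ASTTokens` attaches to nodes when marking the tree:

```python
import asttokens

atok = asttokens.ASTTokens("print(1 + 2)\n", parse=True)
call = atok.tree.body[0].value  # the Call node
# token_range yields every token from the node's first through its last token.
tokens = list(atok.token_range(call.first_token, call.last_token))
assert "".join(tok.string for tok in tokens) == "print(1+2)"
```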
18 changes: 1 addition & 17 deletions asttokens/mark_tokens.py

```diff
@@ -16,8 +16,6 @@
 import sys
 import token
 
-import six
-
 from . import util
 
 # Mapping of matching braces. To find a token here, look up token[:2].
@@ -164,14 +162,6 @@ def handle_comp(self, open_brace, node, first_token, last_token):
   def visit_listcomp(self, node, first_token, last_token):
     return self.handle_comp('[', node, first_token, last_token)
 
-  if six.PY2:
-    # We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
-    def visit_setcomp(self, node, first_token, last_token):
-      return self.handle_comp('{', node, first_token, last_token)
-
-    def visit_dictcomp(self, node, first_token, last_token):
-      return self.handle_comp('{', node, first_token, last_token)
-
   def visit_comprehension(self, node, first_token, last_token):
     # The 'comprehension' node starts with 'for' but we only get first child; we search backwards
     # to find the 'for' keyword.
```
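The deleted `six.PY2` branch compensated for Python 2, where `SetComp`/`DictComp` nodes reported the position of their first child rather than the opening brace. On Python 3 the node already starts at the brace, which a quick standalone check confirms:

```python
import ast

setcomp = ast.parse("y = {x for x in s}").body[0].value
# Python 3 puts the SetComp's start at the opening '{' (column 4), so no
# special-case visit_setcomp/visit_dictcomp is needed.
assert (setcomp.lineno, setcomp.col_offset) == (1, 4)
```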
```diff
@@ -318,7 +308,7 @@ def visit_num(self, node, first_token, last_token):
   def visit_const(self, node, first_token, last_token):
     if isinstance(node.value, numbers.Number):
       return self.handle_num(node, node.value, first_token, last_token)
-    elif isinstance(node.value, (six.text_type, six.binary_type)):
+    elif isinstance(node.value, (str, bytes)):
       return self.visit_str(node, first_token, last_token)
     return (first_token, last_token)
 
```
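`visit_const` dispatches on the type of `node.value`, and on Python 3 `(str, bytes)` covers exactly what `(six.text_type, six.binary_type)` did. A standalone sketch (not library code) of the constant values involved, using the `ast.Constant` node that newer Pythons parse literals into:

```python
import ast

for src, expected_type in [("42", int), ("'hi'", str), ("b'hi'", bytes)]:
    const = ast.parse(src).body[0].value  # an ast.Constant on Python 3.8+
    assert isinstance(const.value, expected_type)
```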
```diff
@@ -352,12 +342,6 @@ def visit_assignname(self, node, first_token, last_token):
       first_token = last_token = self._code.prev_token(colon)
     return (first_token, last_token)
 
-  if six.PY2:
-    # No need for this on Python3, which already handles 'with' nodes correctly.
-    def visit_with(self, node, first_token, last_token):
-      first = self._code.find_token(first_token, token.NAME, 'with', reverse=True)
-      return (first, last_token)
-
   # Async nodes should typically start with the word 'async'
   # but Python < 3.7 doesn't put the col_offset there
   # AsyncFunctionDef is slightly different because it might have
```
7 changes: 3 additions & 4 deletions asttokens/util.py

```diff
@@ -15,7 +15,6 @@
 import ast
 import collections
 import token
-from six import iteritems
 
 
 def token_repr(tok_type, string):
@@ -90,7 +89,7 @@ def iter_children_astroid(node):
   return node.get_children()
 
 
-SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
+SINGLETONS = {c for n, c in ast.__dict__.items() if isinstance(c, type) and
               issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
 
 def iter_children_ast(node):
```
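`iteritems(d)` and `d.items()` yield the same key/value pairs, so the rewritten comprehension builds the same set. For illustration, a standalone sketch of the classes `SINGLETONS` ends up containing:

```python
import ast

SINGLETONS = {c for n, c in ast.__dict__.items() if isinstance(c, type) and
              issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}

# Context, boolean-op, operator, and comparison classes are all stateless
# singletons in the ast module.
assert {ast.Load, ast.Store, ast.And, ast.Add, ast.Not, ast.Eq} <= SINGLETONS
```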
```diff
@@ -115,9 +114,9 @@ def iter_children_ast(node):
       yield child
 
 
-stmt_class_names = {n for n, c in iteritems(ast.__dict__)
+stmt_class_names = {n for n, c in ast.__dict__.items()
                     if isinstance(c, type) and issubclass(c, ast.stmt)}
-expr_class_names = ({n for n, c in iteritems(ast.__dict__)
+expr_class_names = ({n for n, c in ast.__dict__.items()
                      if isinstance(c, type) and issubclass(c, ast.expr)} |
                     {'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
 
```
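These name sets allow classifying nodes from both `ast` and astroid by class name (the extra `AssignName`, `Const`, etc. entries exist only in astroid). A hypothetical membership check, not part of the library:

```python
import ast

stmt_class_names = {n for n, c in ast.__dict__.items()
                    if isinstance(c, type) and issubclass(c, ast.stmt)}

node = ast.parse("x = 1").body[0]
# Both ast and astroid nodes can be classified via their class name.
assert type(node).__name__ in stmt_class_names  # 'Assign'
```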
2 changes: 0 additions & 2 deletions docs/conf.py

```diff
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
 # asttokens documentation build configuration file, created by
 # sphinx-quickstart on Sat Dec 10 13:00:48 2016.
 #
```
3 changes: 0 additions & 3 deletions setup.cfg

```diff
@@ -19,8 +19,6 @@ classifiers =
     Topic :: Software Development :: Pre-processors
     Environment :: Console
     Operating System :: OS Independent
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.5
     Programming Language :: Python :: 3.6
@@ -31,7 +29,6 @@ classifiers =
 
 [options]
 packages = asttokens
-install_requires = six
 setup_requires = setuptools>=44; wheel; setuptools_scm[toml]>=3.4.3
 
 [options.extras_require]
```
3 changes: 0 additions & 3 deletions tests/test_astroid.py

```diff
@@ -1,6 +1,3 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-
 import astroid
 from astroid.node_classes import NodeNG
 
```
7 changes: 2 additions & 5 deletions tests/test_asttokens.py

```diff
@@ -1,7 +1,4 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
 import ast
-import six
 import token
 import tokenize
 import unittest
@@ -84,7 +81,7 @@ def test_unicode_offsets(self):
     # translate correctly.
     source = "foo('фыва',a,b)\n"
     atok = asttokens.ASTTokens(source)
-    self.assertEqual([six.text_type(t) for t in atok.tokens], [
+    self.assertEqual([str(t) for t in atok.tokens], [
       "NAME:'foo'",
       "OP:'('",
       'STRING:"%s"' % repr('фыва').lstrip('u'),
@@ -128,7 +125,7 @@ def test_coding_declaration(self):
     """ASTTokens should be able to parse a string with a coding declaration."""
     # In Python 2, a unicode string with a coding declaration is a SyntaxError, but we should be
     # able to parse a byte string with a coding declaration (as long as its utf-8 compatible).
-    atok = asttokens.ASTTokens(str("# coding: ascii\n1\n"), parse=True)
+    atok = asttokens.ASTTokens("# coding: ascii\n1\n", parse=True)
     self.assertEqual([str(t) for t in atok.tokens], [
       "COMMENT:'# coding: ascii'",
       "NL:'\\n'",
```
2 changes: 0 additions & 2 deletions tests/test_line_numbers.py

```diff
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
 import unittest
 from .context import asttokens
 
```