handle multi-line tokens in token_at_cursor
offset tracking was wrong in the case of tokens spanning multiple lines (e.g. docstrings)
minrk committed Oct 2, 2015
1 parent 32ee8c1 commit a85e1dc
Showing 2 changed files with 34 additions and 8 deletions.
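Background for the fix: Python's tokenizer emits a triple-quoted string as a single token whose start and end fall on different lines, while NEWLINE/NL tokens appear only once per logical line. The pre-fix code advanced a single running offset by len(tok.line) on each NEWLINE/NL, which undercounts whenever a token spans several lines. A minimal sketch of that tokenizer behavior, using the stdlib tokenize module in place of IPython's tokenize2 shim:

import tokenize
from io import StringIO

cell = '"""\ndocstring\n"""\nb = hello("string", there)\n'
for tok in tokenize.generate_tokens(StringIO(cell).readline):
    print(tok.start, tok.end, repr(tok.string)[:32])
# The STRING token reports start=(1, 0) and end=(3, 3): it covers three
# physical lines, yet only one NEWLINE token follows it, so adding
# len(tok.line) once per NEWLINE/NL understates the offsets of later lines.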
19 changes: 19 additions & 0 deletions IPython/utils/tests/test_tokenutil.py
@@ -53,6 +53,25 @@ def test_multiline():
     for i in range(start, start + len(expected)):
         expect_token(expected, cell, i)
 
+def test_multiline_token():
+    cell = '\n'.join([
+        '"""\n\nxxxxxxxxxx\n\n"""',
+        '5, """',
+        'docstring',
+        'multiline token',
+        '""", [',
+        '2, 3, "complicated"]',
+        'b = hello("string", there)'
+    ])
+    expected = 'hello'
+    start = cell.index(expected) + 1
+    for i in range(start, start + len(expected)):
+        expect_token(expected, cell, i)
+    expected = 'hello'
+    start = cell.index(expected) + 1
+    for i in range(start, start + len(expected)):
+        expect_token(expected, cell, i)
+
 def test_nested_call():
     cell = "foo(bar(a=5), b=10)"
     expected = 'foo'
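A quick check of what the new test asserts (a hypothetical interactive usage, not part of the commit): with the cursor anywhere inside hello, token_at_cursor should return that name even though the docstring earlier in the cell spans multiple lines.

from IPython.utils.tokenutil import token_at_cursor

cell = '"""\ndocstring\n"""\nb = hello("string", there)'
cursor_pos = cell.index('hello') + 1  # one character into the name
print(token_at_cursor(cell, cursor_pos))  # prints: hello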
23 changes: 15 additions & 8 deletions IPython/utils/tokenutil.py
@@ -72,15 +72,24 @@ def token_at_cursor(cell, cursor_pos=0):
     cell = cast_unicode_py2(cell)
     names = []
     tokens = []
-    offset = 0
     call_names = []
+
+    offsets = {1: 0} # lines start at 1
     for tup in generate_tokens(StringIO(cell).readline):
 
         tok = Token(*tup)
 
         # token, text, start, end, line = tup
-        start_col = tok.start[1]
-        end_col = tok.end[1]
+        start_line, start_col = tok.start
+        end_line, end_col = tok.end
+        if end_line + 1 not in offsets:
+            # keep track of offsets for each line
+            lines = tok.line.splitlines(True)
+            for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
+                if lineno not in offsets:
+                    offsets[lineno] = offsets[lineno-1] + len(line)
+
+        offset = offsets[start_line]
         # allow '|foo' to find 'foo' at the beginning of a line
         boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
         if offset + start_col >= boundary:
@@ -103,14 +112,12 @@ def token_at_cursor(cell, cursor_pos=0):
         elif tok.text == ')' and call_names:
             call_names.pop(-1)
 
-        if offset + end_col > cursor_pos:
+        tokens.append(tok)
+
+        if offsets[end_line] + end_col > cursor_pos:
             # we found the cursor, stop reading
             break
-
-        tokens.append(tok)
-        if tok.token in (tokenize2.NEWLINE, tokenize2.NL):
-            offset += len(tok.line)
 
     if call_names:
         return call_names[-1]
     elif names:
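The core of the fix is the offsets dict: a lazily built map from 1-indexed line numbers to character offsets, filled in for every line a token spans using the token's .line attribute (which contains all spanned lines for a multi-line token). A standalone sketch of that bookkeeping, with illustrative names and the stdlib tokenize module in place of IPython's tokenize2:

import tokenize
from io import StringIO

def line_offsets(cell):
    # Map 1-indexed line numbers to their character offsets within cell.
    offsets = {1: 0}  # line 1 starts at offset 0
    for tok in tokenize.generate_tokens(StringIO(cell).readline):
        start_line, end_line = tok.start[0], tok.end[0]
        if end_line + 1 not in offsets:
            # tok.line holds every physical line this token spans
            lines = tok.line.splitlines(True)
            for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
                # offset of line N = offset of line N-1 plus that line's length
                offsets.setdefault(lineno, offsets[lineno - 1] + len(line))
    return offsets

cell = '"""\ndocstring\n"""\nb = hello("str", there)\n'
print(line_offsets(cell))  # {1: 0, 2: 4, 3: 14, 4: 18, 5: 42}

With per-line offsets available, the absolute cursor position of a token boundary becomes offsets[line] + column, which stays correct across multi-line tokens.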
