diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index fd9c919ce6a0d1..251ce2b864a9d8 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1057,6 +1057,23 @@ async def bar(): pass
     DEDENT     ''            (6, 12) (6, 12)
     """)
 
+    def test_newline_after_parenthesized_block_with_comment(self):
+        self.check_tokenize('''\
+[
+    # A comment here
+    1
+]
+''', """\
+    OP         '['           (1, 0) (1, 1)
+    NL         '\\n'          (1, 1) (1, 2)
+    COMMENT    '# A comment here' (2, 4) (2, 20)
+    NL         '\\n'          (2, 20) (2, 21)
+    NUMBER     '1'           (3, 4) (3, 5)
+    NL         '\\n'          (3, 5) (3, 6)
+    OP         ']'           (4, 0) (4, 1)
+    NEWLINE    '\\n'          (4, 1) (4, 2)
+    """)
+
 class GenerateTokensTest(TokenizeTest):
     def check_tokenize(self, s, expected):
         # Format the tokens in s in a table format.
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 472d4174726354..1e8f785a331ac5 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -2007,6 +2007,9 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
         tok->atbol = 1;
         if (blankline || tok->level > 0) {
             if (tok->tok_extra_tokens) {
+                if (tok->comment_newline) {
+                    tok->comment_newline = 0;
+                }
                 p_start = tok->start;
                 p_end = tok->cur;
                 return MAKE_TOKEN(NL);
@@ -2015,9 +2018,9 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
         }
         if (tok->comment_newline && tok->tok_extra_tokens) {
             tok->comment_newline = 0;
-            p_start = tok->start;
-            p_end = tok->cur;
-            return MAKE_TOKEN(NL);
+            p_start = tok->start;
+            p_end = tok->cur;
+            return MAKE_TOKEN(NL);
         }
         p_start = tok->start;
         p_end = tok->cur - 1; /* Leave '\n' out of the string */
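
For reviewers: a quick way to exercise the path this patch touches from Python, assuming a 3.12+ build where the `tokenize` module is backed by this C tokenizer with extra tokens (COMMENT/NL) enabled. This is an illustrative sketch, not part of the patch:

    import io
    import tokenize

    # Same shape as the new test case: a comment and a value inside brackets.
    source = "[\n    # A comment here\n    1\n]\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tok)

With the fix applied, each line inside the brackets should end in an NL token (including the one after the comment), and only the line that closes the expression should produce a NEWLINE token, matching the expectations pinned down by the new test.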