Skip to content

Commit

Permalink
Move UP034 to use TokenKind instead of Tok (#11424)
Browse files Browse the repository at this point in the history
## Summary

This PR follows up from #11420 to move `UP034` to use `TokenKind`
instead of `Tok`.

The main reason for a separate PR is to make the review easier.
This required a lot more updates because the rule used an index (`i`) to
keep track of the current position in the token vector. Now, as it's
just an iterator, we just use `next` to move the iterator forward and
extract the relevant information.

This is part of #11401

## Test Plan

`cargo test`
  • Loading branch information
dhruvmanila committed May 14, 2024
1 parent bb1c107 commit 96f6288
Show file tree
Hide file tree
Showing 2 changed files with 60 additions and 92 deletions.
2 changes: 1 addition & 1 deletion crates/ruff_linter/src/checkers/tokens.rs
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ pub(crate) fn check_tokens(
}

if settings.rules.enabled(Rule::ExtraneousParentheses) {
pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens, locator);
pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens.kinds(), locator);
}

if source_type.is_stub() && settings.rules.enabled(Rule::TypeCommentInStub) {
Expand Down
150 changes: 59 additions & 91 deletions crates/ruff_linter/src/rules/pyupgrade/rules/extraneous_parentheses.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_text_size::{Ranged, TextRange};
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_text_size::TextRange;

use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
Expand Down Expand Up @@ -37,93 +36,70 @@ impl AlwaysFixableViolation for ExtraneousParentheses {
}

// See: https://github.com/asottile/pyupgrade/blob/97ed6fb3cf2e650d4f762ba231c3f04c41797710/pyupgrade/_main.py#L148
fn match_extraneous_parentheses(tokens: &[LexResult], mut i: usize) -> Option<(usize, usize)> {
i += 1;
fn match_extraneous_parentheses(tokens: &mut TokenKindIter) -> Option<(TextRange, TextRange)> {
// Store the location of the extraneous opening parenthesis.
let start_range = loop {
let (token, range) = tokens.next()?;

loop {
if i >= tokens.len() {
return None;
}
let Ok((tok, _)) = &tokens[i] else {
return None;
};
match tok {
Tok::Comment(..) | Tok::NonLogicalNewline => {
i += 1;
match token {
TokenKind::Comment | TokenKind::NonLogicalNewline => {
continue;
}
Tok::Lpar => {
break;
TokenKind::Lpar => {
break range;
}
_ => {
return None;
}
}
}
};

// Store the location of the extraneous opening parenthesis.
let start = i;
// Verify that we're not in an empty tuple.
let mut empty_tuple = true;

// Verify that we're not in a tuple or coroutine.
let mut depth = 1u32;
while depth > 0 {
i += 1;
if i >= tokens.len() {
return None;
}
let Ok((tok, _)) = &tokens[i] else {
return None;
};

// Store the location of the extraneous closing parenthesis.
let end_range = loop {
let (token, range) = tokens.next()?;

// If we find a comma or a yield at depth 1 or 2, it's a tuple or coroutine.
if depth == 1 && matches!(tok, Tok::Comma | Tok::Yield) {
if depth == 1 && matches!(token, TokenKind::Comma | TokenKind::Yield) {
return None;
} else if matches!(tok, Tok::Lpar | Tok::Lbrace | Tok::Lsqb) {
} else if matches!(token, TokenKind::Lpar | TokenKind::Lbrace | TokenKind::Lsqb) {
depth = depth.saturating_add(1);
} else if matches!(tok, Tok::Rpar | Tok::Rbrace | Tok::Rsqb) {
} else if matches!(token, TokenKind::Rpar | TokenKind::Rbrace | TokenKind::Rsqb) {
depth = depth.saturating_sub(1);
}
}

// Store the location of the extraneous closing parenthesis.
let end = i;
if depth == 0 {
break range;
}

// Verify that we're not in an empty tuple.
if (start + 1..i).all(|i| {
matches!(
tokens[i],
Ok((Tok::Comment(..) | Tok::NonLogicalNewline, _))
)
}) {
if !matches!(token, TokenKind::Comment | TokenKind::NonLogicalNewline) {
empty_tuple = false;
}
};

if empty_tuple {
return None;
}

// Find the next non-coding token.
i += 1;
loop {
if i >= tokens.len() {
return None;
}
let Ok((tok, _)) = &tokens[i] else {
return None;
};
match tok {
Tok::Comment(..) | Tok::NonLogicalNewline => {
i += 1;
}
let token = loop {
let (token, _) = tokens.next()?;

match token {
TokenKind::Comment | TokenKind::NonLogicalNewline => continue,
_ => {
break;
break token;
}
}
}

if i >= tokens.len() {
return None;
}
let Ok((tok, _)) = &tokens[i] else {
return None;
};
if matches!(tok, Tok::Rpar) {
Some((start, end))

if matches!(token, TokenKind::Rpar) {
Some((start_range, end_range))
} else {
None
}
Expand All @@ -132,36 +108,28 @@ fn match_extraneous_parentheses(tokens: &[LexResult], mut i: usize) -> Option<(u
/// UP034
pub(crate) fn extraneous_parentheses(
diagnostics: &mut Vec<Diagnostic>,
tokens: &[LexResult],
mut tokens: TokenKindIter,
locator: &Locator,
) {
let mut i = 0;
while i < tokens.len() {
if matches!(tokens[i], Ok((Tok::Lpar, _))) {
if let Some((start, end)) = match_extraneous_parentheses(tokens, i) {
i = end + 1;
let Ok((_, start_range)) = &tokens[start] else {
return;
};
let Ok((.., end_range)) = &tokens[end] else {
return;
};
let mut diagnostic = Diagnostic::new(
ExtraneousParentheses,
TextRange::new(start_range.start(), end_range.end()),
);
let contents = locator.slice(TextRange::new(start_range.start(), end_range.end()));
diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
contents[1..contents.len() - 1].to_string(),
start_range.start(),
end_range.end(),
)));
diagnostics.push(diagnostic);
} else {
i += 1;
}
} else {
i += 1;
while let Some((token, _)) = tokens.next() {
if !matches!(token, TokenKind::Lpar) {
continue;
}

let Some((start_range, end_range)) = match_extraneous_parentheses(&mut tokens) else {
continue;
};

let mut diagnostic = Diagnostic::new(
ExtraneousParentheses,
TextRange::new(start_range.start(), end_range.end()),
);
let contents = locator.slice(TextRange::new(start_range.start(), end_range.end()));
diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
contents[1..contents.len() - 1].to_string(),
start_range.start(),
end_range.end(),
)));
diagnostics.push(diagnostic);
}
}

0 comments on commit 96f6288

Please sign in to comment.