diff --git a/src/black/brackets.py b/src/black/brackets.py
index bb865a0d5b7..958fc19b33b 100644
--- a/src/black/brackets.py
+++ b/src/black/brackets.py
@@ -136,7 +136,7 @@ def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
             return 0
 
         priority = priority or self.max_delimiter_priority()
-        return sum(1 for p in self.delimiters.values() if p == priority)
+        return list(self.delimiters.values()).count(priority)
 
     def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
         """In a for loop, or comprehension, the variables are often unpacks.
@@ -317,7 +317,7 @@ def max_delimiter_priority_in_atom(node: LN) -> Priority:
 
     first = node.children[0]
     last = node.children[-1]
-    if not (first.type == token.LPAR and last.type == token.RPAR):
+    if first.type != token.LPAR or last.type != token.RPAR:
         return 0
 
     bt = BracketTracker()
diff --git a/src/black/comments.py b/src/black/comments.py
index c7513c21ef5..2bec29003cf 100644
--- a/src/black/comments.py
+++ b/src/black/comments.py
@@ -210,8 +210,7 @@ def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
             ):
                 prev_sibling = prev_sibling.prev_sibling
                 siblings.insert(0, prev_sibling)
-            for sibling in siblings:
-                yield sibling
+            yield from siblings
         elif leaf.parent is not None:
             yield leaf.parent
         return
@@ -245,17 +244,18 @@ def is_fmt_on(container: LN) -> bool:
 
 def contains_fmt_on_at_column(container: LN, column: int) -> bool:
     """Determine if children at a given column have formatting switched on."""
-    for child in container.children:
-        if (
-            isinstance(child, Node)
-            and first_leaf_column(child) == column
-            or isinstance(child, Leaf)
-            and child.column == column
-        ):
-            if is_fmt_on(child):
-                return True
-
-    return False
+    return any(
+        (
+            (
+                isinstance(child, Node)
+                and first_leaf_column(child) == column
+                or isinstance(child, Leaf)
+                and child.column == column
+            )
+        )
+        and is_fmt_on(child)
+        for child in container.children
+    )
 
 
 def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
@@ -265,8 +265,7 @@ def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
     of the more common static analysis tools for python (e.g. mypy, flake8,
     pylint).
""" - for comment in comment_list: - if comment.value.startswith(("# type:", "# noqa", "# pylint:")): - return True - - return False + return any( + comment.value.startswith(("# type:", "# noqa", "# pylint:")) + for comment in comment_list + ) diff --git a/src/black/linegen.py b/src/black/linegen.py index 76b553a959a..27e7470cee5 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -423,10 +423,9 @@ def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator ): current_leaves = tail_leaves if body_leaves else head_leaves current_leaves.append(leaf) - if current_leaves is head_leaves: - if leaf.type in OPENING_BRACKETS: - matching_bracket = leaf - current_leaves = body_leaves + if current_leaves is head_leaves and leaf.type in OPENING_BRACKETS: + matching_bracket = leaf + current_leaves = body_leaves if not matching_bracket: raise CannotSplit("No brackets found") @@ -460,15 +459,17 @@ def right_hand_split( opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None for leaf in reversed(line.leaves): - if current_leaves is body_leaves: - if leaf is opening_bracket: - current_leaves = head_leaves if body_leaves else tail_leaves + if current_leaves is body_leaves and leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves current_leaves.append(leaf) - if current_leaves is tail_leaves: - if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf - current_leaves = body_leaves + if ( + current_leaves is tail_leaves + and leaf.type in CLOSING_BRACKETS + and id(leaf) not in omit + ): + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves if not (opening_bracket and closing_bracket and head_leaves): # If there is no opening or closing_bracket that means the split failed and # all content is in the tail. 
Otherwise, if `head_leaves` are empty, it means @@ -573,7 +574,7 @@ def bracket_split_build_line( no_commas = ( original.is_def and opening_bracket.value == "(" - and not any(leaf.type == token.COMMA for leaf in leaves) + and all(leaf.type != token.COMMA for leaf in leaves) ) if original.is_import or no_commas: @@ -812,35 +813,40 @@ def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: ): return False - if is_walrus_assignment(node): - if parent.type in [ - syms.annassign, - syms.expr_stmt, - syms.assert_stmt, - syms.return_stmt, - # these ones aren't useful to end users, but they do please fuzzers - syms.for_stmt, - syms.del_stmt, - ]: - return False + if is_walrus_assignment(node) and parent.type in [ + syms.annassign, + syms.expr_stmt, + syms.assert_stmt, + syms.return_stmt, + # these ones aren't useful to end users, but they do please fuzzers + syms.for_stmt, + syms.del_stmt, + ]: + return False first = node.children[0] last = node.children[-1] if first.type == token.LPAR and last.type == token.RPAR: - middle = node.children[1] - # make parentheses invisible - first.value = "" # type: ignore - last.value = "" # type: ignore - maybe_make_parens_invisible_in_atom(middle, parent=parent) + return _extracted_from_maybe_make_parens_invisible_in_atom_28( + node, first, last, parent + ) - if is_atom_with_invisible_parens(middle): - # Strip the invisible parens from `middle` by replacing - # it with the child in-between the invisible parens - middle.replace(middle.children[1]) + return True - return False - return True +def _extracted_from_maybe_make_parens_invisible_in_atom_28(node, first, last, parent): + middle = node.children[1] + # make parentheses invisible + first.value = "" # type: ignore + last.value = "" # type: ignore + maybe_make_parens_invisible_in_atom(middle, parent=parent) + + if is_atom_with_invisible_parens(middle): + # Strip the invisible parens from `middle` by replacing + # it with the child in-between the invisible parens + middle.replace(middle.children[1]) + + return False def should_split_line(line: Line, opening_bracket: Leaf) -> bool: diff --git a/src/black/lines.py b/src/black/lines.py index 63225c0e6d3..74c7ea461c1 100644 --- a/src/black/lines.py +++ b/src/black/lines.py @@ -171,11 +171,10 @@ def is_triple_quoted_string(self) -> bool: def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: """If so, needs to be split before emitting.""" - for leaf in self.leaves: - if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: - return True - - return False + return any( + leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit + for leaf in self.leaves + ) def contains_uncollapsable_type_comments(self) -> bool: ignored_ids = set() @@ -202,12 +201,14 @@ def contains_uncollapsable_type_comments(self) -> bool: comment_seen = False for leaf_id, comments in self.comments.items(): for comment in comments: - if is_type_comment(comment): - if comment_seen or ( + if is_type_comment(comment) and ( + comment_seen + or ( not is_type_comment(comment, " ignore") and leaf_id not in ignored_ids - ): - return True + ) + ): + return True comment_seen = True @@ -594,7 +595,7 @@ def can_be_split(line: Line) -> bool: elif leaf.type == token.DOT: dot_count += 1 elif leaf.type == token.NAME: - if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + if next.type != token.DOT and next.type not in OPENING_BRACKETS: return False elif leaf.type not in CLOSING_BRACKETS: @@ -637,13 +638,12 @@ def 
     # a bracket.
     first = line.leaves[0]
     second = line.leaves[1]
-    if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
-        if _can_omit_opening_paren(line, first=first, line_length=line_length):
-            return True
-
-        # Note: we are not returning False here because a line might have *both*
-        # a leading opening bracket and a trailing closing bracket. If the
-        # opening bracket doesn't match our rule, maybe the closing will.
+    if (
+        first.type in OPENING_BRACKETS
+        and second.type not in CLOSING_BRACKETS
+        and _can_omit_opening_paren(line, first=first, line_length=line_length)
+    ):
+        return True
 
     penultimate = line.leaves[-2]
     last = line.leaves[-1]
diff --git a/src/black/parsing.py b/src/black/parsing.py
index 0b8d984cedd..4eb40eddab3 100644
--- a/src/black/parsing.py
+++ b/src/black/parsing.py
@@ -102,8 +102,7 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
 
 def lib2to3_unparse(node: Node) -> str:
     """Given a lib2to3 node, return its string representation."""
-    code = str(node)
-    return code
+    return str(node)
 
 
 def parse_single_version(
diff --git a/src/black/trans.py b/src/black/trans.py
index 023dcd3618a..f25b765055f 100644
--- a/src/black/trans.py
+++ b/src/black/trans.py
@@ -134,7 +134,7 @@ def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]
         """
        # Optimization to avoid calling `self.do_match(...)` when the line does
        # not contain any string.
-        if not any(leaf.type == token.STRING for leaf in line.leaves):
+        if all(leaf.type != token.STRING for leaf in line.leaves):
             raise CannotTransform("There are no strings in this line.")
 
         match_result = self.do_match(line)
diff --git a/src/blib2to3/pgen2/conv.py b/src/blib2to3/pgen2/conv.py
index 78165217a1b..3e870361746 100644
--- a/src/blib2to3/pgen2/conv.py
+++ b/src/blib2to3/pgen2/conv.py
@@ -67,12 +67,10 @@ def parse_graminit_h(self, filename):
             return False
         self.symbol2number = {}
         self.number2symbol = {}
-        lineno = 0
-        for line in f:
-            lineno += 1
+        for lineno, line in enumerate(f, start=1):
             mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
             if not mo and line.strip():
                 print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
             else:
                 symbol, number = mo.groups()
                 number = int(number)
@@ -200,16 +198,13 @@ def parse_graminit_c(self, filename):
         mo = re.match(r"static label labels\[(\d+)\] = {$", line)
         assert mo, (lineno, line)
         nlabels = int(mo.group(1))
-        for i in range(nlabels):
+        for _ in range(nlabels):
             lineno, line = lineno + 1, next(f)
             mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
             assert mo, (lineno, line)
             x, y = mo.groups()
             x = int(x)
-            if y == "0":
-                y = None
-            else:
-                y = eval(y)
+            y = None if y == "0" else eval(y)
             labels.append((x, y))
         lineno, line = lineno + 1, next(f)
         assert line == "};\n", (lineno, line)
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index 47c8f02b4f5..04f9d25a266 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -175,16 +175,16 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
                         self.push(t, self.grammar.dfas[t], newstate, context)
                         break  # To continue the outer while loop
             else:
-                if (0, state) in arcs:
-                    # An accepting state, pop it and try something else
-                    self.pop()
-                    if not self.stack:
-                        # Done parsing, but another token is input
-                        raise ParseError("too much input", type, value, context)
-                else:
+                if (0, state) not in arcs:
                     # No success finding a transition
raise ParseError("bad input", type, value, context) + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", type, value, context) + def classify(self, type: int, value: Optional[Text], context: Context) -> int: """Turn a token into a label. (Internal)""" if type == token.NAME: diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 564ebbd1184..92159da4cb8 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -95,10 +95,9 @@ def make_label(self, c: PgenGrammar, label: Text) -> int: # A symbol name (a non-terminal) if label in c.symbol2label: return c.symbol2label[label] - else: - c.labels.append((c.symbol2number[label], None)) - c.symbol2label[label] = ilabel - return ilabel + c.labels.append((c.symbol2number[label], None)) + c.symbol2label[label] = ilabel + return ilabel else: # A named token (NAME, NUMBER, STRING) itoken = getattr(token, label, None) @@ -107,9 +106,7 @@ def make_label(self, c: PgenGrammar, label: Text) -> int: if itoken in c.tokens: return c.tokens[itoken] else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel + return self._extracted_from_make_label_22(c, itoken, ilabel) else: # Either a keyword or an operator assert label[0] in ('"', "'"), label @@ -118,19 +115,21 @@ def make_label(self, c: PgenGrammar, label: Text) -> int: # A keyword if value in c.keywords: return c.keywords[value] - else: - c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel - return ilabel + c.labels.append((token.NAME, value)) + c.keywords[value] = ilabel + return ilabel else: # An operator (any non-numeric token) itoken = grammar.opmap[value] # Fails if unknown token if itoken in c.tokens: return c.tokens[itoken] else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel + return self._extracted_from_make_label_22(c, itoken, ilabel) + + def _extracted_from_make_label_22(self, c, itoken, ilabel): + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel def addfirstsets(self) -> None: names = list(self.dfas.keys()) @@ -285,17 +284,16 @@ def parse_rhs(self) -> Tuple["NFAState", "NFAState"]: a, z = self.parse_alt() if self.value != "|": return a, z - else: - aa = NFAState() - zz = NFAState() + aa = NFAState() + zz = NFAState() + aa.addarc(a) + z.addarc(zz) + while self.value == "|": + self.gettoken() + a, z = self.parse_alt() aa.addarc(a) z.addarc(zz) - while self.value == "|": - self.gettoken() - a, z = self.parse_alt() - aa.addarc(a) - z.addarc(zz) - return aa, zz + return aa, zz def parse_alt(self) -> Tuple["NFAState", "NFAState"]: # ALT: ITEM+ @@ -415,10 +413,7 @@ def __eq__(self, other: Any) -> bool: # would invoke this method recursively, with cycles... if len(self.arcs) != len(other.arcs): return False - for label, next in self.arcs.items(): - if next is not other.arcs.get(label): - return False - return True + return all(next is other.arcs.get(label) for label, next in self.arcs.items()) __hash__: Any = None # For Py3 compatibility. 
diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py
index 7843467e012..a9f25780341 100644
--- a/src/blib2to3/pytree.py
+++ b/src/blib2to3/pytree.py
@@ -233,8 +233,7 @@ def get_suffix(self) -> Text:
         next_sib = self.next_sibling
         if next_sib is None:
             return ""
-        prefix = next_sib.prefix
-        return prefix
+        return next_sib.prefix
 
 
 class Node(Base):
@@ -483,16 +482,16 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL:
     strictly bottom-up.
     """
    type, value, context, children = raw_node
-    if children or type in gr.number2symbol:
-        # If there's exactly one child, return that child instead of
-        # creating a new node.
-        assert children is not None
-        if len(children) == 1:
-            return children[0]
-        return Node(type, children, context=context)
-    else:
+    if not children and type not in gr.number2symbol:
         return Leaf(type, value or "", context=context)
+    # If there's exactly one child, return that child instead of
+    # creating a new node.
+    assert children is not None
+    if len(children) == 1:
+        return children[0]
+    return Node(type, children, context=context)
+
 
 
 _Results = Dict[Text, NL]
 
@@ -697,10 +696,10 @@ def _submatch(self, node, results=None) -> bool:
             return False
         if len(self.content) != len(node.children):
             return False
-        for subpattern, child in zip(self.content, node.children):
-            if not subpattern.match(child, results):
-                return False
-        return True
+        return all(
+            subpattern.match(child, results)
+            for subpattern, child in zip(self.content, node.children)
+        )
 
 
 class WildcardPattern(BasePattern):
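Note on the first hunk in src/black/brackets.py: dict.values() returns a view object with no .count() method, which is why the counting goes through list(...) there. The snippet below is a standalone sketch, not part of the patch; the leaf-id to priority mapping and its values are made up, and it also spells out the De Morgan equivalence behind the not-any/all rewrites used in several other hunks.

# Standalone sketch, not part of the patch. The dict mimics
# BracketTracker.delimiters (leaf id -> delimiter priority) with made-up values.
delimiters = {101: 10, 102: 4, 103: 10}
priority = 10

# The generator-based count and the list-based count agree.
assert sum(1 for p in delimiters.values() if p == priority) == 2
assert list(delimiters.values()).count(priority) == 2

# A bare dict view has no .count() method, so it must be materialized first.
try:
    delimiters.values().count(priority)  # type: ignore[attr-defined]
except AttributeError:
    pass

# De Morgan's law backs the not-any(...) -> all(...) rewrites elsewhere in the diff.
types = ["NAME", "LPAR", "STRING"]
assert (not any(t == "COMMA" for t in types)) == all(t != "COMMA" for t in types)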