diff --git a/CHANGES.md b/CHANGES.md index 7fad9f3b..f19d58fc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -13,6 +13,8 @@ ``` robotidy -c NormalizeAssignments:equal_sign_type=equal_sign -c NormalizeAssignments:equal_sign_type_variables=remove ``` +- New `OrderTags` (non default) transformer. It orders tags in lexicographic order ([#205](https://github.com/MarketSquare/robotframework-tidy/issues/205)) +- New `NormalizeTags` (non default) transformer. It normalizes tag name case and removes duplicates ([#212](https://github.com/MarketSquare/robotframework-tidy/issues/212)) ### Features - It is now possible to provide source paths in configuration file ([#154](https://github.com/MarketSquare/robotframework-tidy/issues/154)) diff --git a/README.md b/README.md index eb067c12..0a95ea5d 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ Installation You can install Robotidy simply by running: ``` -pip install robotframework-tidy +pip install -U robotframework-tidy ``` Usage @@ -56,6 +56,9 @@ All command line options can be displayed in help message by executing: robotidy --help ``` +See [documentation](https://robotidy.readthedocs.io/en/latest/configuration/index.html) for information how to configure +robotidy. + Example ------- Ugly code before transforming with robotidy: diff --git a/docs/source/transformers/NormalizeTags.rst b/docs/source/transformers/NormalizeTags.rst index 14cf9556..2cdb30c5 100644 --- a/docs/source/transformers/NormalizeTags.rst +++ b/docs/source/transformers/NormalizeTags.rst @@ -2,6 +2,7 @@ NormalizeTags ================================ +Normalize tag names by normalizing case and removing duplicates. NormalizeTags is not included in default transformers, that's why you need to call it with ``--transform`` explicitly:: @@ -12,13 +13,13 @@ Or configure `enable` parameter:: robotidy --configure NormalizeTags:enabled=True -Supported cases: lowercase (default), uppercase, titlecase. +Supported cases: lowercase (default), uppercase, title case. 
You can configure case using `case` parameter:: robotidy --transform NormalizeTags:case=uppercase -You can remove duplicates without normalizing case by setting normalize_case parameter to False:: +You can remove duplicates without normalizing case by setting `normalize_case` parameter to False:: robotidy --transform NormalizeTags:normalize_case=False diff --git a/docs/source/transformers/OrderTags.rst b/docs/source/transformers/OrderTags.rst index 50892a93..fa061343 100644 --- a/docs/source/transformers/OrderTags.rst +++ b/docs/source/transformers/OrderTags.rst @@ -2,6 +2,7 @@ OrderTags ================================ +Order tags in case-insensitive way in ascending order. OrderTags is not included in default transformers, that's why you need to call it with ``--transform`` explicitly:: @@ -11,10 +12,8 @@ Or configure `enable` parameter:: robotidy --configure OrderTags:enabled=True -By default tags are ordered in case-insensitive way in ascending order. This relates to tags in Test Cases, Keywords, Force Tags and Default Tags. - .. tabs:: .. code-tab:: robotframework Before @@ -51,7 +50,7 @@ This relates to tags in Test Cases, Keywords, Force Tags and Default Tags. 
[Tags] aa Ab ba Bb Ca Cb No Operation -Using the same example with reverse=True param we will get tags in descending order:: +Using the same example with `reverse=True` param we will get tags in descending order:: robotidy --transform OrderTags:reverse=True src @@ -121,4 +120,4 @@ Force Tags and Default Tags ordering can be disabled like this:: *** Test Cases *** Tags Upper Lower [Tags] aa Ab ba Bb Ca Cb - My Keyword \ No newline at end of file + My Keyword diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..e34796ec --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.black] +line-length = 120 \ No newline at end of file diff --git a/robotidy/__init__.py b/robotidy/__init__.py index 62bb6e02..a77e73b5 100644 --- a/robotidy/__init__.py +++ b/robotidy/__init__.py @@ -1,4 +1,4 @@ from robotidy.version import __version__ -__all__ = ['__version__'] +__all__ = ["__version__"] diff --git a/robotidy/__main__.py b/robotidy/__main__.py index 66c2159e..3e50801f 100644 --- a/robotidy/__main__.py +++ b/robotidy/__main__.py @@ -1,5 +1,5 @@ from robotidy.cli import cli -if __name__ == '__main__': +if __name__ == "__main__": cli() diff --git a/robotidy/api.py b/robotidy/api.py index 9e00535c..43c307e0 100644 --- a/robotidy/api.py +++ b/robotidy/api.py @@ -12,22 +12,19 @@ class RobotidyAPI(Robotidy): def __init__(self, src: str, output: Optional[str], **kwargs): config = find_and_read_config((src,)) - config = { - k: str(v) if not isinstance(v, (list, dict)) else v - for k, v in config.items() - } + config = {k: str(v) if not isinstance(v, (list, dict)) else v for k, v in config.items()} converter = TransformType() - transformers = [converter.convert(tr, None, None) for tr in config.get('transform', ())] - configurations = [converter.convert(c, None, None) for c in config.get('configure', ())] + transformers = [converter.convert(tr, None, None) for tr in config.get("transform", ())] + configurations = [converter.convert(c, None, None) for c in 
config.get("configure", ())] formatting_config = GlobalFormattingConfig( - space_count=kwargs.get('spacecount', None) or int(config.get('spacecount', 4)), - separator=kwargs.get('separator', None) or config.get('separator', 'space'), - line_sep=config.get('lineseparator', 'native'), - start_line=kwargs.get('startline', None) or int(config['startline']) if 'startline' in config else None, - end_line=kwargs.get('endline', None) or int(config['endline']) if 'endline' in config else None + space_count=kwargs.get("spacecount", None) or int(config.get("spacecount", 4)), + separator=kwargs.get("separator", None) or config.get("separator", "space"), + line_sep=config.get("lineseparator", "native"), + start_line=kwargs.get("startline", None) or int(config["startline"]) if "startline" in config else None, + end_line=kwargs.get("endline", None) or int(config["endline"]) if "endline" in config else None, ) - exclude = config.get('exclude', None) - extend_exclude = config.get('extend_exclude', None) + exclude = config.get("exclude", None) + extend_exclude = config.get("extend_exclude", None) exclude = validate_regex(exclude if exclude is not None else DEFAULT_EXCLUDES) extend_exclude = validate_regex(extend_exclude) super().__init__( @@ -42,7 +39,7 @@ def __init__(self, src: str, output: Optional[str], **kwargs): verbose=False, check=False, output=output, - force_order=False + force_order=False, ) diff --git a/robotidy/app.py b/robotidy/app.py index 48a2c75f..3122e58c 100644 --- a/robotidy/app.py +++ b/robotidy/app.py @@ -14,25 +14,26 @@ StatementLinesCollector, decorate_diff_with_color, GlobalFormattingConfig, - ModelWriter + ModelWriter, ) class Robotidy: - def __init__(self, - transformers: List[Tuple[str, List]], - transformers_config: List[Tuple[str, List]], - src: Tuple[str, ...], - exclude: Pattern, - extend_exclude: Pattern, - overwrite: bool, - show_diff: bool, - formatting_config: GlobalFormattingConfig, - verbose: bool, - check: bool, - output: Optional[Path], - 
force_order: bool - ): + def __init__( + self, + transformers: List[Tuple[str, List]], + transformers_config: List[Tuple[str, List]], + src: Tuple[str, ...], + exclude: Pattern, + extend_exclude: Pattern, + overwrite: bool, + show_diff: bool, + formatting_config: GlobalFormattingConfig, + verbose: bool, + check: bool, + output: Optional[Path], + force_order: bool, + ): self.sources = get_paths(src, exclude, extend_exclude) self.overwrite = overwrite self.show_diff = show_diff @@ -44,20 +45,20 @@ def __init__(self, self.transformers = load_transformers(transformers, transformers_config, force_order=force_order) for transformer in self.transformers: # inject global settings TODO: handle it better - setattr(transformer, 'formatting_config', self.formatting_config) + setattr(transformer, "formatting_config", self.formatting_config) def transform_files(self): changed_files = 0 for source in self.sources: try: stdin = False - if str(source) == '-': + if str(source) == "-": stdin = True if self.verbose: - click.echo('Loading file from stdin') + click.echo("Loading file from stdin") source = self.load_from_stdin() elif self.verbose: - click.echo(f'Transforming {source} file') + click.echo(f"Transforming {source} file") model = get_model(source) diff, old_model, new_model = self.transform(model) if diff: @@ -97,16 +98,21 @@ def save_model(self, model): output = self.output or model.source ModelWriter(output=output, newline=self.formatting_config.line_sep).write(model) - def output_diff(self, path: str, old_model: StatementLinesCollector, new_model: StatementLinesCollector): + def output_diff( + self, + path: str, + old_model: StatementLinesCollector, + new_model: StatementLinesCollector, + ): if not self.show_diff: return - old = [l + '\n' for l in old_model.text.splitlines()] - new = [l + '\n' for l in new_model.text.splitlines()] - lines = list(unified_diff(old, new, fromfile=f'{path}\tbefore', tofile=f'{path}\tafter')) + old = [l + "\n" for l in 
old_model.text.splitlines()] + new = [l + "\n" for l in new_model.text.splitlines()] + lines = list(unified_diff(old, new, fromfile=f"{path}\tbefore", tofile=f"{path}\tafter")) if not lines: return colorized_output = decorate_diff_with_color(lines) - click.echo(colorized_output.encode('ascii', 'ignore').decode('ascii'), color=True) + click.echo(colorized_output.encode("ascii", "ignore").decode("ascii"), color=True) @staticmethod def convert_configure(configure: List[Tuple[str, List]]) -> Dict[str, List]: diff --git a/robotidy/cli.py b/robotidy/cli.py index e9a19375..aabf2783 100644 --- a/robotidy/cli.py +++ b/robotidy/cli.py @@ -1,13 +1,5 @@ from pathlib import Path -from typing import ( - Tuple, - Dict, - List, - Iterable, - Optional, - Any, - Pattern -) +from typing import Tuple, Dict, List, Iterable, Optional, Any, Pattern import click import re @@ -19,12 +11,12 @@ GlobalFormattingConfig, split_args_from_name_or_path, remove_rst_formatting, - RecommendationFinder + RecommendationFinder, ) from robotidy.version import __version__ -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) HELP_MSG = f""" Version: {__version__} @@ -59,13 +51,13 @@ class RawHelp(click.Command): def format_help_text(self, ctx, formatter): if self.help: formatter.write_paragraph() - for line in self.help.split('\n'): + for line in self.help.split("\n"): formatter.write_text(line) def format_epilog(self, ctx, formatter): if self.epilog: formatter.write_paragraph() - for line in self.epilog.split('\n'): + for line in self.epilog.split("\n"): formatter.write_text(line) @@ -73,20 +65,22 @@ class TransformType(click.ParamType): name = "transform" def convert(self, value, param, ctx): - name = '' + name = "" try: - name, args = split_args_from_name_or_path(value.replace(' ', '')) + name, args = split_args_from_name_or_path(value.replace(" ", "")) except ValueError: - exc = f'Invalid {name} transformer configuration. 
' \ - f'Parameters should be provided in format name=value, delimited by :' + exc = ( + f"Invalid {name} transformer configuration. " + f"Parameters should be provided in format name=value, delimited by :" + ) raise ValueError(exc) return name, args def parse_opt(opt): - while opt and opt[0] == '-': + while opt and opt[0] == "-": opt = opt[1:] - return opt.replace('-', '_') + return opt.replace("-", "_") def validate_config_options(params, config): @@ -105,18 +99,15 @@ def read_config(ctx: click.Context, param: click.Parameter, value: Optional[str] if value: config = read_pyproject_config(value) else: - config = find_and_read_config(ctx.params['src'] or (str(Path('.').resolve()),)) + config = find_and_read_config(ctx.params["src"] or (str(Path(".").resolve()),)) if not config: return # Sanitize the values to be Click friendly. For more information please see: # https://github.com/psf/black/issues/1458 # https://github.com/pallets/click/issues/1567 - config = { - k: str(v) if not isinstance(v, (list, dict)) else v - for k, v in config.items() - } - if 'src' in config: - config['src'] = tuple(config['src']) + config = {k: str(v) if not isinstance(v, (list, dict)) else v for k, v in config.items()} + if "src" in config: + config["src"] = tuple(config["src"]) validate_config_options(ctx.command.params, config) default_map: Dict[str, Any] = {} if ctx.default_map: @@ -143,7 +134,7 @@ def validate_regex(value: Optional[str]) -> Optional[Pattern]: def print_description(name: str): transformers = load_transformers(None, {}, allow_disabled=True) transformer_by_names = {transformer.__class__.__name__: transformer for transformer in transformers} - if name == 'all': + if name == "all": for tr_name, transformer in transformer_by_names.items(): click.echo(f"Transformer {tr_name}:") click.echo(remove_rst_formatting(transformer.__doc__)) @@ -160,13 +151,15 @@ def print_description(name: str): def print_transformers_list(): transformers = load_transformers(None, {}, 
allow_disabled=True) - click.echo('To see detailed docs run --desc or --desc all. ' - 'Transformers with (disabled) tag \nare executed only when selected explicitly with --transform or ' - 'configured with param `enabled=True`.\n' - 'Available transformers:\n') + click.echo( + "To see detailed docs run --desc or --desc all. " + "Transformers with (disabled) tag \nare executed only when selected explicitly with --transform or " + "configured with param `enabled=True`.\n" + "Available transformers:\n" + ) transformer_names = [] for transformer in transformers: - disabled = ' (disabled)' if not getattr(transformer, 'ENABLED', True) else '' + disabled = " (disabled)" if not getattr(transformer, "ENABLED", True) else "" transformer_names.append(transformer.__class__.__name__ + disabled) for name in sorted(transformer_names): click.echo(name) @@ -174,29 +167,27 @@ def print_transformers_list(): @click.command(cls=RawHelp, help=HELP_MSG, epilog=EPILOG, context_settings=CONTEXT_SETTINGS) @click.option( - '--transform', - '-t', + "--transform", + "-t", type=TransformType(), multiple=True, - metavar='TRANSFORMER_NAME', - help="Transform files from [PATH(S)] with given transformer" + metavar="TRANSFORMER_NAME", + help="Transform files from [PATH(S)] with given transformer", ) @click.option( - '--configure', - '-c', + "--configure", + "-c", type=TransformType(), multiple=True, - metavar='TRANSFORMER_NAME:PARAM=VALUE', - help='Configure transformers' + metavar="TRANSFORMER_NAME:PARAM=VALUE", + help="Configure transformers", ) @click.argument( "src", nargs=-1, - type=click.Path( - exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True - ), + type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True), is_eager=True, - metavar='[PATH(S)]' + metavar="[PATH(S)]", ) @click.option( "--exclude", @@ -234,145 +225,130 @@ def print_transformers_list(): help="Read configuration from FILE path.", ) @click.option( - 
'--overwrite/--no-overwrite', + "--overwrite/--no-overwrite", default=True, - help='Write changes back to file', - show_default=True + help="Write changes back to file", + show_default=True, ) @click.option( - '--diff', + "--diff", is_flag=True, - help='Output diff of each processed file.', - show_default=True + help="Output diff of each processed file.", + show_default=True, ) @click.option( - '--check', + "--check", is_flag=True, help="Don't overwrite files and just return status. Return code 0 means nothing would change. " - "Return code 1 means that at least 1 file would change. Any internal error will overwrite this status.", - show_default=True + "Return code 1 means that at least 1 file would change. Any internal error will overwrite this status.", + show_default=True, ) @click.option( - '-s', - '--spacecount', + "-s", + "--spacecount", type=click.types.INT, default=4, - help='The number of spaces between cells in the plain text format.\n', - show_default=True + help="The number of spaces between cells in the plain text format.\n", + show_default=True, ) @click.option( - '-ls', - '--lineseparator', - type=click.types.Choice(['native', 'windows', 'unix']), - default='native', + "-ls", + "--lineseparator", + type=click.types.Choice(["native", "windows", "unix"]), + default="native", help="Line separator to use in outputs.\n" - "native: use operating system's native line separators\n" - "windows: use Windows line separators (CRLF)\n" - "unix: use Unix line separators (LF)", - show_default=True + "native: use operating system's native line separators\n" + "windows: use Windows line separators (CRLF)\n" + "unix: use Unix line separators (LF)", + show_default=True, ) @click.option( - '--separator', - type=click.types.Choice(['space', 'tab']), - default='space', + "--separator", + type=click.types.Choice(["space", "tab"]), + default="space", help="Token separator to use in outputs.\n" - "space: use --spacecount spaces to separate tokens\n" - "tab: use a single 
tabulation to separate tokens\n", - show_default=True + "space: use --spacecount spaces to separate tokens\n" + "tab: use a single tabulation to separate tokens\n", + show_default=True, ) @click.option( - '-sl', - '--startline', + "-sl", + "--startline", default=None, type=int, help="Limit robotidy only to selected area. If --endline is not provided, format text only at --startline. " - "Line numbers start from 1.", - show_default=True + "Line numbers start from 1.", + show_default=True, ) @click.option( - '-el', - '--endline', + "-el", + "--endline", default=None, type=int, - help="Limit robotidy only to selected area. " - "Line numbers start from 1.", - show_default=True + help="Limit robotidy only to selected area. " "Line numbers start from 1.", + show_default=True, ) @click.option( - '--list', - '-l', + "--list", + "-l", is_eager=True, is_flag=True, - help='List available transformers and exit.' + help="List available transformers and exit.", ) @click.option( - '--desc', - '-d', + "--desc", + "-d", default=None, - metavar='TRANSFORMER_NAME', - help='Show documentation for selected transformer.' 
+ metavar="TRANSFORMER_NAME", + help="Show documentation for selected transformer.", ) @click.option( - '--output', - '-o', - type=click.Path( - file_okay=True, dir_okay=False, writable=True, allow_dash=False - ), + "--output", + "-o", + type=click.Path(file_okay=True, dir_okay=False, writable=True, allow_dash=False), default=None, - metavar='PATH', - help='Path to output file where source file will be saved' + metavar="PATH", + help="Path to output file where source file will be saved", ) +@click.option("-v", "--verbose", is_flag=True, help="More verbose output", show_default=True) +@click.option("--list-transformers", is_flag=True) # deprecated +@click.option("--describe-transformer", default=None) # deprecated @click.option( - '-v', - '--verbose', + "--force-order", is_flag=True, - help="More verbose output", - show_default=True + help="Transform files using transformers in order provided in cli", ) -@click.option( # deprecated - '--list-transformers', - is_flag=True -) -@click.option( # deprecated - '--describe-transformer', - default=None -) -@click.option( - '--force-order', - is_flag=True, - help='Transform files using transformers in order provided in cli' -) -@click.version_option(version=__version__, prog_name='robotidy') +@click.version_option(version=__version__, prog_name="robotidy") @click.pass_context def cli( - ctx: click.Context, - transform: List[Tuple[str, List]], - configure: List[Tuple[str, List]], - src: Tuple[str, ...], - exclude: Optional[Pattern], - extend_exclude: Optional[Pattern], - overwrite: bool, - diff: bool, - check: bool, - spacecount: int, - lineseparator: str, - verbose: bool, - config: Optional[str], - separator: Optional[str], - startline: Optional[int], - endline: Optional[int], - list: bool, - desc: Optional[str], - output: Optional[Path], - list_transformers: bool, - describe_transformer: Optional[str], - force_order: bool + ctx: click.Context, + transform: List[Tuple[str, List]], + configure: List[Tuple[str, List]], + src: 
Tuple[str, ...], + exclude: Optional[Pattern], + extend_exclude: Optional[Pattern], + overwrite: bool, + diff: bool, + check: bool, + spacecount: int, + lineseparator: str, + verbose: bool, + config: Optional[str], + separator: Optional[str], + startline: Optional[int], + endline: Optional[int], + list: bool, + desc: Optional[str], + output: Optional[Path], + list_transformers: bool, + describe_transformer: Optional[str], + force_order: bool, ): if list_transformers: - print('--list-transformers is deprecated in 1.3.0. Use --list instead') + print("--list-transformers is deprecated in 1.3.0. Use --list instead") ctx.exit(0) if describe_transformer: - print('--describe-transformer is deprecated in 1.3.0. Use --desc NAME instead') + print("--describe-transformer is deprecated in 1.3.0. Use --desc NAME instead") ctx.exit(0) if list: print_transformers_list() @@ -382,7 +358,7 @@ def cli( ctx.exit(return_code) if not src: if ctx.default_map is not None: - src = ctx.default_map.get('src', None) + src = ctx.default_map.get("src", None) if not src: print("No source path provided. 
Run robotidy --help to see how to use robotidy") ctx.exit(1) @@ -391,14 +367,14 @@ def cli( exclude = re.compile(DEFAULT_EXCLUDES) if config and verbose: - click.echo(f'Loaded {config} configuration file') + click.echo(f"Loaded {config} configuration file") formatting_config = GlobalFormattingConfig( space_count=spacecount, line_sep=lineseparator, start_line=startline, separator=separator, - end_line=endline + end_line=endline, ) tidy = Robotidy( transformers=transform, @@ -412,7 +388,7 @@ def cli( verbose=verbose, check=check, output=output, - force_order=force_order + force_order=force_order, ) status = tidy.transform_files() ctx.exit(status) diff --git a/robotidy/decorators.py b/robotidy/decorators.py index 6bf1e189..a49836ff 100644 --- a/robotidy/decorators.py +++ b/robotidy/decorators.py @@ -11,16 +11,18 @@ def check_start_end_line(func): """ Do not transform node if it's not within passed start_line and end_line. """ + @functools.wraps(func) def wrapper(self, node, *args): if not node: return return_node_untouched(node) if not node_within_lines( - node.lineno, - node.end_lineno, - self.formatting_config.start_line, - self.formatting_config.end_line + node.lineno, + node.end_lineno, + self.formatting_config.start_line, + self.formatting_config.end_line, ): return return_node_untouched(node) return func(self, node, *args) + return wrapper diff --git a/robotidy/files.py b/robotidy/files.py index c5d83cf6..d64d00a5 100644 --- a/robotidy/files.py +++ b/robotidy/files.py @@ -8,7 +8,7 @@ DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.nox|\.tox|\.venv|venv|\.svn)/" -INCLUDE_EXT = ('.robot', '.resource') +INCLUDE_EXT = (".robot", ".resource") @lru_cache() @@ -26,9 +26,7 @@ def find_project_root(srcs: Iterable[str]) -> Path: # A list of lists of parents for each 'src'. 
'src' is included as a # "parent" of itself if it is a directory - src_parents = [ - list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs - ] + src_parents = [list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs] common_base = max( set.intersection(*(set(parents) for parents in src_parents)), @@ -50,10 +48,10 @@ def find_project_root(srcs: Iterable[str]) -> Path: def find_and_read_config(src_paths: Iterable[str]) -> Dict[str, Any]: project_root = find_project_root(src_paths) - config_path = project_root / 'robotidy.toml' + config_path = project_root / "robotidy.toml" if config_path.is_file(): return read_pyproject_config(str(config_path)) - pyproject_path = project_root / 'pyproject.toml' + pyproject_path = project_root / "pyproject.toml" if pyproject_path.is_file(): return read_pyproject_config(str(pyproject_path)) return {} @@ -64,9 +62,7 @@ def load_toml_file(path: str) -> Dict[str, Any]: config = toml.load(path) return config except (toml.TomlDecodeError, OSError) as e: - raise click.FileError( - filename=path, hint=f"Error reading configuration file: {e}" - ) + raise click.FileError(filename=path, hint=f"Error reading configuration file: {e}") def read_pyproject_config(path: str) -> Dict[str, Any]: @@ -74,7 +70,7 @@ def read_pyproject_config(path: str) -> Dict[str, Any]: config = config.get("tool", {}).get("robotidy", {}) if config: click.echo(f"Loaded configuration from {path}") - return {k.replace('--', '').replace('-', '_'): v for k, v in config.items()} + return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} @lru_cache() @@ -101,21 +97,26 @@ def get_paths(src: Tuple[str, ...], exclude: Pattern, extend_exclude: Optional[P gitignore = get_gitignore(root) sources = set() for s in src: - if s == '-': - sources.add('-') + if s == "-": + sources.add("-") continue path = Path(s).resolve() if path.is_file(): sources.add(path) elif path.is_dir(): sources.update(iterate_dir((path,), exclude, 
extend_exclude, gitignore)) - elif s == '-': + elif s == "-": sources.add(path) return sources -def iterate_dir(paths: Iterable[Path], exclude: Pattern, extend_exclude: Pattern, gitignore: Optional[PathSpec]) -> Iterator[Path]: +def iterate_dir( + paths: Iterable[Path], + exclude: Pattern, + extend_exclude: Pattern, + gitignore: Optional[PathSpec], +) -> Iterator[Path]: for path in paths: if gitignore is not None and gitignore.match_file(path): continue @@ -126,7 +127,7 @@ def iterate_dir(paths: Iterable[Path], exclude: Pattern, extend_exclude: Pattern path.iterdir(), exclude, extend_exclude, - gitignore + get_gitignore(path) if gitignore is not None else None + gitignore + get_gitignore(path) if gitignore is not None else None, ) elif path.is_file(): if path.suffix not in INCLUDE_EXT: diff --git a/robotidy/transformers/AddMissingEnd.py b/robotidy/transformers/AddMissingEnd.py index 202d8942..84258b70 100644 --- a/robotidy/transformers/AddMissingEnd.py +++ b/robotidy/transformers/AddMissingEnd.py @@ -20,6 +20,7 @@ class AddMissingEnd(ModelTransformer): Supports global formatting params: ``--startline`` and ``--endline``. """ + @check_start_end_line def visit_For(self, node): # noqa self.generic_visit(node) @@ -47,19 +48,19 @@ def visit_If(self, node): # noqa return (node, *outside) def fix_end(self, node): - """ Fix END (missing END, End -> END, END position should be the same as FOR etc). 
""" + """Fix END (missing END, End -> END, END position should be the same as FOR etc).""" if node.header.tokens[0].type == Token.SEPARATOR: indent = node.header.tokens[0] else: indent = Token(Token.SEPARATOR, self.formatting_config.separator) - node.end = End([indent, Token(Token.END, 'END'), Token(Token.EOL)]) + node.end = End([indent, Token(Token.END, "END"), Token(Token.EOL)]) @staticmethod def fix_header_name(node, header_name): node.header.data_tokens[0].value = header_name def collect_inside_statements(self, node): - """ Split statements from node for those that belong to it and outside nodes. + """Split statements from node for those that belong to it and outside nodes. In this example with missing END: FOR ${i} IN RANGE 10 @@ -81,7 +82,7 @@ def collect_inside_statements(self, node): @staticmethod def get_column(node): - if hasattr(node, 'header'): + if hasattr(node, "header"): return node.header.data_tokens[0].col_offset if isinstance(node, Comment): token = node.get_token(Token.COMMENT) diff --git a/robotidy/transformers/AlignSettingsSection.py b/robotidy/transformers/AlignSettingsSection.py index 76b15888..1675aab1 100644 --- a/robotidy/transformers/AlignSettingsSection.py +++ b/robotidy/transformers/AlignSettingsSection.py @@ -1,16 +1,13 @@ from collections import defaultdict -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robot.parsing.model import Statement from robotidy.utils import ( node_outside_selection, round_to_four, tokens_by_lines, - left_align + left_align, ) @@ -61,7 +58,13 @@ class AlignSettingsSection(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/AlignSettingsSection.html for more examples. 
""" - TOKENS_WITH_KEYWORDS = {Token.SUITE_SETUP, Token.SUITE_TEARDOWN, Token.TEST_SETUP, Token.TEST_TEARDOWN} + + TOKENS_WITH_KEYWORDS = { + Token.SUITE_SETUP, + Token.SUITE_TEARDOWN, + Token.TEST_SETUP, + Token.TEST_TEARDOWN, + } def __init__(self, up_to_column: int = 2, argument_indent: int = 4): self.up_to_column = up_to_column - 1 @@ -101,12 +104,17 @@ def align_rows(self, statements, look_up): if index < up_to: arg_indent = self.argument_indent if keyword_arg else 0 if keyword_arg and index != 0: - separator = max((look_up[index] - len(token.value) - arg_indent + 4), - self.formatting_config.space_count) * ' ' + separator = ( + max( + (look_up[index] - len(token.value) - arg_indent + 4), + self.formatting_config.space_count, + ) + * " " + ) else: - separator = (look_up[index] - len(token.value) + arg_indent + 4) * ' ' + separator = (look_up[index] - len(token.value) + arg_indent + 4) * " " else: - separator = self.formatting_config.space_count * ' ' + separator = self.formatting_config.space_count * " " aligned_statement.append(Token(Token.SEPARATOR, separator)) last_token = line[-2] # remove leading whitespace before token diff --git a/robotidy/transformers/AlignTestCases.py b/robotidy/transformers/AlignTestCases.py index ccc41f5c..10ab7762 100644 --- a/robotidy/transformers/AlignTestCases.py +++ b/robotidy/transformers/AlignTestCases.py @@ -8,14 +8,11 @@ End, IfHeader, ElseHeader, - ElseIfHeader + ElseIfHeader, ) from robotidy.decorators import check_start_end_line -from robotidy.utils import ( - round_to_four, - is_suite_templated -) +from robotidy.utils import round_to_four, is_suite_templated class AlignTestCases(ModelTransformer): @@ -46,6 +43,7 @@ class AlignTestCases(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/AlignTestCases.html for more examples. 
""" + ENABLED = False def __init__(self, only_with_headers: bool = False): @@ -101,7 +99,10 @@ def visit_Statement(self, statement): # noqa self.name_line = statement.lineno elif statement.type == Token.TESTCASE_HEADER: self.align_header(statement) - elif not isinstance(statement, (Comment, EmptyLine, ForHeader, IfHeader, ElseHeader, ElseIfHeader, End)): + elif not isinstance( + statement, + (Comment, EmptyLine, ForHeader, IfHeader, ElseHeader, ElseIfHeader, End), + ): self.align_statement(statement) return statement @@ -109,7 +110,7 @@ def align_header(self, statement): tokens = [] for index, token in enumerate(statement.data_tokens[:-1]): tokens.append(token) - separator = (self.widths[index] - len(token.value) + 4) * ' ' + separator = (self.widths[index] - len(token.value) + 4) * " " tokens.append(Token(Token.SEPARATOR, separator)) tokens.append(statement.data_tokens[-1]) tokens.append(statement.tokens[-1]) # eol @@ -129,7 +130,7 @@ def align_statement(self, statement): if self.name_line == statement.lineno: exp_pos -= self.test_name_len self.test_name_len = 0 - tokens.append(Token(Token.SEPARATOR, (exp_pos - line_pos) * ' ')) + tokens.append(Token(Token.SEPARATOR, (exp_pos - line_pos) * " ")) tokens.append(token) line_pos += len(token.value) + exp_pos - line_pos tokens.append(line[-1]) diff --git a/robotidy/transformers/AlignVariablesSection.py b/robotidy/transformers/AlignVariablesSection.py index eacf9744..4d6101a0 100644 --- a/robotidy/transformers/AlignVariablesSection.py +++ b/robotidy/transformers/AlignVariablesSection.py @@ -1,16 +1,13 @@ from collections import defaultdict -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robot.parsing.model import Statement from robotidy.utils import ( node_outside_selection, round_to_four, tokens_by_lines, - left_align + left_align, ) @@ -46,6 +43,7 @@ class AlignVariablesSection(ModelTransformer): See 
https://robotidy.readthedocs.io/en/latest/transformers/AlignVariablesSection.html for more examples. """ + def __init__(self, up_to_column: int = 2): self.up_to_column = up_to_column - 1 @@ -78,8 +76,11 @@ def align_rows(self, statements, look_up): up_to = self.up_to_column if self.up_to_column != -1 else len(line) - 2 for index, token in enumerate(line[:-2]): aligned_statement.append(token) - separator = (look_up[index] - len(token.value) + 4) * ' ' if index < up_to else \ - self.formatting_config.space_count * ' ' + separator = ( + (look_up[index] - len(token.value) + 4) * " " + if index < up_to + else self.formatting_config.space_count * " " + ) aligned_statement.append(Token(Token.SEPARATOR, separator)) last_token = line[-2] # remove leading whitespace before token diff --git a/robotidy/transformers/DiscardEmptySections.py b/robotidy/transformers/DiscardEmptySections.py index d4309751..255695a4 100644 --- a/robotidy/transformers/DiscardEmptySections.py +++ b/robotidy/transformers/DiscardEmptySections.py @@ -1,9 +1,4 @@ -from robot.api.parsing import ( - ModelTransformer, - EmptyLine, - Comment, - CommentSection -) +from robot.api.parsing import ModelTransformer, EmptyLine, Comment, CommentSection from robotidy.decorators import check_start_end_line @@ -21,14 +16,16 @@ class DiscardEmptySections(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/DiscardEmptySections.html for more examples. 
""" + def __init__(self, allow_only_comments: bool = False): # If True then sections only with comments are not is considered to be empty self.allow_only_comments = allow_only_comments @check_start_end_line def visit_Section(self, node): # noqa - anything_but = EmptyLine if self.allow_only_comments or isinstance(node, CommentSection)\ - else (Comment, EmptyLine) + anything_but = ( + EmptyLine if self.allow_only_comments or isinstance(node, CommentSection) else (Comment, EmptyLine) + ) if all(isinstance(child, anything_but) for child in node.body): return None return node diff --git a/robotidy/transformers/MergeAndOrderSections.py b/robotidy/transformers/MergeAndOrderSections.py index 3cc57799..a67b52d6 100644 --- a/robotidy/transformers/MergeAndOrderSections.py +++ b/robotidy/transformers/MergeAndOrderSections.py @@ -1,9 +1,4 @@ -from robot.api.parsing import ( - Token, - ModelTransformer, - SectionHeader, - EmptyLine -) +from robot.api.parsing import Token, ModelTransformer, SectionHeader, EmptyLine from robot.parsing.model.statements import Statement import click @@ -43,7 +38,8 @@ class MergeAndOrderSections(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/MergeAndOrderSections.html for more examples. 
""" - def __init__(self, order: str = '', create_comment_section: bool = True): + + def __init__(self, order: str = "", create_comment_section: bool = True): self.sections_order = self.parse_order(order) self.create_comment_section = create_comment_section @@ -54,32 +50,32 @@ def parse_order(order): Token.SETTING_HEADER, Token.VARIABLE_HEADER, Token.TESTCASE_HEADER, - Token.KEYWORD_HEADER + Token.KEYWORD_HEADER, ) if not order: return default_order - parts = order.lower().split(',') + parts = order.lower().split(",") map = { - 'comments': Token.COMMENT_HEADER, - 'comment': Token.COMMENT_HEADER, - 'settings': Token.SETTING_HEADER, - 'setting': Token.SETTING_HEADER, - 'variables': Token.VARIABLE_HEADER, - 'variable': Token.VARIABLE_HEADER, - 'testcases': Token.TESTCASE_HEADER, - 'testcase': Token.TESTCASE_HEADER, - 'keywords': Token.KEYWORD_HEADER, - 'keyword': Token.KEYWORD_HEADER + "comments": Token.COMMENT_HEADER, + "comment": Token.COMMENT_HEADER, + "settings": Token.SETTING_HEADER, + "setting": Token.SETTING_HEADER, + "variables": Token.VARIABLE_HEADER, + "variable": Token.VARIABLE_HEADER, + "testcases": Token.TESTCASE_HEADER, + "testcase": Token.TESTCASE_HEADER, + "keywords": Token.KEYWORD_HEADER, + "keyword": Token.KEYWORD_HEADER, } parsed_order = [] for part in parts: parsed_order.append(map.get(part, None)) if any(header not in parsed_order for header in default_order) and len(parsed_order) != len(default_order): raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{order}' for order for MergeAndOrderSections transformer." 
- f" Custom order should be provided in comma separated list with all section names:\n" - f"order=comments,settings,variables,testcases,variables" + f" Custom order should be provided in comma separated list with all section names:\n" + f"order=comments,settings,variables,testcases,keywords", ) return parsed_order @@ -96,15 +92,17 @@ def visit_File(self, node): # noqa sections[section_type] = section else: if len(section.header.data_tokens) > 1: - print(f'{node.source}: Merged duplicated section has section header comments. ' - 'Only header comments from first section header of the same type are preserved.') + print( + f"{node.source}: Merged duplicated section has section header comments. " + "Only header comments from first section header of the same type are preserved." + ) sections[section_type].body += section.body node.sections = [sections[order] for order in self.sections_order if order in sections] return node @staticmethod def from_last_section(node): - """ Last node use different logic for new line marker. It is not possible to preserve all empty lines but + """Last node use different logic for new line marker. 
It is not possible to preserve all empty lines but we need at least ensure that following code:: *** Test Case *** @@ -116,13 +114,13 @@ def from_last_section(node): """ if node.body: last_statement = node.body[-1] - new_line = [Token(Token.EOL, '\n')] - if hasattr(last_statement, 'body'): + new_line = [Token(Token.EOL, "\n")] + if hasattr(last_statement, "body"): if not last_statement.body: - node.body[-1].body.append(EmptyLine.from_params(eol='\n')) + node.body[-1].body.append(EmptyLine.from_params(eol="\n")) else: last_statement = last_statement.body[-1] - if hasattr(last_statement, 'end'): + if hasattr(last_statement, "end"): if last_statement.end: node.body[-1].body[-1].end = Statement.from_tokens( list(last_statement.end.tokens[:-1]) + new_line @@ -134,17 +132,22 @@ def from_last_section(node): else: last_token = node.header.tokens[-1] if last_token.type == Token.EOL: - node.header = Statement.from_tokens(list(node.header.tokens[:-1]) + [Token(Token.EOL, '\n')]) + node.header = Statement.from_tokens(list(node.header.tokens[:-1]) + [Token(Token.EOL, "\n")]) return node def get_section_type(self, section): - header_tokens = (Token.COMMENT_HEADER, Token.TESTCASE_HEADER, Token.SETTING_HEADER, Token.KEYWORD_HEADER, - Token.VARIABLE_HEADER) + header_tokens = ( + Token.COMMENT_HEADER, + Token.TESTCASE_HEADER, + Token.SETTING_HEADER, + Token.KEYWORD_HEADER, + Token.VARIABLE_HEADER, + ) if section.header: name_token = section.header.get_token(*header_tokens) section_type = name_token.type else: section_type = Token.COMMENT_HEADER if self.create_comment_section: - section.header = SectionHeader.from_params(section_type, '*** Comments ***') + section.header = SectionHeader.from_params(section_type, "*** Comments ***") return section_type diff --git a/robotidy/transformers/NormalizeAssignments.py b/robotidy/transformers/NormalizeAssignments.py index 2ff10738..da26c427 100644 --- a/robotidy/transformers/NormalizeAssignments.py +++ 
b/robotidy/transformers/NormalizeAssignments.py @@ -3,11 +3,7 @@ from collections import Counter import click -from robot.api.parsing import ( - ModelTransformer, - Variable, - Token -) +from robot.api.parsing import ModelTransformer, Variable, Token class NormalizeAssignments(ModelTransformer): @@ -58,27 +54,33 @@ class NormalizeAssignments(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeAssignments.html for more examples. """ - def __init__(self, equal_sign_type: str = 'autodetect', equal_sign_type_variables: str = 'remove'): - self.remove_equal_sign = re.compile(r'\s?=$') + + def __init__( + self, + equal_sign_type: str = "autodetect", + equal_sign_type_variables: str = "remove", + ): + self.remove_equal_sign = re.compile(r"\s?=$") self.file_equal_sign_type = None self.file_equal_sign_type_variables = None - self.equal_sign_type = self.parse_equal_sign_type(equal_sign_type, 'equal_sign_type') - self.equal_sign_type_variables = self.parse_equal_sign_type(equal_sign_type_variables, - 'equal_sign_type_variables') + self.equal_sign_type = self.parse_equal_sign_type(equal_sign_type, "equal_sign_type") + self.equal_sign_type_variables = self.parse_equal_sign_type( + equal_sign_type_variables, "equal_sign_type_variables" + ) @staticmethod def parse_equal_sign_type(value, name): types = { - 'remove': '', - 'equal_sign': '=', - 'space_and_equal_sign': ' =', - 'autodetect': None + "remove": "", + "equal_sign": "=", + "space_and_equal_sign": " =", + "autodetect": None, } if value not in types: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: {value} for {name} for AssignmentNormalizer transformer." 
- f" Possible values:\n remove\n equal_sign\n space_and_equal_sign" + f" Possible values:\n remove\n equal_sign\n space_and_equal_sign", ) return types[value] @@ -110,11 +112,15 @@ def visit_VariableSection(self, node): # noqa if not isinstance(child, Variable): continue var_token = child.get_token(Token.VARIABLE) - self.normalize_equal_sign(var_token, self.equal_sign_type_variables, self.file_equal_sign_type_variables) + self.normalize_equal_sign( + var_token, + self.equal_sign_type_variables, + self.file_equal_sign_type_variables, + ) return node def normalize_equal_sign(self, token, overwrite, local_normalize): - token.value = re.sub(self.remove_equal_sign, '', token.value) + token.value = re.sub(self.remove_equal_sign, "", token.value) if overwrite: token.value += overwrite elif local_normalize: @@ -156,4 +162,4 @@ def visit_VariableSection(self, node): # noqa @staticmethod def get_assignment_sign(token_value): - return token_value[token_value.find('}')+1:] + return token_value[token_value.find("}") + 1 :] diff --git a/robotidy/transformers/NormalizeNewLines.py b/robotidy/transformers/NormalizeNewLines.py index 1d3ef55d..fd750198 100644 --- a/robotidy/transformers/NormalizeNewLines.py +++ b/robotidy/transformers/NormalizeNewLines.py @@ -1,10 +1,6 @@ from typing import Optional -from robot.api.parsing import ( - ModelTransformer, - EmptyLine, - Token -) +from robot.api.parsing import ModelTransformer, EmptyLine, Token from robotidy.utils import is_suite_templated @@ -28,8 +24,15 @@ class NormalizeNewLines(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeNewLines.html for more examples. 
""" - def __init__(self, test_case_lines: int = 1, keyword_lines: Optional[int] = None, section_lines: int = 1, - separate_templated_tests: bool = False, consecutive_lines: int = 1): + + def __init__( + self, + test_case_lines: int = 1, + keyword_lines: Optional[int] = None, + section_lines: int = 1, + separate_templated_tests: bool = False, + consecutive_lines: int = 1, + ): self.test_case_lines = test_case_lines self.keyword_lines = keyword_lines if keyword_lines is not None else test_case_lines self.section_lines = section_lines @@ -77,7 +80,7 @@ def visit_Statement(self, node): # noqa tokens = [] for line in node.lines: if line[-1].type == Token.EOL: - line[-1].value = '\n' # TODO: use global formatting in the future + line[-1].value = "\n" # TODO: use global formatting in the future tokens.extend(line) node.tokens = tokens return node @@ -89,7 +92,7 @@ def trim_empty_lines(self, node): @staticmethod def trim_trailing_empty_lines(node): - if not hasattr(node, 'body'): + if not hasattr(node, "body"): return while node.body and isinstance(node.body[-1], EmptyLine): node.body.pop() diff --git a/robotidy/transformers/NormalizeSectionHeaderName.py b/robotidy/transformers/NormalizeSectionHeaderName.py index 74076189..ce0b7009 100644 --- a/robotidy/transformers/NormalizeSectionHeaderName.py +++ b/robotidy/transformers/NormalizeSectionHeaderName.py @@ -1,7 +1,4 @@ -from robot.api.parsing import ( - ModelTransformer, - SectionHeader -) +from robot.api.parsing import ModelTransformer, SectionHeader from robotidy.decorators import check_start_end_line @@ -32,6 +29,7 @@ class NormalizeSectionHeaderName(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeSectionHeaderName.html for more examples. 
""" + def __init__(self, uppercase: bool = False): self.uppercase = uppercase diff --git a/robotidy/transformers/NormalizeSeparators.py b/robotidy/transformers/NormalizeSeparators.py index b90c2d03..bcda411e 100644 --- a/robotidy/transformers/NormalizeSeparators.py +++ b/robotidy/transformers/NormalizeSeparators.py @@ -1,10 +1,7 @@ from itertools import takewhile import click -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robotidy.decorators import check_start_end_line @@ -23,35 +20,31 @@ class NormalizeSeparators(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeSeparators.html for more examples. """ + def __init__(self, sections: str = None): self.indent = 0 self.sections = self.parse_sections(sections) @staticmethod def parse_sections(sections): - default = { - 'comments', - 'settings', - 'testcases', - 'keywords', - 'variables' - } + default = {"comments", "settings", "testcases", "keywords", "variables"} if sections is None: return default if not sections: return {} - parts = sections.split(',') + parts = sections.split(",") parsed_sections = set() for part in parts: - part = part.replace('_', '') - if part and part[-1] != 's': - part += 's' + part = part.replace("_", "") + if part and part[-1] != "s": + part += "s" if part not in default: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{sections}' for sections for NormalizeSeparators transformer." 
- f" Sections to be transformed should be provided in comma separated list with valid section" - f" names:\n{sorted(default)}") + f" Sections to be transformed should be provided in comma separated list with valid section" + f" names:\n{sorted(default)}", + ) parsed_sections.add(part) return parsed_sections @@ -65,19 +58,19 @@ def should_visit(self, name, node): return node def visit_CommentSection(self, node): # noqa - return self.should_visit('comments', node) + return self.should_visit("comments", node) def visit_SettingSection(self, node): # noqa - return self.should_visit('settings', node) + return self.should_visit("settings", node) def visit_VariableSection(self, node): # noqa - return self.should_visit('variables', node) + return self.should_visit("variables", node) def visit_KeywordSection(self, node): # noqa - return self.should_visit('keywords', node) + return self.should_visit("keywords", node) def visit_TestCaseSection(self, node): # noqa - return self.should_visit('testcases', node) + return self.should_visit("testcases", node) def visit_TestCase(self, node): # noqa self.visit_Statement(node.header) @@ -114,7 +107,7 @@ def visit_If(self, node): @check_start_end_line def visit_Statement(self, statement): # noqa - has_pipes = statement.tokens[0].value.startswith('|') + has_pipes = statement.tokens[0].value.startswith("|") return self._handle_spaces(statement, has_pipes) def _handle_spaces(self, statement, has_pipes): @@ -122,22 +115,19 @@ def _handle_spaces(self, statement, has_pipes): for line in statement.lines: if has_pipes and len(line) > 1: line = self._remove_consecutive_separators(line) - new_tokens.extend([self._normalize_spaces(i, t, len(line)) - for i, t in enumerate(line)]) + new_tokens.extend([self._normalize_spaces(i, t, len(line)) for i, t in enumerate(line)]) statement.tokens = new_tokens self.generic_visit(statement) return statement @staticmethod def _remove_consecutive_separators(line): - sep_count = len(list( - takewhile(lambda t: 
t.type == Token.SEPARATOR, line) - )) - return line[sep_count - 1:] + sep_count = len(list(takewhile(lambda t: t.type == Token.SEPARATOR, line))) + return line[sep_count - 1 :] def _normalize_spaces(self, index, token, line_length): if token.type == Token.SEPARATOR: - count = self.indent if index == 0 else 1 + count = self.indent if index == 0 else 1 token.value = self.formatting_config.separator * count # remove trailing whitespace from last token if index == line_length - 2: diff --git a/robotidy/transformers/NormalizeSettingName.py b/robotidy/transformers/NormalizeSettingName.py index 6533326f..b18933e6 100644 --- a/robotidy/transformers/NormalizeSettingName.py +++ b/robotidy/transformers/NormalizeSettingName.py @@ -1,7 +1,4 @@ -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robot.utils.normalizing import normalize_whitespace from robotidy.decorators import check_start_end_line @@ -38,13 +35,14 @@ class NormalizeSettingName(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeSettingName.html for more examples. """ + @check_start_end_line def visit_Statement(self, node): # noqa if node.type not in Token.SETTING_TOKENS: return node name = node.data_tokens[0].value - if name.startswith('['): - name = f'[{self.normalize_name(name[1:-1])}]' + if name.startswith("["): + name = f"[{self.normalize_name(name[1:-1])}]" else: name = self.normalize_name(name) node.data_tokens[0].value = name diff --git a/robotidy/transformers/NormalizeTags.py b/robotidy/transformers/NormalizeTags.py index cdb26a2e..54851f02 100644 --- a/robotidy/transformers/NormalizeTags.py +++ b/robotidy/transformers/NormalizeTags.py @@ -1,14 +1,16 @@ from robot.api.parsing import ModelTransformer, Tags, Token, DefaultTags, ForceTags import click + class NormalizeTags(ModelTransformer): """ Normalize tag names by normalizing case and removing duplicates. 
- example usage: + + Example usage: robotidy --transform NormalizeTags:case=lowercase test.robot - Other supported cases: uppercase, titlecase. The default is lowercase. + Other supported cases: uppercase, title case. The default is lowercase. You can also run it to remove duplicates but preserve current case by setting ``normalize_case`` parameter to False: @@ -16,26 +18,32 @@ class NormalizeTags(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/NormalizeTags.html for more examples. """ - CASE_FUNCTIONS = {'lowercase': str.lower, 'uppercase': str.upper, 'titlecase': str.title} - def __init__(self, case: str = 'lowercase', normalize_case: bool = True): + CASE_FUNCTIONS = { + "lowercase": str.lower, + "uppercase": str.upper, + "titlecase": str.title, + } + + def __init__(self, case: str = "lowercase", normalize_case: bool = True): self.case = case.lower() self.normalize_case = normalize_case try: self.case_function = self.CASE_FUNCTIONS[self.case] except KeyError: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{case}' for case for NormalizeTags transformer. 
" - f"Supported cases: lowercase, uppercase, titlecase.\n") + f"Supported cases: lowercase, uppercase, titlecase.\n", + ) - def visit_Tags(self, node): + def visit_Tags(self, node): # noqa return self.normalize_tags(node, Tags, indent=True) - def visit_DefaultTags(self, node): + def visit_DefaultTags(self, node): # noqa return self.normalize_tags(node, DefaultTags) - def visit_ForceTags(self, node): + def visit_ForceTags(self, node): # noqa return self.normalize_tags(node, ForceTags) def normalize_tags(self, node, tag_class, indent=False): @@ -45,8 +53,11 @@ def normalize_tags(self, node, tag_class, indent=False): tags = self.remove_duplicates(tags) comments = node.get_tokens(Token.COMMENT) if indent: - tag_node = tag_class.from_params(tags, indent=self.formatting_config.separator, - separator=self.formatting_config.separator) + tag_node = tag_class.from_params( + tags, + indent=self.formatting_config.separator, + separator=self.formatting_config.separator, + ) else: tag_node = tag_class.from_params(tags, separator=self.formatting_config.separator) if comments: @@ -56,7 +67,8 @@ def normalize_tags(self, node, tag_class, indent=False): def convert_case(self, tags): return [self.case_function(item) for item in tags] - def remove_duplicates(self, tags): + @staticmethod + def remove_duplicates(tags): return list(dict.fromkeys(tags)) def join_tokens(self, tokens): @@ -65,4 +77,3 @@ def join_tokens(self, tokens): joined_tokens.append(Token(Token.SEPARATOR, self.formatting_config.separator)) joined_tokens.append(token) return joined_tokens - diff --git a/robotidy/transformers/OrderSettings.py b/robotidy/transformers/OrderSettings.py index 24182296..a73c1294 100644 --- a/robotidy/transformers/OrderSettings.py +++ b/robotidy/transformers/OrderSettings.py @@ -1,10 +1,5 @@ import click -from robot.api.parsing import ( - ModelTransformer, - EmptyLine, - Comment, - Token -) +from robot.api.parsing import ModelTransformer, EmptyLine, Comment, Token from robotidy.decorators 
import check_start_end_line @@ -57,22 +52,22 @@ class OrderSettings(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/OrderSettings.html for more examples. """ - def __init__(self, keyword_before: str = None, keyword_after: str = None, test_before: str = None, - test_after: str = None): - self.keyword_before, self.keyword_after, self.test_before, self.test_after = self.parse_order( - keyword_before, - keyword_after, - test_before, - test_after - ) - self.keyword_settings = { - *self.keyword_before, - *self.keyword_after - } - self.test_settings = { - *self.test_before, - *self.test_after - } + + def __init__( + self, + keyword_before: str = None, + keyword_after: str = None, + test_before: str = None, + test_after: str = None, + ): + ( + self.keyword_before, + self.keyword_after, + self.test_before, + self.test_after, + ) = self.parse_order(keyword_before, keyword_after, test_before, test_after) + self.keyword_settings = {*self.keyword_before, *self.keyword_after} + self.test_settings = {*self.test_before, *self.test_after} @staticmethod def get_order(order, default, name_map): @@ -80,15 +75,16 @@ def get_order(order, default, name_map): return default if not order: return [] - parts = order.lower().split(',') + parts = order.lower().split(",") try: return [name_map[part] for part in parts] except KeyError: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{order}' for order for OrderSettings transformer." 
- f" Custom order should be provided in comma separated list with valid setting names:\n" - f"{sorted(name_map.keys())}") + f" Custom order should be provided in comma separated list with valid setting names:\n" + f"{sorted(name_map.keys())}", + ) def parse_order(self, keword_before, keyword_after, test_before, test_after): keyword_order_before = ( @@ -106,31 +102,31 @@ def parse_order(self, keword_before, keyword_after, test_before, test_after): Token.TAGS, Token.TEMPLATE, Token.TIMEOUT, - Token.SETUP - ) - testcase_order_after = ( - Token.TEARDOWN, + Token.SETUP, ) + testcase_order_after = (Token.TEARDOWN,) keyword_map = { - 'documentation': Token.DOCUMENTATION, - 'tags': Token.TAGS, - 'timeout': Token.TIMEOUT, - 'arguments': Token.ARGUMENTS, - 'return': Token.RETURN, - 'teardown': Token.TEARDOWN + "documentation": Token.DOCUMENTATION, + "tags": Token.TAGS, + "timeout": Token.TIMEOUT, + "arguments": Token.ARGUMENTS, + "return": Token.RETURN, + "teardown": Token.TEARDOWN, } test_map = { - 'documentation': Token.DOCUMENTATION, - 'tags': Token.TAGS, - 'timeout': Token.TIMEOUT, - 'template': Token.TEMPLATE, - 'setup': Token.SETUP, - 'teardown': Token.TEARDOWN + "documentation": Token.DOCUMENTATION, + "tags": Token.TAGS, + "timeout": Token.TIMEOUT, + "template": Token.TEMPLATE, + "setup": Token.SETUP, + "teardown": Token.TEARDOWN, } - return (self.get_order(keword_before, keyword_order_before, keyword_map), - self.get_order(keyword_after, keyword_order_after, keyword_map), - self.get_order(test_before, testcase_order_before, test_map), - self.get_order(test_after, testcase_order_after, test_map)) + return ( + self.get_order(keword_before, keyword_order_before, keyword_map), + self.get_order(keyword_after, keyword_order_after, keyword_map), + self.get_order(test_before, testcase_order_before, test_map), + self.get_order(test_after, testcase_order_after, test_map), + ) @check_start_end_line def visit_Keyword(self, node): # noqa @@ -149,7 +145,7 @@ def 
order_settings(self, node, setting_types, before, after): # when after_seen is set to True then all statements go to trailing_after and last non data # will be appended after tokens defined in `after` set (like [Return]) for child in node.body: - if getattr(child, 'type', 'invalid') in setting_types: + if getattr(child, "type", "invalid") in setting_types: after_seen = after_seen or child.type in after settings[child.type] = child elif after_seen: @@ -161,8 +157,9 @@ def order_settings(self, node, setting_types, before, after): while trailing_after and isinstance(trailing_after[-1], (EmptyLine, Comment)): trailing_non_data.insert(0, trailing_after.pop()) not_settings += trailing_after - node.body = self.add_in_order(before, settings) + not_settings + \ - self.add_in_order(after, settings) + trailing_non_data + node.body = ( + self.add_in_order(before, settings) + not_settings + self.add_in_order(after, settings) + trailing_non_data + ) return node @staticmethod diff --git a/robotidy/transformers/OrderSettingsSection.py b/robotidy/transformers/OrderSettingsSection.py index ae0e9b65..23f776a2 100644 --- a/robotidy/transformers/OrderSettingsSection.py +++ b/robotidy/transformers/OrderSettingsSection.py @@ -1,13 +1,7 @@ from collections import defaultdict import click -from robot.api.parsing import ( - ModelTransformer, - Comment, - Token, - EmptyLine, - LibraryImport -) +from robot.api.parsing import ModelTransformer, Comment, Token, EmptyLine, LibraryImport from robot.libraries import STDLIBS @@ -46,40 +40,38 @@ class OrderSettingsSection(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/OrderSettingsSection.html for more examples. 
""" - def __init__(self, new_lines_between_groups: int = 1, group_order: str = None, documentation_order: str = None, - imports_order: str = 'preserved', settings_order: str = None, tags_order: str = None): + + def __init__( + self, + new_lines_between_groups: int = 1, + group_order: str = None, + documentation_order: str = None, + imports_order: str = "preserved", + settings_order: str = None, + tags_order: str = None, + ): self.last_section = None self.disabled_group = set() self.new_lines_between_groups = new_lines_between_groups self.group_order = self.parse_group_order(group_order) self.documentation_order = self.parse_order_in_group( - 'documentation', + "documentation", documentation_order, - ( - Token.DOCUMENTATION, - Token.METADATA - ), - { - 'documentation': Token.DOCUMENTATION, - 'metadata': Token.METADATA - } + (Token.DOCUMENTATION, Token.METADATA), + {"documentation": Token.DOCUMENTATION, "metadata": Token.METADATA}, ) self.imports_order = self.parse_order_in_group( - 'imports', + "imports", imports_order, - ( - Token.LIBRARY, - Token.RESOURCE, - Token.VARIABLES - ), + (Token.LIBRARY, Token.RESOURCE, Token.VARIABLES), { - 'library': Token.LIBRARY, - 'resource': Token.RESOURCE, - 'variables': Token.VARIABLES - } + "library": Token.LIBRARY, + "resource": Token.RESOURCE, + "variables": Token.VARIABLES, + }, ) self.settings_order = self.parse_order_in_group( - 'settings', + "settings", settings_order, ( Token.SUITE_SETUP, @@ -87,48 +79,37 @@ def __init__(self, new_lines_between_groups: int = 1, group_order: str = None, d Token.TEST_SETUP, Token.TEST_TEARDOWN, Token.TEST_TIMEOUT, - Token.TEST_TEMPLATE + Token.TEST_TEMPLATE, ), { - 'suite_setup': Token.SUITE_SETUP, - 'suite_teardown': Token.SUITE_TEARDOWN, - 'test_setup': Token.TEST_SETUP, - 'test_teardown': Token.TEST_TEARDOWN, - 'test_timeout': Token.TEST_TIMEOUT, - 'test_template': Token.TEST_TEMPLATE - } + "suite_setup": Token.SUITE_SETUP, + "suite_teardown": Token.SUITE_TEARDOWN, + "test_setup": 
Token.TEST_SETUP, + "test_teardown": Token.TEST_TEARDOWN, + "test_timeout": Token.TEST_TIMEOUT, + "test_template": Token.TEST_TEMPLATE, + }, ) self.tags_order = self.parse_order_in_group( - 'tags', + "tags", tags_order, - ( - Token.FORCE_TAGS, - Token.DEFAULT_TAGS - ), - { - 'force_tags': Token.FORCE_TAGS, - 'default_tags': Token.DEFAULT_TAGS - } + (Token.FORCE_TAGS, Token.DEFAULT_TAGS), + {"force_tags": Token.FORCE_TAGS, "default_tags": Token.DEFAULT_TAGS}, ) @staticmethod def parse_group_order(order): - default = ( - 'documentation', - 'imports', - 'settings', - 'tags' - ) + default = ("documentation", "imports", "settings", "tags") if order is None: return default if not order: return [] - parts = order.lower().split(',') + parts = order.lower().split(",") if any(part not in default for part in parts): raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{order}' for group_order for OrderSettingsSection transformer." - f" Custom order should be provided in comma separated list with valid group names:\n{default}" + f" Custom order should be provided in comma separated list with valid group names:\n{default}", ) return parts @@ -137,18 +118,19 @@ def parse_order_in_group(self, name, order, default, mapping): return default if not order: return [] - if order == 'preserved': + if order == "preserved": self.disabled_group.add(name) return default - parts = order.lower().split(',') + parts = order.lower().split(",") try: return [mapping[part] for part in parts] except KeyError: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{order}' for order for OrderSettingsSection transformer." 
- f" Custom order should be provided in comma separated list with valid group names:\n" - f"{sorted(mapping.keys())}") + f" Custom order should be provided in comma separated list with valid group names:\n" + f"{sorted(mapping.keys())}", + ) def visit_File(self, node): # noqa self.last_section = node.sections[-1] if node.sections else None @@ -162,33 +144,33 @@ def visit_SettingSection(self, node): # noqa comments, errors = [], [] groups = defaultdict(list) for child in node.body: - child_type = getattr(child, 'type', None) + child_type = getattr(child, "type", None) if isinstance(child, Comment): comments.append(child) elif child_type in self.documentation_order: - groups['documentation'].append((comments, child)) + groups["documentation"].append((comments, child)) comments = [] elif child_type in self.imports_order: - groups['imports'].append((comments, child)) + groups["imports"].append((comments, child)) comments = [] elif child_type in self.settings_order: - groups['settings'].append((comments, child)) + groups["settings"].append((comments, child)) comments = [] elif child_type in self.tags_order: - groups['tags'].append((comments, child)) + groups["tags"].append((comments, child)) comments = [] elif not isinstance(child, EmptyLine): errors.append(child) group_map = { - 'documentation': self.documentation_order, - 'imports': self.imports_order, - 'settings': self.settings_order, - 'tags': self.tags_order + "documentation": self.documentation_order, + "imports": self.imports_order, + "settings": self.settings_order, + "tags": self.tags_order, } new_body = [] - empty_line = EmptyLine.from_params(eol='\n') + empty_line = EmptyLine.from_params(eol="\n") order_of_groups = [group for group in self.group_order if group in groups] last_index = len(order_of_groups) - 1 for index, group in enumerate(order_of_groups): @@ -198,7 +180,7 @@ def visit_SettingSection(self, node): # noqa new_body.extend(comment_lines) new_body.append(child) else: - if group == 'imports': + if 
group == "imports": unordered = self.sort_builtin_libs(unordered) order = group_map[group] for token_type in order: @@ -221,19 +203,23 @@ def visit_SettingSection(self, node): # noqa @staticmethod def fix_eol(node): - if not getattr(node, 'tokens', None): + if not getattr(node, "tokens", None): return node - if getattr(node.tokens[-1], 'type', None) != Token.EOL: + if getattr(node.tokens[-1], "type", None) != Token.EOL: return node - node.tokens = list(node.tokens[:-1]) + [Token(Token.EOL, '\n')] + node.tokens = list(node.tokens[:-1]) + [Token(Token.EOL, "\n")] return node @staticmethod def sort_builtin_libs(statements): before, after = [], [] for comments, statement in statements: - if isinstance(statement, LibraryImport) and statement.name and statement.name != 'Remote' \ - and statement.name in STDLIBS: + if ( + isinstance(statement, LibraryImport) + and statement.name + and statement.name != "Remote" + and statement.name in STDLIBS + ): before.append((comments, statement)) else: after.append((comments, statement)) diff --git a/robotidy/transformers/OrderTags.py b/robotidy/transformers/OrderTags.py index 46b7921b..f09fd678 100644 --- a/robotidy/transformers/OrderTags.py +++ b/robotidy/transformers/OrderTags.py @@ -39,7 +39,11 @@ class OrderTags(ModelTransformer): ENABLED = False def __init__( - self, case_sensitive: bool = False, reverse: bool = False, default_tags: bool = True, force_tags: bool = True + self, + case_sensitive: bool = False, + reverse: bool = False, + default_tags: bool = True, + force_tags: bool = True, ): self.key = self.get_key(case_sensitive) self.reverse = reverse @@ -56,13 +60,19 @@ def visit_ForceTags(self, node): # noqa return self.order_tags(node, ForceTags) if self.force_tags else node def order_tags(self, node, tag_class, indent=False): - ordered_tags = sorted((tag.value for tag in node.data_tokens[1:]), key=self.key, reverse=self.reverse) + ordered_tags = sorted( + (tag.value for tag in node.data_tokens[1:]), + key=self.key, + 
reverse=self.reverse, + ) if len(ordered_tags) <= 1: return node comments = node.get_tokens(Token.COMMENT) if indent: tag_node = tag_class.from_params( - ordered_tags, indent=self.formatting_config.separator, separator=self.formatting_config.separator + ordered_tags, + indent=self.formatting_config.separator, + separator=self.formatting_config.separator, ) else: tag_node = tag_class.from_params(ordered_tags, separator=self.formatting_config.separator) diff --git a/robotidy/transformers/RemoveEmptySettings.py b/robotidy/transformers/RemoveEmptySettings.py index b663bd81..7862159c 100644 --- a/robotidy/transformers/RemoveEmptySettings.py +++ b/robotidy/transformers/RemoveEmptySettings.py @@ -1,10 +1,7 @@ import ast import click -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robotidy.decorators import check_start_end_line @@ -38,17 +35,24 @@ class RemoveEmptySettings(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/RemoveEmptySettings.html for more examples. """ - def __init__(self, work_mode: str = 'overwrite_ok', more_explicit: bool = True): - if work_mode not in ('overwrite_ok', 'always'): + + def __init__(self, work_mode: str = "overwrite_ok", more_explicit: bool = True): + if work_mode not in ("overwrite_ok", "always"): raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: {work_mode} for work_mode for RemoveEmptySettings transformer." 
- f" Possible values:\n overwrite_ok\n always" + f" Possible values:\n overwrite_ok\n always", ) self.work_mode = work_mode self.more_explicit = more_explicit self.overwritten_settings = set() - self.child_types = {Token.SETUP, Token.TEARDOWN, Token.TIMEOUT, Token.TEMPLATE, Token.TAGS} + self.child_types = { + Token.SETUP, + Token.TEARDOWN, + Token.TIMEOUT, + Token.TEMPLATE, + Token.TAGS, + } @check_start_end_line def visit_Statement(self, node): # noqa @@ -56,23 +60,26 @@ def visit_Statement(self, node): # noqa if node.type not in Token.SETTING_TOKENS or len(node.data_tokens) != 1: return node # when empty and not overwriting anything - remove - if node.type not in self.child_types or self.work_mode == 'always' or \ - node.type not in self.overwritten_settings: + if ( + node.type not in self.child_types + or self.work_mode == "always" + or node.type not in self.overwritten_settings + ): return None if self.more_explicit: - indent = node.tokens[0].value if node.tokens[0].type == Token.SEPARATOR else '' + indent = node.tokens[0].value if node.tokens[0].type == Token.SEPARATOR else "" setting_token = node.data_tokens[0] node.tokens = [ Token(Token.SEPARATOR, indent), setting_token, Token(Token.SEPARATOR, self.formatting_config.separator), - Token(Token.ARGUMENT, 'NONE'), - Token(Token.EOL, '\n') + Token(Token.ARGUMENT, "NONE"), + Token(Token.EOL, "\n"), ] return node def visit_File(self, node): # noqa - if self.work_mode == 'overwrite_ok': + if self.work_mode == "overwrite_ok": self.overwritten_settings = self.find_overwritten_settings(node) self.generic_visit(node) self.overwritten_settings = set() diff --git a/robotidy/transformers/RenameKeywords.py b/robotidy/transformers/RenameKeywords.py index b215dcd4..e51b565b 100644 --- a/robotidy/transformers/RenameKeywords.py +++ b/robotidy/transformers/RenameKeywords.py @@ -39,19 +39,25 @@ class RenameKeywords(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/RenameKeywords.html for more 
examples. """ + ENABLED = False - def __init__(self, replace_pattern: Optional[str] = None, replace_to: Optional[str] = None, - remove_underscores: bool = True): + def __init__( + self, + replace_pattern: Optional[str] = None, + replace_to: Optional[str] = None, + remove_underscores: bool = True, + ): self.remove_underscores = remove_underscores try: self.replace_pattern = re.compile(replace_pattern) if replace_pattern is not None else None except re.error as err: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{replace_pattern}' for replace_pattern in RenameKeywords" - f" transformer. It should be a valid regex expression. Regex error: '{err.msg}'") - self.replace_to = '' if replace_to is None else replace_to + f" transformer. It should be a valid regex expression. Regex error: '{err.msg}'", + ) + self.replace_to = "" if replace_to is None else replace_to @check_start_end_line def rename_node(self, node, type_of_name): @@ -59,17 +65,17 @@ def rename_node(self, node, type_of_name): if not token or not token.value: return node values = [] - for value in token.value.split('.'): - if isinstance(node, KeywordCall) and '.' in value: - library, value = token.value.rsplit('.', maxsplit=1) + for value in token.value.split("."): + if isinstance(node, KeywordCall) and "." 
in value: + library, value = token.value.rsplit(".", maxsplit=1) if self.replace_pattern is not None: value = self.replace_pattern.sub(repl=self.replace_to, string=value) - if self.remove_underscores and value != '_': - value = value.replace('_', ' ') - value = re.sub(r'\s{2,}', ' ', value) # replace two or more spaces by one + if self.remove_underscores and value != "_": + value = value.replace("_", " ") + value = re.sub(r"\s{2,}", " ", value) # replace two or more spaces by one value = "".join([a if a.isupper() else b for a, b in zip(value, string.capwords(value.strip()))]) values.append(value) - token.value = '.'.join(values) + token.value = ".".join(values) return node def visit_KeywordName(self, node): # noqa diff --git a/robotidy/transformers/RenameTestCases.py b/robotidy/transformers/RenameTestCases.py index e51e3e56..c6815c25 100644 --- a/robotidy/transformers/RenameTestCases.py +++ b/robotidy/transformers/RenameTestCases.py @@ -41,10 +41,11 @@ def __init__(self, replace_pattern: Optional[str] = None, replace_to: Optional[s self.replace_pattern = re.compile(replace_pattern) if replace_pattern is not None else None except re.error as err: raise click.BadOptionUsage( - option_name='transform', + option_name="transform", message=f"Invalid configurable value: '{replace_pattern}' for replace_pattern in RenameTestCases" - f" transformer. It should be a valid regex expression. Regex error: '{err.msg}'") - self.replace_to = '' if replace_to is None else replace_to + f" transformer. It should be a valid regex expression. 
Regex error: '{err.msg}'", + ) + self.replace_to = "" if replace_to is None else replace_to @check_start_end_line def visit_TestCaseName(self, node): # noqa @@ -53,7 +54,7 @@ def visit_TestCaseName(self, node): # noqa token.value = token.value[0].upper() + token.value[1:] if self.replace_pattern is not None: token.value = self.replace_pattern.sub(repl=self.replace_to, string=token.value) - if token.value.endswith('.'): + if token.value.endswith("."): token.value = token.value[:-1] token.value = token.value.strip() return node diff --git a/robotidy/transformers/ReplaceRunKeywordIf.py b/robotidy/transformers/ReplaceRunKeywordIf.py index a354ebde..341f49e9 100644 --- a/robotidy/transformers/ReplaceRunKeywordIf.py +++ b/robotidy/transformers/ReplaceRunKeywordIf.py @@ -6,7 +6,7 @@ IfHeader, ElseHeader, ElseIfHeader, - KeywordCall + KeywordCall, ) from robotidy.utils import normalize_name, after_last_dot from robotidy.decorators import check_start_end_line @@ -69,11 +69,12 @@ class ReplaceRunKeywordIf(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/ReplaceRunKeywordIf.html for more examples. 
""" + @check_start_end_line def visit_KeywordCall(self, node): # noqa if not node.keyword: return node - if after_last_dot(normalize_name(node.keyword)) == 'runkeywordif': + if after_last_dot(normalize_name(node.keyword)) == "runkeywordif": return self.create_branched(node) return node @@ -83,45 +84,41 @@ def create_branched(self, node): raw_args = node.get_tokens(Token.ARGUMENT) if len(raw_args) < 2: return node - end = End([ - separator, - Token(Token.END), - Token(Token.EOL) - ]) + end = End([separator, Token(Token.END), Token(Token.EOL)]) prev_if = None - for branch in reversed(list(self.split_args_on_delimiters(raw_args, ('ELSE', 'ELSE IF'), assign=assign))): - if branch[0].value == 'ELSE': + for branch in reversed(list(self.split_args_on_delimiters(raw_args, ("ELSE", "ELSE IF"), assign=assign))): + if branch[0].value == "ELSE": if len(branch) < 2: return node args = branch[1:] if self.check_for_useless_set_variable(args, assign): continue - header = ElseHeader([ - separator, - Token(Token.ELSE), - Token(Token.EOL) - ]) - elif branch[0].value == 'ELSE IF': + header = ElseHeader([separator, Token(Token.ELSE), Token(Token.EOL)]) + elif branch[0].value == "ELSE IF": if len(branch) < 3: return node - header = ElseIfHeader([ - separator, - Token(Token.ELSE_IF), - Token(Token.SEPARATOR, self.formatting_config.separator), - branch[1], - Token(Token.EOL) - ]) + header = ElseIfHeader( + [ + separator, + Token(Token.ELSE_IF), + Token(Token.SEPARATOR, self.formatting_config.separator), + branch[1], + Token(Token.EOL), + ] + ) args = branch[2:] else: if len(branch) < 2: return node - header = IfHeader([ - separator, - Token(Token.IF), - Token(Token.SEPARATOR, self.formatting_config.separator), - branch[0], - Token(Token.EOL) - ]) + header = IfHeader( + [ + separator, + Token(Token.IF), + Token(Token.SEPARATOR, self.formatting_config.separator), + branch[0], + Token(Token.EOL), + ] + ) args = branch[1:] keywords = self.create_keywords(args, assign, separator.value) 
if_block = If(header=header, body=keywords, orelse=prev_if) @@ -130,17 +127,21 @@ def create_branched(self, node): return prev_if def create_keywords(self, arg_tokens, assign, indent): - if normalize_name(arg_tokens[0].value) == 'runkeywords': - return [self.args_to_keyword(keyword[1:], assign, indent) - for keyword in self.split_args_on_delimiters(arg_tokens, ('AND',))] + if normalize_name(arg_tokens[0].value) == "runkeywords": + return [ + self.args_to_keyword(keyword[1:], assign, indent) + for keyword in self.split_args_on_delimiters(arg_tokens, ("AND",)) + ] return self.args_to_keyword(arg_tokens, assign, indent) def args_to_keyword(self, arg_tokens, assign, indent): - separated_tokens = list(insert_separators( - indent, - [*assign, Token(Token.KEYWORD, arg_tokens[0].value), *arg_tokens[1:]], - self.formatting_config.separator - )) + separated_tokens = list( + insert_separators( + indent, + [*assign, Token(Token.KEYWORD, arg_tokens[0].value), *arg_tokens[1:]], + self.formatting_config.separator, + ) + ) return KeywordCall.from_tokens(separated_tokens) @staticmethod @@ -150,18 +151,14 @@ def split_args_on_delimiters(args, delimiters, assign=None): for split_point in split_points: yield args[prev_index:split_point] prev_index = split_point - yield args[prev_index:len(args)] - if assign and 'ELSE' in delimiters and not any(arg.value == 'ELSE' for arg in args): - values = [Token(Token.ARGUMENT, '${None}')] * len(assign) - yield [Token(Token.ELSE), Token(Token.ARGUMENT, 'Set Variable'), *values] + yield args[prev_index : len(args)] + if assign and "ELSE" in delimiters and not any(arg.value == "ELSE" for arg in args): + values = [Token(Token.ARGUMENT, "${None}")] * len(assign) + yield [Token(Token.ELSE), Token(Token.ARGUMENT, "Set Variable"), *values] @staticmethod def check_for_useless_set_variable(tokens, assign): - if ( - not assign - or normalize_name(tokens[0].value) != 'setvariable' - or len(tokens[1:]) != len(assign) - ): + if not assign or 
normalize_name(tokens[0].value) != "setvariable" or len(tokens[1:]) != len(assign): return False for var, var_assign in zip(tokens[1:], assign): if normalize_name(var.value) != normalize_name(var_assign.value): diff --git a/robotidy/transformers/SmartSortKeywords.py b/robotidy/transformers/SmartSortKeywords.py index 927a06d2..2d582051 100644 --- a/robotidy/transformers/SmartSortKeywords.py +++ b/robotidy/transformers/SmartSortKeywords.py @@ -43,9 +43,15 @@ class SmartSortKeywords(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/SmartSortKeywords.html for more examples. """ + ENABLED = False - def __init__(self, case_insensitive=True, ignore_leading_underscore=False, ignore_other_underscore=True): + def __init__( + self, + case_insensitive=True, + ignore_leading_underscore=False, + ignore_other_underscore=True, + ): self.ci = case_insensitive self.ilu = ignore_leading_underscore self.iou = ignore_other_underscore @@ -85,9 +91,9 @@ def sort_function(self, kw): if self.ci: name = name.casefold().upper() # to make sure that letters go before underscore if self.ilu: - name = name.lstrip('_') + name = name.lstrip("_") if self.iou: - index = len(name) - len(name.lstrip('_')) + index = len(name) - len(name.lstrip("_")) name = name[:index] + name[index:].replace("_", " ") return name diff --git a/robotidy/transformers/SplitTooLongLine.py b/robotidy/transformers/SplitTooLongLine.py index 1bcb538a..1e08cb66 100644 --- a/robotidy/transformers/SplitTooLongLine.py +++ b/robotidy/transformers/SplitTooLongLine.py @@ -1,7 +1,4 @@ -from robot.api.parsing import ( - ModelTransformer, - Token -) +from robot.api.parsing import ModelTransformer, Token from robotidy.decorators import check_start_end_line @@ -35,6 +32,7 @@ class SplitTooLongLine(ModelTransformer): See https://robotidy.readthedocs.io/en/latest/transformers/SplitTooLongLine.html for more examples. 
""" + def __init__(self, line_length: int = 120, split_on_every_arg: bool = False): super().__init__() self.line_length = line_length @@ -71,7 +69,7 @@ def split_keyword_call(self, node): # original comment, we need a lookback on the separator tokens. last_separator = None - rest = node.tokens[node.tokens.index(keyword) + 1:] + rest = node.tokens[node.tokens.index(keyword) + 1 :] for token in rest: if token.type == Token.SEPARATOR: last_separator = token @@ -84,14 +82,14 @@ def split_keyword_call(self, node): # # Notice the third value not starting with a hash - that's what this # condition is about: - if not str(token).startswith('#'): + if not str(token).startswith("#"): # -2 because -1 is the EOL comments[-2].value += last_separator.value + token.value else: comments += [indent, token, EOL] elif token.type == Token.ARGUMENT: - if token.value == '': - token.value = '${EMPTY}' + if token.value == "": + token.value = "${EMPTY}" if self.cols_remaining(line + [separator, token]) == 0: line.append(EOL) tail += line @@ -122,4 +120,4 @@ def last_line_of(tokens): """Return the tokens from after the last EOL in the given list""" if EOL not in tokens: return tokens - return tokens[len(tokens) - tokens[::-1].index(EOL):] + return tokens[len(tokens) - tokens[::-1].index(EOL) :] diff --git a/robotidy/transformers/__init__.py b/robotidy/transformers/__init__.py index f62c6f0c..0ac38c3a 100644 --- a/robotidy/transformers/__init__.py +++ b/robotidy/transformers/__init__.py @@ -16,27 +16,27 @@ TRANSFORMERS = [ - 'AddMissingEnd', - 'NormalizeSeparators', - 'DiscardEmptySections', - 'MergeAndOrderSections', - 'RemoveEmptySettings', - 'NormalizeAssignments', - 'OrderSettings', - 'OrderSettingsSection', - 'NormalizeTags', - 'OrderTags', - 'AlignSettingsSection', - 'AlignVariablesSection', - 'AlignTestCases', - 'NormalizeNewLines', - 'NormalizeSectionHeaderName', - 'NormalizeSettingName', - 'ReplaceRunKeywordIf', - 'SplitTooLongLine', - 'SmartSortKeywords', - 'RenameTestCases', - 
'RenameKeywords' + "AddMissingEnd", + "NormalizeSeparators", + "DiscardEmptySections", + "MergeAndOrderSections", + "RemoveEmptySettings", + "NormalizeAssignments", + "OrderSettings", + "OrderSettingsSection", + "NormalizeTags", + "OrderTags", + "AlignSettingsSection", + "AlignVariablesSection", + "AlignTestCases", + "NormalizeNewLines", + "NormalizeSectionHeaderName", + "NormalizeSettingName", + "ReplaceRunKeywordIf", + "SplitTooLongLine", + "SmartSortKeywords", + "RenameTestCases", + "RenameKeywords", ] @@ -48,20 +48,22 @@ def import_transformer(name, args): try: return Importer().import_class_or_module(name, instantiate_with_args=args) except DataError as err: - if 'Creating instance failed' in str(err): + if "Creating instance failed" in str(err): raise err from None - short_name = name.split('.')[-1] + short_name = name.split(".")[-1] similar_finder = RecommendationFinder() similar = similar_finder.find_similar(short_name, TRANSFORMERS) - raise ImportTransformerError(f"Importing transformer '{short_name}' failed. " - f"Verify if correct name or configuration was provided.{similar}") from None + raise ImportTransformerError( + f"Importing transformer '{short_name}' failed. 
" + f"Verify if correct name or configuration was provided.{similar}" + ) from None def load_transformer(name, args): - if not args.get('enabled', True): + if not args.get("enabled", True): return None - args = [f'{key}={value}' for key, value in args.items() if key != 'enabled'] - import_name = f'robotidy.transformers.{name}' if name in TRANSFORMERS else name + args = [f"{key}={value}" for key, value in args.items() if key != "enabled"] + import_name = f"robotidy.transformers.{name}" if name in TRANSFORMERS else name return import_transformer(import_name, args) @@ -69,16 +71,16 @@ def join_configs(args, config): # args are from --transform Name:param=value and config is from --configure temp_args = {} for arg in chain(args, config): - param, value = arg.split('=', maxsplit=1) - if param == 'enabled': - temp_args[param] = value.lower() == 'true' + param, value = arg.split("=", maxsplit=1) + if param == "enabled": + temp_args[param] = value.lower() == "true" else: temp_args[param] = value return temp_args def load_transformers(allowed_transformers, config, allow_disabled=False, force_order=False): - """ Dynamically load all classes from this file with attribute `name` defined in allowed_transformers """ + """Dynamically load all classes from this file with attribute `name` defined in allowed_transformers""" loaded_transformers = [] allowed_mapped = {name: args for name, args in allowed_transformers} if allowed_transformers else {} if not force_order: @@ -88,7 +90,7 @@ def load_transformers(allowed_transformers, config, allow_disabled=False, force_ imported_class = load_transformer(name, args) if imported_class is None: continue - enabled = getattr(imported_class, 'ENABLED', True) or args.get('enabled', False) + enabled = getattr(imported_class, "ENABLED", True) or args.get("enabled", False) if allowed_mapped or allow_disabled or enabled: loaded_transformers.append(imported_class) for name in allowed_mapped: diff --git a/robotidy/utils.py b/robotidy/utils.py index 
245cb6fc..b97cbedd 100644 --- a/robotidy/utils.py +++ b/robotidy/utils.py @@ -3,10 +3,7 @@ from typing import List import difflib -from robot.api.parsing import ( - ModelVisitor, - Token -) +from robot.api.parsing import ModelVisitor, Token from robot.parsing.model import Statement from robot.utils.robotio import file_writer from click import style @@ -16,8 +13,9 @@ class StatementLinesCollector(ModelVisitor): """ Used to get writeable presentation of Robot Framework model. """ + def __init__(self, model): - self.text = '' + self.text = "" self.visit(model) def visit_Statement(self, node): # noqa @@ -29,21 +27,28 @@ def __eq__(self, other): class GlobalFormattingConfig: - def __init__(self, space_count: int, line_sep: str, start_line: int, end_line: int, separator: str): + def __init__( + self, + space_count: int, + line_sep: str, + start_line: int, + end_line: int, + separator: str, + ): self.start_line = start_line self.end_line = end_line self.space_count = space_count - if separator == 'space': - self.separator = ' ' * space_count - elif separator == 'tab': + if separator == "space": + self.separator = " " * space_count + elif separator == "tab": self.space_count = space_count - self.separator = '\t' + self.separator = "\t" - if line_sep == 'windows': - self.line_sep = '\r\n' - elif line_sep == 'unix': - self.line_sep = '\n' + if line_sep == "windows": + self.line_sep = "\r\n" + elif line_sep == "unix": + self.line_sep = "\n" else: self.line_sep = os.linesep @@ -54,21 +59,21 @@ def decorate_diff_with_color(contents: List[str]) -> str: if line.startswith("+++") or line.startswith("---"): line = style(line, bold=True, reset=True) elif line.startswith("@@"): - line = style(line, fg='cyan', reset=True) + line = style(line, fg="cyan", reset=True) elif line.startswith("+"): - line = style(line, fg='green', reset=True) + line = style(line, fg="green", reset=True) elif line.startswith("-"): - line = style(line, fg='red', reset=True) + line = style(line, fg="red", 
reset=True) contents[i] = line - return ''.join(contents) + return "".join(contents) def normalize_name(name): - return name.lower().replace('_', '').replace(' ', '') + return name.lower().replace("_", "").replace(" ", "") def after_last_dot(name): - return name.split('.')[-1] + return name.split(".")[-1] def node_within_lines(node_start, node_end, start_line, end_line): @@ -89,8 +94,12 @@ def node_outside_selection(node, formatting_config): Contrary to ``node_within_lines`` it just checks if node is fully outside selected lines. Partial selection is useful for transformers like aligning code. """ - if formatting_config.start_line and formatting_config.start_line > node.end_lineno or \ - formatting_config.end_line and formatting_config.end_line < node.lineno: + if ( + formatting_config.start_line + and formatting_config.start_line > node.end_lineno + or formatting_config.end_line + and formatting_config.end_line < node.lineno + ): return True return False @@ -106,7 +115,7 @@ def split_args_from_name_or_path(name): index = _get_arg_separator_index_from_name_or_path(name) if index == -1: return name, [] - args = _escaped_split(name[index+1:], name[index]) + args = _escaped_split(name[index + 1 :], name[index]) name = name[:index] return name, args @@ -116,28 +125,28 @@ def _escaped_split(string, delim): current = [] itr = iter(string) for ch in itr: - if ch == '\\': + if ch == "\\": try: - current.append('\\') + current.append("\\") current.append(next(itr)) except StopIteration: pass elif ch == delim: - ret.append(''.join(current)) + ret.append("".join(current)) current = [] else: current.append(ch) if current: - ret.append(''.join(current)) + ret.append("".join(current)) return ret def _get_arg_separator_index_from_name_or_path(name): - colon_index = name.find(':') + colon_index = name.find(":") # Handle absolute Windows paths - if colon_index == 1 and name[2:3] in ('/', '\\'): - colon_index = name.find(':', colon_index+1) - semicolon_index = name.find(';') + if 
colon_index == 1 and name[2:3] in ("/", "\\"): + colon_index = name.find(":", colon_index + 1) + semicolon_index = name.find(";") if colon_index == -1: return semicolon_index if semicolon_index == -1: @@ -173,15 +182,15 @@ def tokens_by_lines(node): def left_align(node): - """ remove leading separator token """ + """remove leading separator token""" tokens = list(node.tokens) if tokens: - tokens[0].value = tokens[0].value.lstrip(' \t') + tokens[0].value = tokens[0].value.lstrip(" \t") return Statement.from_tokens(tokens) def remove_rst_formatting(text): - return text.replace('::', ':').replace("``", "'") + return text.replace("::", ":").replace("``", "'") class RecommendationFinder: @@ -190,46 +199,48 @@ def find_similar(self, name, candidates): norm_cand = self.get_normalized_candidates(candidates) matches = self.find(norm_name, norm_cand.keys()) if not matches: - return '' + return "" matches = self.get_original_candidates(matches, norm_cand) if len(matches) == 1 and matches[0] == name: - return '' - suggestion = ' Did you mean:\n' - suggestion += '\n'.join(f' {match}' for match in matches) + return "" + suggestion = " Did you mean:\n" + suggestion += "\n".join(f" {match}" for match in matches) return suggestion def find(self, name, candidates, max_matches=2): - """ Return a list of close matches to `name` from `candidates`. """ + """Return a list of close matches to `name` from `candidates`.""" if not name or not candidates: return [] cutoff = self._calculate_cutoff(name) - return difflib.get_close_matches( - name, candidates, n=max_matches, cutoff=cutoff - ) + return difflib.get_close_matches(name, candidates, n=max_matches, cutoff=cutoff) @staticmethod - def _calculate_cutoff(string, min_cutoff=.5, max_cutoff=.85, - step=.03): - """ The longer the string the bigger required cutoff. 
""" + def _calculate_cutoff(string, min_cutoff=0.5, max_cutoff=0.85, step=0.03): + """The longer the string the bigger required cutoff.""" cutoff = min_cutoff + len(string) * step return min(cutoff, max_cutoff) @staticmethod def get_original_candidates(candidates, norm_candidates): - """ Map found normalized candidates to unique original candidates. """ + """Map found normalized candidates to unique original candidates.""" return sorted(list(set(c for cand in candidates for c in norm_candidates[cand]))) @staticmethod def get_normalized_candidates(candidates): norm_cand = {cand.lower(): [cand] for cand in candidates} # most popular typos - norm_cand['align'] = ['AlignSettingsSection', 'AlignVariablesSection'] - norm_cand['normalize'] = ['NormalizeAssignments', 'NormalizeNewLines', 'NormalizeSectionHeaderName', - 'NormalizeSeparators', 'NormalizeSettingName'] - norm_cand['order'] = ['OrderSettings', 'OrderSettingsSection'] - norm_cand['alignsettings'] = ['AlignSettingsSection'] - norm_cand['alignvariables'] = ['AlignVariablesSection'] - norm_cand['assignmentnormalizer'] = ['NormalizeAssignments'] + norm_cand["align"] = ["AlignSettingsSection", "AlignVariablesSection"] + norm_cand["normalize"] = [ + "NormalizeAssignments", + "NormalizeNewLines", + "NormalizeSectionHeaderName", + "NormalizeSeparators", + "NormalizeSettingName", + ] + norm_cand["order"] = ["OrderSettings", "OrderSettingsSection"] + norm_cand["alignsettings"] = ["AlignSettingsSection"] + norm_cand["alignvariables"] = ["AlignVariablesSection"] + norm_cand["assignmentnormalizer"] = ["NormalizeAssignments"] return norm_cand diff --git a/robotidy/version.py b/robotidy/version.py index 51ed7c48..e4adfb83 100644 --- a/robotidy/version.py +++ b/robotidy/version.py @@ -1 +1 @@ -__version__ = '1.5.1' +__version__ = "1.6.0"