diff --git a/examples/pytorch/text-generation/run_generation.py b/examples/pytorch/text-generation/run_generation.py
index f89ca96eefd7..7784580e033c 100755
--- a/examples/pytorch/text-generation/run_generation.py
+++ b/examples/pytorch/text-generation/run_generation.py
@@ -63,7 +63,7 @@
 )
 logger = logging.getLogger(__name__)
 
-MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
+MAX_LENGTH = 10000  # Hardcoded max length to avoid infinite loop
 
 MODEL_CLASSES = {
     "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
diff --git a/pyproject.toml b/pyproject.toml
index b8f4676a2b46..5d3a9436eb3f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,10 +22,15 @@ line-length = 119
 # SIM300: Yoda condition detected
 # SIM212: Checks for if expressions that check against a negated condition.
 # SIM905: Consider using a list literal instead of `str.split`
-ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905"]
+# UP009: UTF-8 encoding declaration is unnecessary
+# UP015: Unnecessary mode argument
+# UP031: Use format specifiers instead of percent format
+# UP004: Class `XXX` inherits from `object`
+# UP028: Checks for for loops that can be replaced with yield from expressions
+ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905", "UP009", "UP015", "UP031", "UP028", "UP004"]
 # RUF013: Checks for the use of implicit Optional
 # in type annotations when the default parameter value is None.
-select = ["C", "E", "F", "I", "W", "RUF013", "UP006", "PERF102", "PLC1802", "PLC0208","SIM"]
+select = ["C", "E", "F", "I", "W", "RUF013", "PERF102", "PLC1802", "PLC0208", "SIM", "UP"]
 extend-safe-fixes = ["UP006"]
 
 # Ignore import violations in all `__init__.py` files.
@@ -33,6 +38,7 @@ extend-safe-fixes = ["UP006"]
 "__init__.py" = ["E402", "F401", "F403", "F811"]
 "src/transformers/file_utils.py" = ["F401"]
 "src/transformers/utils/dummy_*.py" = ["F401"]
+"examples/legacy/**/*.py" = ["UP"]
 
 [tool.ruff.lint.isort]
 lines-after-imports = 2
diff --git a/src/transformers/audio_utils.py b/src/transformers/audio_utils.py
index fa9a7d3d8757..e848f558738c 100644
--- a/src/transformers/audio_utils.py
+++ b/src/transformers/audio_utils.py
@@ -21,8 +21,9 @@
 import io
 import os
 import warnings
+from collections.abc import Sequence
 from io import BytesIO
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Optional, Union
 
 import numpy as np
 import requests
diff --git a/src/transformers/commands/serving.py b/src/transformers/commands/serving.py
index d7b852c9e932..2dace1512caf 100644
--- a/src/transformers/commands/serving.py
+++ b/src/transformers/commands/serving.py
@@ -699,7 +699,7 @@ def get_all_models():
 
         uvicorn.run(app, host=self.args.host, port=self.args.port, log_level=self.args.log_level)
 
-    @functools.lru_cache(maxsize=None)
+    @functools.cache
     def get_gen_models(self) -> list[dict[str, any]]:
         """
         This is by no means a limit to which models may be instantiated with `transformers serve`: any chat-based
diff --git a/src/transformers/modeling_layers.py b/src/transformers/modeling_layers.py
index eea5595dc49e..4f4a599693dd 100644
--- a/src/transformers/modeling_layers.py
+++ b/src/transformers/modeling_layers.py
@@ -95,7 +95,7 @@ def __call__(self, *args, **kwargs):
 
 
 @auto_docstring
-class GenericForSequenceClassification(object):
+class GenericForSequenceClassification:
     base_model_prefix = "model"
 
     def __init__(self, config):
@@ -170,7 +170,7 @@ def forward(
 
 
 @auto_docstring
-class GenericForQuestionAnswering(object):
+class GenericForQuestionAnswering:
     base_model_prefix = "model"
 
     def __init__(self, config):
@@ -231,7 +231,7 @@ def forward(
 
 
 @auto_docstring
-class GenericForTokenClassification(object):
+class GenericForTokenClassification:
     base_model_prefix = "model"
 
     def __init__(self, config):
diff --git a/src/transformers/models/florence2/modular_florence2.py b/src/transformers/models/florence2/modular_florence2.py
index 417e296071de..f11dd2a9137e 100644
--- a/src/transformers/models/florence2/modular_florence2.py
+++ b/src/transformers/models/florence2/modular_florence2.py
@@ -994,7 +994,7 @@ def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None)
                 instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
                 parsed_dict["description_with_bboxes_or_polygons"] = instances
             else:
-                raise ValueError("task {} is not supported".format(task))
+                raise ValueError(f"task {task} is not supported")
 
         return parsed_dict
 
diff --git a/src/transformers/models/florence2/processing_florence2.py b/src/transformers/models/florence2/processing_florence2.py
index 96dd81a68ab6..9e56d2e3d8fc 100644
--- a/src/transformers/models/florence2/processing_florence2.py
+++ b/src/transformers/models/florence2/processing_florence2.py
@@ -795,7 +795,7 @@ def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None)
                 instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
                 parsed_dict["description_with_bboxes_or_polygons"] = instances
             else:
-                raise ValueError("task {} is not supported".format(task))
+                raise ValueError(f"task {task} is not supported")
 
         return parsed_dict
 
diff --git a/src/transformers/models/gemma3n/convert_gemma3n_weights.py b/src/transformers/models/gemma3n/convert_gemma3n_weights.py
index 867098a4667c..6b77bbf766c1 100644
--- a/src/transformers/models/gemma3n/convert_gemma3n_weights.py
+++ b/src/transformers/models/gemma3n/convert_gemma3n_weights.py
@@ -519,7 +519,7 @@ def convert_vision_weights(
     weights: np.ndarray,
 ) -> Iterable[tuple[str, np.ndarray]]:
     def generate_base_path(path: str, block_type: str) -> tuple[str, tuple[int, int]]:
-        re_str = r"{}(\d+)/".format(block_type)
+        re_str = rf"{block_type}(\d+)/"
         re_pattern = re.compile(re_str)
         match = re.search(re_pattern, path).group(1)
         idx = abs(int(match)) - 1
diff --git a/src/transformers/models/sam2/modeling_sam2.py b/src/transformers/models/sam2/modeling_sam2.py
index c182a2e999f3..4bce174cc9e9 100644
--- a/src/transformers/models/sam2/modeling_sam2.py
+++ b/src/transformers/models/sam2/modeling_sam2.py
@@ -1487,9 +1487,7 @@ def forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
 
         image_positional_embeddings = self.get_image_wide_positional_embeddings()
diff --git a/src/transformers/models/sam2/modular_sam2.py b/src/transformers/models/sam2/modular_sam2.py
index adc373021258..3d856ad41188 100644
--- a/src/transformers/models/sam2/modular_sam2.py
+++ b/src/transformers/models/sam2/modular_sam2.py
@@ -1384,9 +1384,7 @@ def forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
 
         image_positional_embeddings = self.get_image_wide_positional_embeddings()
diff --git a/src/transformers/models/sam2_video/modeling_sam2_video.py b/src/transformers/models/sam2_video/modeling_sam2_video.py
index e48e21c82fcd..0a0a308c1fe9 100644
--- a/src/transformers/models/sam2_video/modeling_sam2_video.py
+++ b/src/transformers/models/sam2_video/modeling_sam2_video.py
@@ -21,8 +21,9 @@
 
 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import numpy as np
 import torch
@@ -988,7 +989,7 @@ def __init__(self, config: Sam2VideoConfig):
         )  # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )
 
@@ -1923,9 +1924,7 @@ def _single_frame_forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
diff --git a/src/transformers/models/sam2_video/modular_sam2_video.py b/src/transformers/models/sam2_video/modular_sam2_video.py
index 00494e6558b6..0b6cef4e910a 100644
--- a/src/transformers/models/sam2_video/modular_sam2_video.py
+++ b/src/transformers/models/sam2_video/modular_sam2_video.py
@@ -16,8 +16,9 @@
 
 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import numpy as np
 import torch
@@ -1326,7 +1327,7 @@ def __init__(self, config: Sam2VideoConfig):
         )  # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )
 
@@ -1634,9 +1635,7 @@ def _single_frame_forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
diff --git a/src/transformers/pipelines/keypoint_matching.py b/src/transformers/pipelines/keypoint_matching.py
index cb7f9d2e5eb9..11afd3d4326c 100644
--- a/src/transformers/pipelines/keypoint_matching.py
+++ b/src/transformers/pipelines/keypoint_matching.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Sequence, TypedDict, Union
+from collections.abc import Sequence
+from typing import Any, TypedDict, Union
 
 from typing_extensions import TypeAlias, overload
 
@@ -29,8 +30,16 @@
 
 ImagePair: TypeAlias = Sequence[Union["Image.Image", str]]
 
-Keypoint = TypedDict("Keypoint", {"x": float, "y": float})
-Match = TypedDict("Match", {"keypoint_image_0": Keypoint, "keypoint_image_1": Keypoint, "score": float})
+
+class Keypoint(TypedDict):
+    x: float
+    y: float
+
+
+class Match(TypedDict):
+    keypoint_image_0: Keypoint
+    keypoint_image_1: Keypoint
+    score: float
 
 
 def validate_image_pairs(images: Any) -> Sequence[Sequence[ImagePair]]:
diff --git a/src/transformers/video_processing_utils.py b/src/transformers/video_processing_utils.py
index 90aa5f43de9e..8b72069e4807 100644
--- a/src/transformers/video_processing_utils.py
+++ b/src/transformers/video_processing_utils.py
@@ -694,8 +694,8 @@ def get_video_processor_dict(
                 _raise_exceptions_for_missing_entries=False,
             )
             resolved_video_processor_file = resolved_video_processor_files[0]
-        except EnvironmentError:
-            # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
+        except OSError:
+            # Raise any OS error raised by `cached_file`. It will have a helpful error message adapted to
             # the original exception.
             raise
        except Exception:
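
Note for reviewers (not part of the patch): below is a minimal, self-contained sketch of the pyupgrade-style (UP) patterns this diff standardizes on, using only the standard library. The `Greeter` and `first_keypoint` names are invented for illustration and do not exist in transformers.

# Illustrative only: the UP rewrites applied above, collected in one runnable snippet.
import functools
from collections.abc import Sequence  # import Sequence from collections.abc rather than typing
from typing import TypedDict


class Keypoint(TypedDict):  # class-based TypedDict instead of TypedDict("Keypoint", {...})
    x: float
    y: float


class Greeter:  # UP004: no explicit inheritance from `object`
    @functools.cache  # shorthand for functools.lru_cache(maxsize=None)
    def unsupported_task_message(self, task: str) -> str:
        # UP032-style: f-string instead of "task {} is not supported".format(task)
        return f"task {task} is not supported"


def first_keypoint(points: Sequence[Keypoint]) -> Keypoint:
    # collections.abc.Sequence is subscriptable at runtime on Python 3.9+
    return points[0]


if __name__ == "__main__":
    print(Greeter().unsupported_task_message("ocr"))
    print(first_keypoint([{"x": 0.5, "y": 1.0}]))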