2 changes: 1 addition & 1 deletion examples/pytorch/text-generation/run_generation.py
@@ -63,7 +63,7 @@
 )
 logger = logging.getLogger(__name__)
 
-MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
+MAX_LENGTH = 10000 # Hardcoded max length to avoid infinite loop
 
 MODEL_CLASSES = {
     "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
10 changes: 8 additions & 2 deletions pyproject.toml
@@ -22,17 +22,23 @@ line-length = 119
 # SIM300: Yoda condition detected
 # SIM212: Checks for if expressions that check against a negated condition.
 # SIM905: Consider using a list literal instead of `str.split`
-ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905"]
+# UP009: UTF-8 encoding declaration is unnecessary
+# UP015: Unnecessary mode argument
+# UP031: Use format specifiers instead of percent format
+# UP004: Class `XXX` inherits from `object`
+# UP028: Checks for for loops that can be replaced with yield from expressions
+ignore = ["C901", "E501", "E741", "F402", "F823", "SIM1", "SIM300", "SIM212", "SIM905", "UP009", "UP015", "UP031", "UP028", "UP004"]
 # RUF013: Checks for the use of implicit Optional
 # in type annotations when the default parameter value is None.
-select = ["C", "E", "F", "I", "W", "RUF013", "UP006", "PERF102", "PLC1802", "PLC0208","SIM"]
+select = ["C", "E", "F", "I", "W", "RUF013", "PERF102", "PLC1802", "PLC0208", "SIM", "UP"]
 extend-safe-fixes = ["UP006"]
 
 # Ignore import violations in all `__init__.py` files.
 [tool.ruff.lint.per-file-ignores]
 "__init__.py" = ["E402", "F401", "F403", "F811"]
 "src/transformers/file_utils.py" = ["F401"]
 "src/transformers/utils/dummy_*.py" = ["F401"]
+"examples/legacy/**/*.py" = ["UP"]
 
 [tool.ruff.lint.isort]
 lines-after-imports = 2
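
For context, the newly selected UP family is Ruff's pyupgrade port: it flags idioms that newer Python versions have made redundant, and most of the fixes are automatic. A minimal sketch of the kind of rewrites these rules drive (illustrative examples, not code from this PR):

# UP031: prefer f-strings / format specifiers over percent formatting
name = "ruff"
greeting = f"hello {name}"        # instead of "hello %s" % name

# UP004: the `object` base class is implicit in Python 3
class Config:                     # instead of `class Config(object):`
    pass

# UP028: a loop that only yields can become `yield from`
def flatten(rows):
    for row in rows:
        yield from row            # instead of `for item in row: yield item`
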
3 changes: 2 additions & 1 deletion src/transformers/audio_utils.py
@@ -21,8 +21,9 @@
 import io
 import os
 import warnings
+from collections.abc import Sequence
 from io import BytesIO
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Optional, Union
 
 import numpy as np
 import requests
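
The `Sequence` move mirrors PEP 585's deprecation of the ABC aliases in `typing` (deprecated since Python 3.9): the canonical home for `Sequence`, `Iterator`, and friends is `collections.abc`. A minimal sketch of the preferred import:

# Deprecated since Python 3.9:
#   from typing import Sequence
from collections.abc import Sequence  # preferred spelling

def first(items: Sequence[int]) -> int:
    # Works for any ordered, indexable collection (list, tuple, range, ...)
    return items[0]
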
2 changes: 1 addition & 1 deletion src/transformers/commands/serving.py
@@ -699,7 +699,7 @@ def get_all_models():

         uvicorn.run(app, host=self.args.host, port=self.args.port, log_level=self.args.log_level)
 
-    @functools.lru_cache(maxsize=None)
+    @functools.cache
     def get_gen_models(self) -> list[dict[str, any]]:
         """
         This is by no means a limit to which models may be instantiated with `transformers serve`: any chat-based
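
`functools.cache` was added in Python 3.9 and is documented as returning the same wrapper as `lru_cache(maxsize=None)`, so this decorator swap preserves behavior while dropping the unbounded-size boilerplate. A quick sketch:

import functools

@functools.cache  # same as @functools.lru_cache(maxsize=None)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(64))           # fast: each value is computed once
print(fib.cache_info())  # the lru_cache statistics are still available
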
6 changes: 3 additions & 3 deletions src/transformers/modeling_layers.py
@@ -95,7 +95,7 @@ def __call__(self, *args, **kwargs):


 @auto_docstring
-class GenericForSequenceClassification(object):
+class GenericForSequenceClassification:
     base_model_prefix = "model"
 
     def __init__(self, config):
@@ -170,7 +170,7 @@ def forward(


 @auto_docstring
-class GenericForQuestionAnswering(object):
+class GenericForQuestionAnswering:
     base_model_prefix = "model"
 
     def __init__(self, config):
@@ -231,7 +231,7 @@ def forward(


 @auto_docstring
-class GenericForTokenClassification(object):
+class GenericForTokenClassification:
     base_model_prefix = "model"
 
     def __init__(self, config):
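
Removing the explicit `object` base is a no-op at runtime: every Python 3 class is new-style and inherits from `object` implicitly. This is the pattern rule UP004 targets, though the pyproject config above ignores UP004 globally, so these look like manual cleanups. A quick check with a hypothetical class:

class Example:  # no explicit `object` base needed in Python 3
    pass

# `object` is still in the MRO, exactly as with `class Example(object):`
print(Example.__mro__)  # (<class '__main__.Example'>, <class 'object'>)
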
2 changes: 1 addition & 1 deletion src/transformers/models/florence2/modular_florence2.py
@@ -994,7 +994,7 @@ def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None)
                 instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
                 parsed_dict["description_with_bboxes_or_polygons"] = instances
             else:
-                raise ValueError("task {} is not supported".format(task))
+                raise ValueError(f"task {task} is not supported")
 
         return parsed_dict
 
2 changes: 1 addition & 1 deletion src/transformers/models/florence2/processing_florence2.py
@@ -795,7 +795,7 @@ def __call__(self, text=None, sequence=None, image_size=None, parse_tasks=None)
                 instances = self.parse_description_with_bboxes_from_text_and_spans(text, image_size=image_size)
                 parsed_dict["description_with_bboxes_or_polygons"] = instances
             else:
-                raise ValueError("task {} is not supported".format(task))
+                raise ValueError(f"task {task} is not supported")
 
         return parsed_dict
 
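
Both florence2 copies get the same rewrite: a `str.format` call with inline arguments becomes an f-string (the pattern rule UP032 targets), which reads better and avoids the extra method call. A small sketch with a stand-in `task` value:

task = "<OCR_WITH_REGION>"  # stand-in value for illustration

old_msg = "task {} is not supported".format(task)
new_msg = f"task {task} is not supported"
assert old_msg == new_msg  # identical output, simpler spelling
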
2 changes: 1 addition & 1 deletion src/transformers/models/gemma3n/convert_gemma3n_weights.py
@@ -519,7 +519,7 @@ def convert_vision_weights(
     weights: np.ndarray,
 ) -> Iterable[tuple[str, np.ndarray]]:
     def generate_base_path(path: str, block_type: str) -> tuple[str, tuple[int, int]]:
-        re_str = r"{}(\d+)/".format(block_type)
+        re_str = rf"{block_type}(\d+)/"
         re_pattern = re.compile(re_str)
         match = re.search(re_pattern, path).group(1)
         idx = abs(int(match)) - 1
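
Worth noting here is the prefix combination: `rf"..."` keeps raw-string semantics while adding interpolation, so the `\d+` escape still reaches `re.compile` intact. A quick sketch with hypothetical values:

import re

block_type = "MbConvBlock_"             # hypothetical block prefix
path = "MbConvBlock_3/conv/kernel"      # hypothetical checkpoint path

re_str = rf"{block_type}(\d+)/"         # same string as r"{}(\d+)/".format(block_type)
match = re.search(re_str, path).group(1)
print(match)                            # "3"
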
4 changes: 1 addition & 3 deletions src/transformers/models/sam2/modeling_sam2.py
@@ -1487,9 +1487,7 @@ def forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
 
         image_positional_embeddings = self.get_image_wide_positional_embeddings()
4 changes: 1 addition & 3 deletions src/transformers/models/sam2/modular_sam2.py
@@ -1384,9 +1384,7 @@ def forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
 
         image_positional_embeddings = self.get_image_wide_positional_embeddings()
9 changes: 4 additions & 5 deletions src/transformers/models/sam2_video/modeling_sam2_video.py
@@ -21,8 +21,9 @@

 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import numpy as np
 import torch
@@ -988,7 +989,7 @@ def __init__(self, config: Sam2VideoConfig):
         ) # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )
 
@@ -1923,9 +1924,7 @@ def _single_frame_forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
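
Alongside the import and f-string cleanups, the `torch.ones((config.memory_fuser_embed_dim))` change is purely cosmetic: parentheses around a single expression do not make a tuple, so the old call was already passing a plain int. A quick check with a stand-in dimension:

import torch

dim = 8  # stand-in for config.memory_fuser_embed_dim

# `(dim)` is just `dim`; only `(dim,)` is a 1-tuple. All three calls agree here.
assert torch.equal(torch.ones((dim)), torch.ones(dim))
assert torch.ones((dim,)).shape == torch.ones(dim).shape == (dim,)
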
9 changes: 4 additions & 5 deletions src/transformers/models/sam2_video/modular_sam2_video.py
@@ -16,8 +16,9 @@

 import math
 from collections import OrderedDict
+from collections.abc import Iterator
 from dataclasses import dataclass
-from typing import Any, Callable, Iterator, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import numpy as np
 import torch
@@ -1326,7 +1327,7 @@ def __init__(self, config: Sam2VideoConfig):
         ) # pointwise/1x1 convs, implemented with linear layers
         self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
         self.scale = nn.Parameter(
-            config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)),
+            config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
             requires_grad=True,
         )
 
@@ -1634,9 +1635,7 @@ def _single_frame_forward(
         if input_points is not None and input_boxes is not None:
             if input_points.shape[1] != input_boxes.shape[1]:
                 raise ValueError(
-                    "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
-                        input_points.shape[1], input_boxes.shape[1]
-                    )
+                    f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
                 )
         elif input_points is not None:
             num_objects = input_points.shape[1]
15 changes: 12 additions & 3 deletions src/transformers/pipelines/keypoint_matching.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Sequence, TypedDict, Union
+from collections.abc import Sequence
+from typing import Any, TypedDict, Union
 
 from typing_extensions import TypeAlias, overload
 
@@ -29,8 +30,16 @@

 ImagePair: TypeAlias = Sequence[Union["Image.Image", str]]
 
-Keypoint = TypedDict("Keypoint", {"x": float, "y": float})
-Match = TypedDict("Match", {"keypoint_image_0": Keypoint, "keypoint_image_1": Keypoint, "score": float})
+
+class Keypoint(TypedDict):
+    x: float
+    y: float
+
+
+class Match(TypedDict):
+    keypoint_image_0: Keypoint
+    keypoint_image_1: Keypoint
+    score: float
 
 
 def validate_image_pairs(images: Any) -> Sequence[Sequence[ImagePair]]:
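
The class-based `TypedDict` form is equivalent at runtime to the functional form it replaces (the conversion Ruff's UP013 performs), but it is friendlier to type checkers and supports per-field comments. Construction is unchanged, since a `TypedDict` is still a plain `dict` at runtime — a small sketch:

from typing import TypedDict

class Keypoint(TypedDict):
    x: float
    y: float

kp: Keypoint = {"x": 0.25, "y": 0.75}  # constructed like any dict
print(type(kp))                        # <class 'dict'> — no runtime wrapper
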
4 changes: 2 additions & 2 deletions src/transformers/video_processing_utils.py
@@ -694,8 +694,8 @@ def get_video_processor_dict(
                 _raise_exceptions_for_missing_entries=False,
             )
             resolved_video_processor_file = resolved_video_processor_files[0]
-        except EnvironmentError:
-            # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
+        except OSError:
+            # Raise any OS error raised by `cached_file`. It will have a helpful error message adapted to
             # the original exception.
             raise
         except Exception:
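
`EnvironmentError` has been a deprecated alias of `OSError` since Python 3.3, when PEP 3151 merged the OS/IO exception hierarchy, so catching `OSError` is the modern spelling of exactly the same behavior. A quick check:

assert EnvironmentError is OSError  # alias since Python 3.3 (PEP 3151)
try:
    open("/no/such/path")
except OSError as err:  # also catches what was once EnvironmentError / IOError
    print(type(err).__name__)  # FileNotFoundError, an OSError subclass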