From 26249b63037b1d4138900582007bba8035372aef Mon Sep 17 00:00:00 2001 From: Danipulok Date: Sun, 23 Nov 2025 00:05:29 +0200 Subject: [PATCH 1/7] Make `docs` hooks, `clai` and `pydantic-evals` Windows compatible (use `utf-8`) --- clai/update_readme.py | 2 +- docs/.hooks/main.py | 4 ++-- docs/.hooks/snippets.py | 2 +- pydantic_evals/pydantic_evals/dataset.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/clai/update_readme.py b/clai/update_readme.py index 859ef69f3f..8a9f3bf070 100644 --- a/clai/update_readme.py +++ b/clai/update_readme.py @@ -21,7 +21,7 @@ def test_cli_help(capfd: pytest.CaptureFixture[str]): this_dir = Path(__file__).parent readme = this_dir / 'README.md' - content = readme.read_text() + content = readme.read_text(encoding='utf-8') new_content, count = re.subn('^(## Help\n+```).+?```', rf'\1\n{help_output}\n```', content, flags=re.M | re.S) assert count, 'help section not found' diff --git a/docs/.hooks/main.py b/docs/.hooks/main.py index c4bad10f8c..5026db7679 100644 --- a/docs/.hooks/main.py +++ b/docs/.hooks/main.py @@ -43,7 +43,7 @@ def on_post_build(config: Config) -> None: """Inject extra CSS into mermaid styles to avoid titles being the same color as the background in dark mode.""" assert bundle_path is not None if bundle_path.exists(): - content = bundle_path.read_text() + content = bundle_path.read_text(encoding='utf-8') content, _ = re.subn(r'}(\.statediagram)', '}.statediagramTitleText{fill:#888}\1', content, count=1) bundle_path.write_text(content) @@ -88,7 +88,7 @@ def render_examples(markdown: str) -> str: def sub_example(m: re.Match[str]) -> str: example_path = EXAMPLES_DIR / m.group(1) - content = example_path.read_text().strip() + content = example_path.read_text(encoding='utf-8').strip() # remove leading docstring which duplicates what's in the docs page content = re.sub(r'^""".*?"""', '', content, count=1, flags=re.S).strip() diff --git a/docs/.hooks/snippets.py b/docs/.hooks/snippets.py index 5ffaa9e63a..4aa9e2ed30 100644 --- a/docs/.hooks/snippets.py +++ b/docs/.hooks/snippets.py @@ -149,7 +149,7 @@ def parse_snippet_directive(line: str) -> SnippetDirective | None: def parse_file_sections(file_path: Path) -> ParsedFile: """Parse a file and extract sections marked with ### [section] or /// [section]""" - input_lines = file_path.read_text().splitlines() + input_lines = file_path.read_text(encoding='utf-8').splitlines() output_lines: list[str] = [] lines_mapping: dict[int, int] = {} diff --git a/pydantic_evals/pydantic_evals/dataset.py b/pydantic_evals/pydantic_evals/dataset.py index 28f0f2a1a5..da087c6c55 100644 --- a/pydantic_evals/pydantic_evals/dataset.py +++ b/pydantic_evals/pydantic_evals/dataset.py @@ -511,7 +511,7 @@ def from_file( path = Path(path) fmt = cls._infer_fmt(path, fmt) - raw = Path(path).read_text() + raw = Path(path).read_text(encoding='utf-8') try: return cls.from_text(raw, fmt=fmt, custom_evaluator_types=custom_evaluator_types, default_name=path.stem) except ValidationError as e: # pragma: no cover @@ -767,7 +767,7 @@ def _save_schema( path = Path(path) json_schema = cls.model_json_schema_with_evaluators(custom_evaluator_types) schema_content = to_json(json_schema, indent=2).decode() + '\n' - if not path.exists() or path.read_text() != schema_content: # pragma: no branch + if not path.exists() or path.read_text(encoding='utf-8') != schema_content: # pragma: no branch path.write_text(schema_content) @classmethod From dc2cffa4d0ba052f8bb9ba9abd066e90735622c7 Mon Sep 17 00:00:00 2001 From: Danipulok Date: Sun, 
23 Nov 2025 02:50:14 +0200 Subject: [PATCH 2/7] Add `utf-8` to missing spots --- clai/update_readme.py | 2 +- docs/.hooks/main.py | 2 +- pydantic_evals/pydantic_evals/dataset.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clai/update_readme.py b/clai/update_readme.py index 8a9f3bf070..30d09f2b0b 100644 --- a/clai/update_readme.py +++ b/clai/update_readme.py @@ -26,5 +26,5 @@ def test_cli_help(capfd: pytest.CaptureFixture[str]): new_content, count = re.subn('^(## Help\n+```).+?```', rf'\1\n{help_output}\n```', content, flags=re.M | re.S) assert count, 'help section not found' if new_content != content: - readme.write_text(new_content) + readme.write_text(new_content, encoding='utf-8') pytest.fail('`clai --help` output changed.') diff --git a/docs/.hooks/main.py b/docs/.hooks/main.py index 5026db7679..df0e8f808c 100644 --- a/docs/.hooks/main.py +++ b/docs/.hooks/main.py @@ -45,7 +45,7 @@ def on_post_build(config: Config) -> None: if bundle_path.exists(): content = bundle_path.read_text(encoding='utf-8') content, _ = re.subn(r'}(\.statediagram)', '}.statediagramTitleText{fill:#888}\1', content, count=1) - bundle_path.write_text(content) + bundle_path.write_text(content, encoding='utf-8') def replace_uv_python_run(markdown: str) -> str: diff --git a/pydantic_evals/pydantic_evals/dataset.py b/pydantic_evals/pydantic_evals/dataset.py index da087c6c55..d1c087d15f 100644 --- a/pydantic_evals/pydantic_evals/dataset.py +++ b/pydantic_evals/pydantic_evals/dataset.py @@ -768,7 +768,7 @@ def _save_schema( json_schema = cls.model_json_schema_with_evaluators(custom_evaluator_types) schema_content = to_json(json_schema, indent=2).decode() + '\n' if not path.exists() or path.read_text(encoding='utf-8') != schema_content: # pragma: no branch - path.write_text(schema_content) + path.write_text(schema_content, encoding='utf-8') @classmethod @functools.cache From 89005bf121a4d6f38acfe71b80f9563fa08d9aa7 Mon Sep 17 00:00:00 2001 From: Danipulok Date: Tue, 25 Nov 2025 01:50:36 +0200 Subject: [PATCH 3/7] Add `utf-8` via `uv run ruff check --select PLW1514 --preview .` --- docs/.hooks/test_snippets.py | 456 +++++++++++++------------- tests/providers/test_google_vertex.py | 2 +- tests/test_fastmcp.py | 4 +- tests/test_live.py | 4 +- 4 files changed, 225 insertions(+), 241 deletions(-) diff --git a/docs/.hooks/test_snippets.py b/docs/.hooks/test_snippets.py index fbe26b621f..839ad11046 100644 --- a/docs/.hooks/test_snippets.py +++ b/docs/.hooks/test_snippets.py @@ -2,6 +2,7 @@ import os import tempfile +from contextlib import contextmanager from pathlib import Path import pytest @@ -19,6 +20,19 @@ ) +@contextmanager +def temp_text_file(content: str): + """Context manager for temporary text file with common params.""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.py', encoding='utf-8', delete=False) as f: + f.write(content) + temp_name = f.name + + try: + yield Path(temp_name) + finally: + os.unlink(temp_name) + + def test_parse_snippet_directive_basic(): """Test basic parsing of snippet directives.""" line = '```snippet {path="test.py"}```' @@ -87,21 +101,16 @@ def test_parse_file_sections_basic(): ### [/section1] line 6""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + result = parse_file_sections(temp_path) - try: - result = parse_file_sections(Path(f.name)) - assert result == snapshot( - ParsedFile( - lines=['line 1', 'content 1', 'content 2', 'line 6'], - 
sections={'section1': [LineRange(start_line=1, end_line=3)]}, - lines_mapping={0: 0, 1: 2, 2: 3, 3: 5}, - ) - ) - finally: - os.unlink(f.name) + assert result == snapshot( + ParsedFile( + lines=['line 1', 'content 1', 'content 2', 'line 6'], + sections={'section1': [LineRange(start_line=1, end_line=3)]}, + lines_mapping={0: 0, 1: 2, 2: 3, 3: 5}, + ) + ) def test_parse_file_sections_multiple_ranges(): @@ -116,27 +125,22 @@ def test_parse_file_sections_multiple_ranges(): ### [/section1] end line""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() - - try: - result = parse_file_sections(Path(f.name)) - assert result == snapshot( - ParsedFile( - lines=[ - 'line 1', - 'content 1', - 'middle line', - 'content 2', - 'end line', - ], - sections={'section1': [LineRange(start_line=1, end_line=2), LineRange(start_line=3, end_line=4)]}, - lines_mapping={0: 0, 1: 2, 2: 4, 3: 6, 4: 8}, - ) - ) - finally: - os.unlink(f.name) + with temp_text_file(content) as temp_path: + result = parse_file_sections(temp_path) + + assert result == snapshot( + ParsedFile( + lines=[ + 'line 1', + 'content 1', + 'middle line', + 'content 2', + 'end line', + ], + sections={'section1': [LineRange(start_line=1, end_line=2), LineRange(start_line=3, end_line=4)]}, + lines_mapping={0: 0, 1: 2, 2: 4, 3: 6, 4: 8}, + ) + ) def test_parse_file_sections_comment_style(): @@ -147,21 +151,16 @@ def test_parse_file_sections_comment_style(): /// [/section1] line 5""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + result = parse_file_sections(temp_path) - try: - result = parse_file_sections(Path(f.name)) - assert result == snapshot( - ParsedFile( - lines=['line 1', 'content 1', 'line 5'], - sections={'section1': [LineRange(start_line=1, end_line=2)]}, - lines_mapping={0: 0, 1: 2, 2: 4}, - ) - ) - finally: - os.unlink(f.name) + assert result == snapshot( + ParsedFile( + lines=['line 1', 'content 1', 'line 5'], + sections={'section1': [LineRange(start_line=1, end_line=2)]}, + lines_mapping={0: 0, 1: 2, 2: 4}, + ) + ) def test_parse_file_sections_nested(): @@ -176,30 +175,25 @@ def test_parse_file_sections_nested(): ### [/outer] end""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() - - try: - result = parse_file_sections(Path(f.name)) - assert result == snapshot( - ParsedFile( - lines=[ - 'line 1', - 'outer content', - 'inner content', - 'more outer', - 'end', - ], - sections={ - 'inner': [LineRange(start_line=2, end_line=3)], - 'outer': [LineRange(start_line=1, end_line=4)], - }, - lines_mapping={0: 0, 1: 2, 2: 4, 3: 6, 4: 8}, - ) - ) - finally: - os.unlink(f.name) + with temp_text_file(content) as temp_path: + result = parse_file_sections(temp_path) + + assert result == snapshot( + ParsedFile( + lines=[ + 'line 1', + 'outer content', + 'inner content', + 'more outer', + 'end', + ], + sections={ + 'inner': [LineRange(start_line=2, end_line=3)], + 'outer': [LineRange(start_line=1, end_line=4)], + }, + lines_mapping={0: 0, 1: 2, 2: 4, 3: 6, 4: 8}, + ) + ) def test_extract_fragment_content_entire_file(): @@ -210,47 +204,44 @@ def test_extract_fragment_content_entire_file(): ### [/section1] line 5""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + parsed = parse_file_sections(temp_path) - try: - parsed = 
parse_file_sections(Path(f.name)) - assert parsed.render([], []) == snapshot( - RenderedSnippet( - content="""\ + assert parsed.render([], []) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 line 5\ """, - highlights=[], - original_range=LineRange(start_line=0, end_line=5), - ) - ) - assert parsed.render(['section1'], []) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=0, end_line=5), + ) + ) + + assert parsed.render(['section1'], []) == snapshot( + RenderedSnippet( + content="""\ content 1 ...\ """, - highlights=[], - original_range=LineRange(start_line=2, end_line=3), - ) - ) - assert parsed.render([], ['section1']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=2, end_line=3), + ) + ) + + assert parsed.render([], ['section1']) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 line 5\ """, - highlights=[LineRange(start_line=1, end_line=2)], - original_range=LineRange(start_line=0, end_line=5), - ) - ) - finally: - os.unlink(f.name) + highlights=[LineRange(start_line=1, end_line=2)], + original_range=LineRange(start_line=0, end_line=5), + ) + ) def test_extract_fragment_content_specific_section(): @@ -262,50 +253,47 @@ def test_extract_fragment_content_specific_section(): ### [/section1] line 6""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + parsed = parse_file_sections(temp_path) - try: - parsed = parse_file_sections(Path(f.name)) - assert parsed.render([], []) == snapshot( - RenderedSnippet( - content="""\ + assert parsed.render([], []) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 content 2 line 6\ """, - highlights=[], - original_range=LineRange(start_line=0, end_line=6), - ) - ) - assert parsed.render(['section1'], []) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=0, end_line=6), + ) + ) + + assert parsed.render(['section1'], []) == snapshot( + RenderedSnippet( + content="""\ content 1 content 2 ...\ """, - highlights=[], - original_range=LineRange(start_line=2, end_line=4), - ) - ) - assert parsed.render([], ['section1']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=2, end_line=4), + ) + ) + + assert parsed.render([], ['section1']) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 content 2 line 6\ """, - highlights=[LineRange(start_line=1, end_line=3)], - original_range=LineRange(start_line=0, end_line=6), - ) - ) - finally: - os.unlink(f.name) + highlights=[LineRange(start_line=1, end_line=3)], + original_range=LineRange(start_line=0, end_line=6), + ) + ) def test_extract_fragment_content_multiple_sections(): @@ -320,28 +308,26 @@ def test_extract_fragment_content_multiple_sections(): ### [/section2] end""" - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + parsed = parse_file_sections(temp_path) - try: - parsed = parse_file_sections(Path(f.name)) - assert parsed.render([], []) == snapshot( - RenderedSnippet( - content="""\ + assert parsed.render([], []) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 middle content 2 end\ """, - highlights=[], - original_range=LineRange(start_line=0, end_line=9), - ) - ) - assert parsed.render(['section1', 'section2'], []) == snapshot( - 
RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=0, end_line=9), + ) + ) + + assert parsed.render(['section1', 'section2'], []) == snapshot( + RenderedSnippet( + content="""\ content 1 ... @@ -350,13 +336,14 @@ def test_extract_fragment_content_multiple_sections(): ...\ """, - highlights=[], - original_range=LineRange(start_line=2, end_line=7), - ) - ) - assert parsed.render(['section1', 'section2'], ['section1']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=2, end_line=7), + ) + ) + + assert parsed.render(['section1', 'section2'], ['section1']) == snapshot( + RenderedSnippet( + content="""\ content 1 ... @@ -365,13 +352,14 @@ def test_extract_fragment_content_multiple_sections(): ...\ """, - highlights=[LineRange(start_line=0, end_line=1)], - original_range=LineRange(start_line=2, end_line=7), - ) - ) - assert parsed.render(['section1', 'section2'], ['section1', 'section2']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[LineRange(start_line=0, end_line=1)], + original_range=LineRange(start_line=2, end_line=7), + ) + ) + + assert parsed.render(['section1', 'section2'], ['section1', 'section2']) == snapshot( + RenderedSnippet( + content="""\ content 1 ... @@ -380,36 +368,36 @@ def test_extract_fragment_content_multiple_sections(): ...\ """, - highlights=[LineRange(start_line=0, end_line=1), LineRange(start_line=2, end_line=3)], - original_range=LineRange(start_line=2, end_line=7), - ) - ) - assert parsed.render(['section1'], ['section2']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[LineRange(start_line=0, end_line=1), LineRange(start_line=2, end_line=3)], + original_range=LineRange(start_line=2, end_line=7), + ) + ) + + assert parsed.render(['section1'], ['section2']) == snapshot( + RenderedSnippet( + content="""\ content 1 ...\ """, - highlights=[], - original_range=LineRange(start_line=2, end_line=3), - ) - ) - assert parsed.render([], ['section1', 'section2']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=2, end_line=3), + ) + ) + + assert parsed.render([], ['section1', 'section2']) == snapshot( + RenderedSnippet( + content="""\ line 1 content 1 middle content 2 end\ """, - highlights=[LineRange(start_line=1, end_line=2), LineRange(start_line=3, end_line=4)], - original_range=LineRange(start_line=0, end_line=9), - ) - ) - finally: - os.unlink(f.name) + highlights=[LineRange(start_line=1, end_line=2), LineRange(start_line=3, end_line=4)], + original_range=LineRange(start_line=0, end_line=9), + ) + ) def test_complicated_example(): @@ -428,15 +416,12 @@ def test_complicated_example(): ### [/highlight2] """ - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() + with temp_text_file(content) as temp_path: + parsed = parse_file_sections(temp_path) - try: - parsed = parse_file_sections(Path(f.name)) - assert parsed.render([], []) == snapshot( - RenderedSnippet( - content="""\ + assert parsed.render([], []) == snapshot( + RenderedSnippet( + content="""\ line 1 line 2 line 3 @@ -444,39 +429,42 @@ def test_complicated_example(): line 5 line 6\ """, - highlights=[], - original_range=LineRange(start_line=0, end_line=11), - ) - ) - assert parsed.render(['fragment1'], ['highlight1']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[], + original_range=LineRange(start_line=0, end_line=11), + ) + ) + + assert parsed.render(['fragment1'], ['highlight1']) == 
snapshot( + RenderedSnippet( + content="""\ line 2 line 3 line 4 ...\ """, - highlights=[LineRange(start_line=2, end_line=3)], - original_range=LineRange(start_line=2, end_line=7), - ) - ) - assert parsed.render(['fragment1'], ['highlight2']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[LineRange(start_line=2, end_line=3)], + original_range=LineRange(start_line=2, end_line=7), + ) + ) + + assert parsed.render(['fragment1'], ['highlight2']) == snapshot( + RenderedSnippet( + content="""\ line 2 line 3 line 4 ...\ """, - highlights=[LineRange(start_line=2, end_line=5)], - original_range=LineRange(start_line=2, end_line=7), - ) - ) - assert parsed.render(['fragment2'], ['highlight2']) == snapshot( - RenderedSnippet( - content="""\ + highlights=[LineRange(start_line=2, end_line=5)], + original_range=LineRange(start_line=2, end_line=7), + ) + ) + + assert parsed.render(['fragment2'], ['highlight2']) == snapshot( + RenderedSnippet( + content="""\ ... line 3 @@ -485,13 +473,14 @@ def test_complicated_example(): ...\ """, - highlights=[LineRange(start_line=2, end_line=5)], - original_range=LineRange(start_line=4, end_line=9), - ) - ) - assert parsed.render(['fragment1', 'fragment2'], []) == snapshot( - RenderedSnippet( - content="""\ + highlights=[LineRange(start_line=2, end_line=5)], + original_range=LineRange(start_line=4, end_line=9), + ) + ) + + assert parsed.render(['fragment1', 'fragment2'], []) == snapshot( + RenderedSnippet( + content="""\ line 2 line 3 line 4 @@ -499,12 +488,10 @@ def test_complicated_example(): ...\ """, - highlights=[], - original_range=LineRange(start_line=2, end_line=9), - ) - ) - finally: - os.unlink(f.name) + highlights=[], + original_range=LineRange(start_line=2, end_line=9), + ) + ) def test_format_highlight_lines_empty(): @@ -534,25 +521,17 @@ def test_inject_snippets_basic(): content = """def hello(): return "world" """ - with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: - f.write(content) - f.flush() - - try: - # Create a temporary docs directory structure - with tempfile.TemporaryDirectory() as temp_dir: - docs_dir = Path(temp_dir) + with tempfile.TemporaryDirectory() as temp_dir: + docs_dir = Path(temp_dir) - # Mock the docs directory resolution by copying file - target_file = docs_dir / 'test.py' - target_file.write_text(content) + # Mock the docs directory resolution by copying file + target_file = docs_dir / 'test.py' + target_file.write_text(content, encoding='utf-8') - markdown = '```snippet {path="test.py"}' - result = inject_snippets(markdown, docs_dir) - assert result == snapshot('```snippet {path="test.py"}') + markdown = '```snippet {path="test.py"}' + result = inject_snippets(markdown, docs_dir) - finally: - os.unlink(f.name) + assert result == snapshot('```snippet {path="test.py"}') def test_inject_snippets_with_title(): @@ -567,6 +546,7 @@ def test_inject_snippets_with_title(): markdown = '```snippet {path="test.py" title="Custom Title"}' result = inject_snippets(markdown, docs_dir) + assert result == snapshot('```snippet {path="test.py" title="Custom Title"}') @@ -586,6 +566,7 @@ def test_inject_snippets_with_fragments(): markdown = '```snippet {path="test.py" fragment="important"}' result = inject_snippets(markdown, docs_dir) + assert result == snapshot('```snippet {path="test.py" fragment="important"}') @@ -610,6 +591,7 @@ def other(): markdown = '```snippet {path="test.py" highlight="important"}' result = inject_snippets(markdown, docs_dir) + assert result == snapshot('```snippet {path="test.py" 
highlight="important"}') @@ -639,6 +621,7 @@ def test_inject_snippets_multiple(): Final text""" result = inject_snippets(markdown, docs_dir) + assert result == snapshot( """\ Some text @@ -662,4 +645,5 @@ def test_inject_snippets_extra_attrs(): markdown = '```snippet {path="test.py" custom="value" another="attr"}' result = inject_snippets(markdown, docs_dir) + assert result == snapshot('```snippet {path="test.py" custom="value" another="attr"}') diff --git a/tests/providers/test_google_vertex.py b/tests/providers/test_google_vertex.py index 5d1b53019e..243a08f79c 100644 --- a/tests/providers/test_google_vertex.py +++ b/tests/providers/test_google_vertex.py @@ -143,7 +143,7 @@ def prepare_service_account_contents(project_id: str) -> dict[str, str]: def save_service_account(service_account_path: Path, project_id: str) -> None: service_account = prepare_service_account_contents(project_id) - service_account_path.write_text(json.dumps(service_account, indent=2)) + service_account_path.write_text(json.dumps(service_account, indent=2), encoding='utf-8') @pytest.fixture(autouse=True) diff --git a/tests/test_fastmcp.py b/tests/test_fastmcp.py index 1d2815cd6b..d8d49f4be4 100644 --- a/tests/test_fastmcp.py +++ b/tests/test_fastmcp.py @@ -544,14 +544,14 @@ async def test_transports(self): with TemporaryDirectory() as temp_dir: server_py: Path = Path(temp_dir) / 'server.py' - server_py.write_text(data='') + server_py.write_text(data='', encoding='utf-8') toolset = FastMCPToolset(server_py) assert isinstance(toolset.client.transport, PythonStdioTransport) toolset = FastMCPToolset(str(server_py)) assert isinstance(toolset.client.transport, PythonStdioTransport) server_js: Path = Path(temp_dir) / 'server.js' - server_js.write_text(data='') + server_js.write_text(data='', encoding='utf-8') toolset = FastMCPToolset(server_js) assert isinstance(toolset.client.transport, NodeStdioTransport) toolset = FastMCPToolset(str(server_js)) diff --git a/tests/test_live.py b/tests/test_live.py index d5a90c7b28..3ec6314d15 100644 --- a/tests/test_live.py +++ b/tests/test_live.py @@ -41,11 +41,11 @@ def vertexai(http_client: httpx.AsyncClient, tmp_path: Path) -> Model: from pydantic_ai.providers.google import GoogleProvider if service_account_path := os.getenv('GOOGLE_APPLICATION_CREDENTIALS'): - project_id = json.loads(Path(service_account_path).read_text())['project_id'] + project_id = json.loads(Path(service_account_path).read_text(encoding='utf-8'))['project_id'] elif service_account_content := os.getenv('GOOGLE_SERVICE_ACCOUNT_CONTENT'): project_id = json.loads(service_account_content)['project_id'] service_account_path = tmp_path / 'service_account.json' - service_account_path.write_text(service_account_content) + service_account_path.write_text(service_account_content, encoding='utf-8') else: pytest.skip( 'VertexAI live test requires GOOGLE_APPLICATION_CREDENTIALS or GOOGLE_SERVICE_ACCOUNT_CONTENT to be set' From c98bff068652a3a33f9ff9b9edd746c76d74a8ee Mon Sep 17 00:00:00 2001 From: Danipulok Date: Tue, 25 Nov 2025 02:02:16 +0200 Subject: [PATCH 4/7] Add `utf-8` to missing methods --- docs/.hooks/test_snippets.py | 12 ++--- docs/evals/how-to/dataset-management.md | 4 +- docs/evals/how-to/dataset-serialization.md | 2 +- docs/mcp/client.md | 4 +- pydantic_evals/pydantic_evals/dataset.py | 4 +- tests/conftest.py | 4 +- tests/evals/test_dataset.py | 6 +-- tests/mcp_server.py | 2 +- tests/test_examples.py | 2 +- tests/test_fastmcp.py | 2 +- tests/test_mcp.py | 52 +++++++++++++++------- 11 files changed, 57 
insertions(+), 37 deletions(-) diff --git a/docs/.hooks/test_snippets.py b/docs/.hooks/test_snippets.py index 839ad11046..9551ed02fb 100644 --- a/docs/.hooks/test_snippets.py +++ b/docs/.hooks/test_snippets.py @@ -541,7 +541,7 @@ def test_inject_snippets_with_title(): with tempfile.TemporaryDirectory() as temp_dir: docs_dir = Path(temp_dir) target_file = docs_dir / 'test.py' - target_file.write_text(content) + target_file.write_text(content, encoding='utf-8') markdown = '```snippet {path="test.py" title="Custom Title"}' @@ -561,7 +561,7 @@ def test_inject_snippets_with_fragments(): with tempfile.TemporaryDirectory() as temp_dir: docs_dir = Path(temp_dir) target_file = docs_dir / 'test.py' - target_file.write_text(content) + target_file.write_text(content, encoding='utf-8') markdown = '```snippet {path="test.py" fragment="important"}' @@ -586,7 +586,7 @@ def other(): with tempfile.TemporaryDirectory() as temp_dir: docs_dir = Path(temp_dir) target_file = docs_dir / 'test.py' - target_file.write_text(content) + target_file.write_text(content, encoding='utf-8') markdown = '```snippet {path="test.py" highlight="important"}' @@ -611,8 +611,8 @@ def test_inject_snippets_multiple(): docs_dir = Path(temp_dir) file1 = docs_dir / 'test1.py' file2 = docs_dir / 'test2.py' - file1.write_text(content1) - file2.write_text(content2) + file1.write_text(content1, encoding='utf-8') + file2.write_text(content2, encoding='utf-8') markdown = """Some text ```snippet {path="test1.py"} @@ -640,7 +640,7 @@ def test_inject_snippets_extra_attrs(): with tempfile.TemporaryDirectory() as temp_dir: docs_dir = Path(temp_dir) target_file = docs_dir / 'test.py' - target_file.write_text(content) + target_file.write_text(content, encoding='utf-8') markdown = '```snippet {path="test.py" custom="value" another="attr"}' diff --git a/docs/evals/how-to/dataset-management.md b/docs/evals/how-to/dataset-management.md index 0a97d15283..9fe657b63d 100644 --- a/docs/evals/how-to/dataset-management.md +++ b/docs/evals/how-to/dataset-management.md @@ -270,7 +270,7 @@ async def main(): ) output_file = Path('questions_cases.yaml') dataset.to_file(output_file) # (5)! - print(output_file.read_text()) + print(output_file.read_text(encoding='utf-8')) """ # yaml-language-server: $schema=questions_cases_schema.json name: null @@ -333,7 +333,7 @@ async def main(): ) output_file = Path('questions_cases.json') dataset.to_file(output_file) # (2)! 
- print(output_file.read_text()) + print(output_file.read_text(encoding='utf-8')) """ { "$schema": "questions_cases_schema.json", diff --git a/docs/evals/how-to/dataset-serialization.md b/docs/evals/how-to/dataset-serialization.md index 381a5b51f3..cb59a2c133 100644 --- a/docs/evals/how-to/dataset-serialization.md +++ b/docs/evals/how-to/dataset-serialization.md @@ -255,7 +255,7 @@ from pydantic_evals import Dataset schema = Dataset[str, str, Any].model_json_schema_with_evaluators() # Save manually -with open('custom_schema.json', 'w') as f: +with open('custom_schema.json', 'w', encoding='utf-8') as f: json.dump(schema, f, indent=2) ``` diff --git a/docs/mcp/client.md b/docs/mcp/client.md index ddde50564a..815af8ffce 100644 --- a/docs/mcp/client.md +++ b/docs/mcp/client.md @@ -539,9 +539,9 @@ Let's say we have an MCP server that wants to use sampling (in this case to gene path = Path(f'{subject}_{style}.svg') # remove triple backticks if the svg was returned within markdown if m := re.search(r'^```\w*$(.+?)```$', result.content.text, re.S | re.M): - path.write_text(m.group(1)) + path.write_text(m.group(1), encoding='utf-8') else: - path.write_text(result.content.text) + path.write_text(result.content.text, encoding='utf-8') return f'See {path}' diff --git a/pydantic_evals/pydantic_evals/dataset.py b/pydantic_evals/pydantic_evals/dataset.py index d1c087d15f..d8d4c187a2 100644 --- a/pydantic_evals/pydantic_evals/dataset.py +++ b/pydantic_evals/pydantic_evals/dataset.py @@ -671,11 +671,11 @@ def to_file( if schema_ref: # pragma: no branch yaml_language_server_line = f'{_YAML_SCHEMA_LINE_PREFIX}{schema_ref}' content = f'{yaml_language_server_line}\n{content}' - path.write_text(content) + path.write_text(content, encoding='utf-8') else: context['$schema'] = schema_ref json_data = self.model_dump_json(indent=2, by_alias=True, context=context) - path.write_text(json_data + '\n') + path.write_text(json_data + '\n', encoding='utf-8') @classmethod def model_json_schema_with_evaluators( diff --git a/tests/conftest.py b/tests/conftest.py index 6b90ecfb28..32b4a475cc 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -198,7 +198,7 @@ def run( sanitized_name = re.sub('[' + re.escape('<>:"/\\|?*') + ']', '-', request.node.name)[:max_name_len] module_name = f'{sanitized_name}_{secrets.token_hex(5)}' path = tmp_path / f'{module_name}.py' - path.write_text(source_code) + path.write_text(source_code, encoding='utf-8') filename = str(path) if module_name_prefix: # pragma: no cover @@ -352,7 +352,7 @@ def document_content(assets_path: Path) -> BinaryContent: @pytest.fixture(scope='session') def text_document_content(assets_path: Path) -> BinaryContent: - content = assets_path.joinpath('dummy.txt').read_text() + content = assets_path.joinpath('dummy.txt').read_text(encoding='utf-8') bin_content = BinaryContent(data=content.encode(), media_type='text/plain') return bin_content diff --git a/tests/evals/test_dataset.py b/tests/evals/test_dataset.py index ca5f90d44a..34a7dbecd0 100644 --- a/tests/evals/test_dataset.py +++ b/tests/evals/test_dataset.py @@ -836,9 +836,9 @@ async def test_deserializing_without_name( example_dataset.to_file(yaml_path) # Rewrite the file _without_ a name to test deserializing a name-less file - obj = yaml.safe_load(yaml_path.read_text()) + obj = yaml.safe_load(yaml_path.read_text(encoding='utf-8')) obj.pop('name', None) - yaml_path.write_text(yaml.dump(obj)) + yaml_path.write_text(yaml.dump(obj), encoding='utf-8') # Test loading results in the name coming from the filename stem 
loaded_dataset = Dataset[TaskInput, TaskOutput, TaskMetadata].from_file(yaml_path) @@ -858,7 +858,7 @@ async def test_serialization_to_json(example_dataset: Dataset[TaskInput, TaskOut assert loaded_dataset.cases[0].name == 'case1' assert loaded_dataset.cases[0].inputs.query == 'What is 2+2?' - raw = json.loads(json_path.read_text()) + raw = json.loads(json_path.read_text(encoding='utf-8')) schema = raw['$schema'] assert isinstance(schema, str) assert (tmp_path / schema).exists() diff --git a/tests/mcp_server.py b/tests/mcp_server.py index 4763fd86ca..8ba9b9997f 100644 --- a/tests/mcp_server.py +++ b/tests/mcp_server.py @@ -127,7 +127,7 @@ async def get_product_name_link() -> ResourceLink: annotations=Annotations(audience=['user', 'assistant'], priority=0.5), ) async def product_name_resource() -> str: - return Path(__file__).parent.joinpath('assets/product_name.txt').read_text() + return Path(__file__).parent.joinpath('assets/product_name.txt').read_text(encoding='utf-8') @mcp.resource('resource://greeting/{name}', mime_type='text/plain') diff --git a/tests/test_examples.py b/tests/test_examples.py index 407816b60a..efa799dfa1 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -205,7 +205,7 @@ def print(self, *args: Any, **kwargs: Any) -> None: for req in requires.split(','): known_local_folder.append(Path(req).stem) if ex := code_examples.get(req): - (tmp_path_cwd / req).write_text(ex.source) + (tmp_path_cwd / req).write_text(ex.source, encoding='utf-8') else: # pragma: no cover raise KeyError(f'Example {req} not found, check the `requires` header of this example.') diff --git a/tests/test_fastmcp.py b/tests/test_fastmcp.py index d8d49f4be4..1e504692b6 100644 --- a/tests/test_fastmcp.py +++ b/tests/test_fastmcp.py @@ -521,7 +521,7 @@ async def test_tool(param1: str, param2: int = 0) -> str: server.run()""" with TemporaryDirectory() as temp_dir: server_py = Path(temp_dir) / 'server.py' - server_py.write_text(server_script) + server_py.write_text(server_script, encoding='utf-8') toolset = FastMCPToolset(server_py) assert isinstance(toolset, FastMCPToolset) diff --git a/tests/test_mcp.py b/tests/test_mcp.py index 9a1f4a6317..630989f82d 100644 --- a/tests/test_mcp.py +++ b/tests/test_mcp.py @@ -1686,17 +1686,19 @@ async def test_list_resource_templates_error(mcp_server: MCPServerStdio) -> None def test_load_mcp_servers(tmp_path: Path): config = tmp_path / 'mcp.json' - config.write_text('{"mcpServers": {"potato": {"url": "https://example.com/mcp"}}}') + config.write_text('{"mcpServers": {"potato": {"url": "https://example.com/mcp"}}}', encoding='utf-8') server = load_mcp_servers(config)[0] assert server == MCPServerStreamableHTTP(url='https://example.com/mcp', id='potato', tool_prefix='potato') - config.write_text('{"mcpServers": {"potato": {"command": "python", "args": ["-m", "tests.mcp_server"]}}}') + config.write_text( + '{"mcpServers": {"potato": {"command": "python", "args": ["-m", "tests.mcp_server"]}}}', encoding='utf-8' + ) server = load_mcp_servers(config)[0] assert server == MCPServerStdio( command='python', args=['-m', 'tests.mcp_server'], id='potato', tool_prefix='potato' ) - config.write_text('{"mcpServers": {"potato": {"url": "https://example.com/sse"}}}') + config.write_text('{"mcpServers": {"potato": {"url": "https://example.com/sse"}}}', encoding='utf-8') server = load_mcp_servers(config)[0] assert server == MCPServerSSE(url='https://example.com/sse', id='potato', tool_prefix='potato') @@ -1711,7 +1713,9 @@ def test_load_mcp_servers_with_env_vars(tmp_path: 
Path, monkeypatch: pytest.Monk # Test with environment variables in command monkeypatch.setenv('PYTHON_CMD', 'python3') monkeypatch.setenv('MCP_MODULE', 'tests.mcp_server') - config.write_text('{"mcpServers": {"my_server": {"command": "${PYTHON_CMD}", "args": ["-m", "${MCP_MODULE}"]}}}') + config.write_text( + '{"mcpServers": {"my_server": {"command": "${PYTHON_CMD}", "args": ["-m", "${MCP_MODULE}"]}}}', encoding='utf-8' + ) servers = load_mcp_servers(config) @@ -1732,7 +1736,8 @@ def test_load_mcp_servers_env_var_in_env_dict(tmp_path: Path, monkeypatch: pytes monkeypatch.setenv('API_KEY', 'secret123') config.write_text( '{"mcpServers": {"my_server": {"command": "python", "args": ["-m", "tests.mcp_server"], ' - '"env": {"API_KEY": "${API_KEY}"}}}}' + '"env": {"API_KEY": "${API_KEY}"}}}}', + encoding='utf-8', ) servers = load_mcp_servers(config) @@ -1750,7 +1755,9 @@ def test_load_mcp_servers_env_var_expansion_url(tmp_path: Path, monkeypatch: pyt # Test with environment variables in URL monkeypatch.setenv('SERVER_HOST', 'example.com') monkeypatch.setenv('SERVER_PORT', '8080') - config.write_text('{"mcpServers": {"web_server": {"url": "https://${SERVER_HOST}:${SERVER_PORT}/mcp"}}}') + config.write_text( + '{"mcpServers": {"web_server": {"url": "https://${SERVER_HOST}:${SERVER_PORT}/mcp"}}}', encoding='utf-8' + ) servers = load_mcp_servers(config) @@ -1767,7 +1774,7 @@ def test_load_mcp_servers_undefined_env_var(tmp_path: Path, monkeypatch: pytest. # Make sure the environment variable is not set monkeypatch.delenv('UNDEFINED_VAR', raising=False) - config.write_text('{"mcpServers": {"my_server": {"command": "${UNDEFINED_VAR}", "args": []}}}') + config.write_text('{"mcpServers": {"my_server": {"command": "${UNDEFINED_VAR}", "args": []}}}', encoding='utf-8') with pytest.raises(ValueError, match='Environment variable \\$\\{UNDEFINED_VAR\\} is not defined'): load_mcp_servers(config) @@ -1779,7 +1786,7 @@ def test_load_mcp_servers_partial_env_vars(tmp_path: Path, monkeypatch: pytest.M monkeypatch.setenv('HOST', 'example.com') monkeypatch.setenv('PATH_SUFFIX', 'mcp') - config.write_text('{"mcpServers": {"server": {"url": "https://${HOST}/api/${PATH_SUFFIX}"}}}') + config.write_text('{"mcpServers": {"server": {"url": "https://${HOST}/api/${PATH_SUFFIX}"}}}', encoding='utf-8') servers = load_mcp_servers(config) @@ -1798,7 +1805,8 @@ def test_load_mcp_servers_with_non_string_values(tmp_path: Path, monkeypatch: py monkeypatch.setenv('PYTHON_CMD', 'python') config.write_text( '{"mcpServers": {"my_server": {"command": "${PYTHON_CMD}", "args": ["-m", "tests.mcp_server"], ' - '"metadata": {"count": 42, "enabled": true, "value": null}}}}' + '"metadata": {"count": 42, "enabled": true, "value": null}}}}', + encoding='utf-8', ) # This should successfully expand env vars and ignore the metadata field @@ -1816,7 +1824,9 @@ def test_load_mcp_servers_with_default_values(tmp_path: Path, monkeypatch: pytes # Test with undefined variable using default monkeypatch.delenv('UNDEFINED_VAR', raising=False) - config.write_text('{"mcpServers": {"server": {"command": "${UNDEFINED_VAR:-python3}", "args": []}}}') + config.write_text( + '{"mcpServers": {"server": {"command": "${UNDEFINED_VAR:-python3}", "args": []}}}', encoding='utf-8' + ) servers = load_mcp_servers(config) assert len(servers) == 1 @@ -1826,7 +1836,9 @@ def test_load_mcp_servers_with_default_values(tmp_path: Path, monkeypatch: pytes # Test with defined variable (should use actual value, not default) monkeypatch.setenv('DEFINED_VAR', 'actual_value') - 
config.write_text('{"mcpServers": {"server": {"command": "${DEFINED_VAR:-default_value}", "args": []}}}') + config.write_text( + '{"mcpServers": {"server": {"command": "${DEFINED_VAR:-default_value}", "args": []}}}', encoding='utf-8' + ) servers = load_mcp_servers(config) assert len(servers) == 1 @@ -1836,7 +1848,7 @@ def test_load_mcp_servers_with_default_values(tmp_path: Path, monkeypatch: pytes # Test with empty string as default monkeypatch.delenv('UNDEFINED_VAR', raising=False) - config.write_text('{"mcpServers": {"server": {"command": "${UNDEFINED_VAR:-}", "args": []}}}') + config.write_text('{"mcpServers": {"server": {"command": "${UNDEFINED_VAR:-}", "args": []}}}', encoding='utf-8') servers = load_mcp_servers(config) assert len(servers) == 1 @@ -1852,7 +1864,10 @@ def test_load_mcp_servers_with_default_values_in_url(tmp_path: Path, monkeypatch # Test with default values in URL monkeypatch.delenv('HOST', raising=False) monkeypatch.setenv('PROTOCOL', 'https') - config.write_text('{"mcpServers": {"server": {"url": "${PROTOCOL:-http}://${HOST:-localhost}:${PORT:-8080}/mcp"}}}') + config.write_text( + '{"mcpServers": {"server": {"url": "${PROTOCOL:-http}://${HOST:-localhost}:${PORT:-8080}/mcp"}}}', + encoding='utf-8', + ) servers = load_mcp_servers(config) assert len(servers) == 1 @@ -1869,7 +1884,8 @@ def test_load_mcp_servers_with_default_values_in_env_dict(tmp_path: Path, monkey monkeypatch.setenv('CUSTOM_VAR', 'custom_value') config.write_text( '{"mcpServers": {"server": {"command": "python", "args": [], ' - '"env": {"API_KEY": "${API_KEY:-default_key}", "CUSTOM": "${CUSTOM_VAR:-fallback}"}}}}' + '"env": {"API_KEY": "${API_KEY:-default_key}", "CUSTOM": "${CUSTOM_VAR:-fallback}"}}}}', + encoding='utf-8', ) servers = load_mcp_servers(config) @@ -1885,7 +1901,10 @@ def test_load_mcp_servers_with_complex_default_values(tmp_path: Path, monkeypatc monkeypatch.delenv('PATH_VAR', raising=False) # Test default with slashes, dots, and dashes - config.write_text('{"mcpServers": {"server": {"command": "${PATH_VAR:-/usr/local/bin/python-3.10}", "args": []}}}') + config.write_text( + '{"mcpServers": {"server": {"command": "${PATH_VAR:-/usr/local/bin/python-3.10}", "args": []}}}', + encoding='utf-8', + ) servers = load_mcp_servers(config) assert len(servers) == 1 @@ -1901,7 +1920,8 @@ def test_load_mcp_servers_with_mixed_syntax(tmp_path: Path, monkeypatch: pytest. 
monkeypatch.setenv('REQUIRED_VAR', 'required_value') monkeypatch.delenv('OPTIONAL_VAR', raising=False) config.write_text( - '{"mcpServers": {"server": {"command": "${REQUIRED_VAR}", "args": ["${OPTIONAL_VAR:-default_arg}"]}}}' + '{"mcpServers": {"server": {"command": "${REQUIRED_VAR}", "args": ["${OPTIONAL_VAR:-default_arg}"]}}}', + encoding='utf-8', ) servers = load_mcp_servers(config) From 435702175c2d9a21e95c625d72b8ecf3f3adbea3 Mon Sep 17 00:00:00 2001 From: Danipulok Date: Tue, 25 Nov 2025 02:17:02 +0200 Subject: [PATCH 5/7] Add `PLW1514` to ruff config --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 1b28c22490..b0db3f756e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -134,6 +134,7 @@ include = ["/README.md", "/Makefile", "/tests"] [tool.ruff] line-length = 120 target-version = "py310" +preview = true # For `PLW1514` include = [ "pydantic_ai_slim/**/*.py", "pydantic_evals/**/*.py", @@ -155,6 +156,7 @@ extend-select = [ "I", "D", "TID251", + "PLW1514", # https://docs.astral.sh/ruff/rules/unspecified-encoding/ ] flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" } mccabe = { max-complexity = 15 } @@ -181,6 +183,7 @@ convention = "google" [tool.ruff.format] # don't format python in docstrings, pytest-examples takes care of it docstring-code-format = false +preview = false # Override [tool.ruff] preview setting quote-style = "single" [tool.ruff.lint.per-file-ignores] From 730f64975cd977b92fe965e3d807445aba889d98 Mon Sep 17 00:00:00 2001 From: Danipulok Date: Tue, 25 Nov 2025 12:04:03 +0200 Subject: [PATCH 6/7] Have `preview = true` only in `[tool.ruff.lint]` --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b0db3f756e..a53439fa0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -134,7 +134,6 @@ include = ["/README.md", "/Makefile", "/tests"] [tool.ruff] line-length = 120 target-version = "py310" -preview = true # For `PLW1514` include = [ "pydantic_ai_slim/**/*.py", "pydantic_evals/**/*.py", @@ -147,6 +146,7 @@ include = [ ] [tool.ruff.lint] +preview = true # For `PLW1514` extend-select = [ "Q", "RUF100", @@ -183,7 +183,6 @@ convention = "google" [tool.ruff.format] # don't format python in docstrings, pytest-examples takes care of it docstring-code-format = false -preview = false # Override [tool.ruff] preview setting quote-style = "single" [tool.ruff.lint.per-file-ignores] From 79835c0aa1c17af55a35cf7d7648d0cab8c634ff Mon Sep 17 00:00:00 2001 From: Danipulok Date: Tue, 25 Nov 2025 12:24:06 +0200 Subject: [PATCH 7/7] Change `# noqa XXX` to `# noqa: XXX` format --- docs/.hooks/snippets.py | 2 +- pydantic_ai_slim/pydantic_ai/agent/abstract.py | 2 +- .../pydantic_ai/durable_exec/temporal/__init__.py | 4 ++-- pydantic_ai_slim/pydantic_ai/profiles/openai.py | 2 +- pydantic_evals/pydantic_evals/dataset.py | 4 ++-- pydantic_evals/pydantic_evals/otel/span_tree.py | 2 +- pydantic_graph/pydantic_graph/beta/graph.py | 2 +- pydantic_graph/pydantic_graph/beta/graph_builder.py | 6 +++--- pydantic_graph/pydantic_graph/beta/mermaid.py | 2 +- tests/test_examples.py | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/.hooks/snippets.py b/docs/.hooks/snippets.py index 4aa9e2ed30..a098abb97d 100644 --- a/docs/.hooks/snippets.py +++ b/docs/.hooks/snippets.py @@ -220,7 +220,7 @@ def format_highlight_lines(highlight_ranges: list[LineRange]) -> str: return ' '.join(parts) -def inject_snippets(markdown: str, 
relative_path_root: Path) -> str: # noqa C901 +def inject_snippets(markdown: str, relative_path_root: Path) -> str: # noqa: C901 def replace_snippet(match: re.Match[str]) -> str: line = match.group(0) directive = parse_snippet_directive(line) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index c7c1cb2b5c..567b61dff6 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -403,7 +403,7 @@ def run_stream( ) -> AbstractAsyncContextManager[result.StreamedRunResult[AgentDepsT, RunOutputDataT]]: ... @asynccontextmanager - async def run_stream( # noqa C901 + async def run_stream( # noqa: C901 self, user_prompt: str | Sequence[_messages.UserContent] | None = None, *, diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py index b002208b7e..48e4489ab9 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/__init__.py @@ -30,10 +30,10 @@ # Note: It's difficult to add a test that covers this because pytest presumably does these imports itself # when you have a @pytest.mark.anyio somewhere. # I suppose we could add a test that runs a python script in a separate process, but I have not done that... -import anyio._backends._asyncio # pyright: ignore[reportUnusedImport] +import anyio._backends._asyncio # pyright: ignore[reportUnusedImport] # noqa: F401 try: - import anyio._backends._trio # pyright: ignore[reportUnusedImport] # noqa F401 + import anyio._backends._trio # pyright: ignore[reportUnusedImport] # noqa: F401 except ImportError: pass diff --git a/pydantic_ai_slim/pydantic_ai/profiles/openai.py b/pydantic_ai_slim/pydantic_ai/profiles/openai.py index a3cd83d3e8..37c0316c34 100644 --- a/pydantic_ai_slim/pydantic_ai/profiles/openai.py +++ b/pydantic_ai_slim/pydantic_ai/profiles/openai.py @@ -161,7 +161,7 @@ def walk(self) -> JsonSchema: return result - def transform(self, schema: JsonSchema) -> JsonSchema: # noqa C901 + def transform(self, schema: JsonSchema) -> JsonSchema: # noqa: C901 # Remove unnecessary keys schema.pop('title', None) schema.pop('$schema', None) diff --git a/pydantic_evals/pydantic_evals/dataset.py b/pydantic_evals/pydantic_evals/dataset.py index d8d4c187a2..08cd8dbbb8 100644 --- a/pydantic_evals/pydantic_evals/dataset.py +++ b/pydantic_evals/pydantic_evals/dataset.py @@ -741,13 +741,13 @@ class Case(BaseModel, extra='forbid'): # pyright: ignore[reportUnusedClass] # metadata: meta_type | None = None # pyright: ignore[reportInvalidTypeForm,reportUnknownVariableType] expected_output: out_type | None = None # pyright: ignore[reportInvalidTypeForm,reportUnknownVariableType] if evaluator_schema_types: # pragma: no branch - evaluators: list[Union[tuple(evaluator_schema_types)]] = [] # pyright: ignore # noqa UP007 + evaluators: list[Union[tuple(evaluator_schema_types)]] = [] # pyright: ignore # noqa: UP007 class Dataset(BaseModel, extra='forbid'): name: str | None = None cases: list[Case] if evaluator_schema_types: # pragma: no branch - evaluators: list[Union[tuple(evaluator_schema_types)]] = [] # pyright: ignore # noqa UP007 + evaluators: list[Union[tuple(evaluator_schema_types)]] = [] # pyright: ignore # noqa: UP007 json_schema = Dataset.model_json_schema() # See `_add_json_schema` below, since `$schema` is added to the JSON, it has to be supported in the JSON diff --git 
a/pydantic_evals/pydantic_evals/otel/span_tree.py b/pydantic_evals/pydantic_evals/otel/span_tree.py index ea0e953a11..efdc8d49b8 100644 --- a/pydantic_evals/pydantic_evals/otel/span_tree.py +++ b/pydantic_evals/pydantic_evals/otel/span_tree.py @@ -241,7 +241,7 @@ def matches(self, query: SpanQuery | SpanPredicate) -> bool: return self._matches_query(query) - def _matches_query(self, query: SpanQuery) -> bool: # noqa C901 + def _matches_query(self, query: SpanQuery) -> bool: # noqa: C901 """Check if the span matches the query conditions.""" # Logical combinations if or_ := query.get('or_'): diff --git a/pydantic_graph/pydantic_graph/beta/graph.py b/pydantic_graph/pydantic_graph/beta/graph.py index f6aeab9a34..112f402ffc 100644 --- a/pydantic_graph/pydantic_graph/beta/graph.py +++ b/pydantic_graph/pydantic_graph/beta/graph.py @@ -562,7 +562,7 @@ def __post_init__(self): self.iter_stream_sender, self.iter_stream_receiver = create_memory_object_stream[_GraphTaskResult]() self._next_node_run_id = 1 - async def iter_graph( # noqa C901 + async def iter_graph( # noqa: C901 self, first_task: GraphTask ) -> AsyncGenerator[EndMarker[OutputT] | Sequence[GraphTask], EndMarker[OutputT] | Sequence[GraphTask]]: async with self.iter_stream_sender: diff --git a/pydantic_graph/pydantic_graph/beta/graph_builder.py b/pydantic_graph/pydantic_graph/beta/graph_builder.py index 8c895bd1e7..e004370b1b 100644 --- a/pydantic_graph/pydantic_graph/beta/graph_builder.py +++ b/pydantic_graph/pydantic_graph/beta/graph_builder.py @@ -318,7 +318,7 @@ def join( preferred_parent_fork: Literal['farthest', 'closest'] = 'farthest', ) -> Join[StateT, DepsT, InputT, OutputT]: if initial_factory is UNSET: - initial_factory = lambda: initial # pyright: ignore[reportAssignmentType] # noqa E731 + initial_factory = lambda: initial # pyright: ignore[reportAssignmentType] # noqa: E731 return Join[StateT, DepsT, InputT, OutputT]( id=JoinID(NodeID(node_id or generate_placeholder_node_id(get_callable_name(reducer)))), @@ -329,7 +329,7 @@ def join( ) # Edge building - def add(self, *edges: EdgePath[StateT, DepsT]) -> None: # noqa C901 + def add(self, *edges: EdgePath[StateT, DepsT]) -> None: # noqa: C901 """Add one or more edge paths to the graph. 
This method processes edge paths and automatically creates any necessary @@ -674,7 +674,7 @@ def build(self, validate_graph_structure: bool = True) -> Graph[StateT, DepsT, G ) -def _validate_graph_structure( # noqa C901 +def _validate_graph_structure( # noqa: C901 nodes: dict[NodeID, AnyNode], edges_by_source: dict[NodeID, list[Path]], ) -> None: diff --git a/pydantic_graph/pydantic_graph/beta/mermaid.py b/pydantic_graph/pydantic_graph/beta/mermaid.py index 3fccdf06d4..df12137319 100644 --- a/pydantic_graph/pydantic_graph/beta/mermaid.py +++ b/pydantic_graph/pydantic_graph/beta/mermaid.py @@ -49,7 +49,7 @@ class MermaidEdge: label: str | None -def build_mermaid_graph( # noqa C901 +def build_mermaid_graph( # noqa: C901 graph_nodes: dict[NodeID, AnyNode], graph_edges_by_source: dict[NodeID, list[Path]] ) -> MermaidGraph: """Build a mermaid graph.""" diff --git a/tests/test_examples.py b/tests/test_examples.py index efa799dfa1..6e3dc12acf 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -883,7 +883,7 @@ async def model_logic( # noqa: C901 raise RuntimeError(f'Unexpected message: {m}') -async def stream_model_logic( # noqa C901 +async def stream_model_logic( # noqa: C901 messages: list[ModelMessage], info: AgentInfo ) -> AsyncIterator[str | DeltaToolCalls]: # pragma: lax no cover async def stream_text_response(r: str) -> AsyncIterator[str]: