diff --git a/tests/benchmarks/test_basemodel_eq_performance.py b/tests/benchmarks/test_basemodel_eq_performance.py
index 95aa192751c..7210b9b692b 100644
--- a/tests/benchmarks/test_basemodel_eq_performance.py
+++ b/tests/benchmarks/test_basemodel_eq_performance.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import dataclasses
 import enum
 import gc
@@ -7,24 +9,13 @@
 import textwrap
 import timeit
 from importlib import metadata
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Generic,
-    Iterable,
-    List,
-    Optional,
-    Sized,
-    Type,
-    TypeVar,
-    Union,
-)
-
-import matplotlib.pyplot as plt
-import numpy as np
-import tqdm.auto as tqdm
-from matplotlib import axes, figure
+from typing import TYPE_CHECKING, Any, Callable, Generic, Iterable, Sized, TypeVar
+
+# Do not import additional dependencies at top-level
+if TYPE_CHECKING:
+    import matplotlib.pyplot as plt
+    import numpy as np
+    from matplotlib import axes, figure
 
 import pydantic
 
@@ -151,9 +142,9 @@ class _SafeGetItemProxy(Generic[K, V]):
     This makes is safe to use in `operator.itemgetter` when some keys may be missing
     """
 
-    wrapped: Dict[K, V]
+    wrapped: dict[K, V]
 
-    def __getitem__(self, __key: K) -> Union[V, _SentinelType]:
+    def __getitem__(self, __key: K) -> V | _SentinelType:
         return self.wrapped.get(__key, _SENTINEL)
 
     def __contains__(self, __key: K) -> bool:
@@ -241,9 +232,11 @@ def __eq__(self, other: Any) -> bool:
 
 
 def plot_all_benchmark(
-    bases: Dict[str, Type[pydantic.BaseModel]],
-    sizes: List[int],
+    bases: dict[str, type[pydantic.BaseModel]],
+    sizes: list[int],
 ) -> figure.Figure:
+    import matplotlib.pyplot as plt
+
     n_rows, n_cols = len(BENCHMARKS), 2
     fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols * 6, n_rows * 4))
 
@@ -266,11 +259,14 @@ def plot_all_benchmark(
 def plot_benchmark(
     title: str,
     benchmark: Callable,
-    bases: Dict[str, Type[pydantic.BaseModel]],
-    sizes: List[int],
+    bases: dict[str, type[pydantic.BaseModel]],
+    sizes: list[int],
     mimic_cached_property: bool,
-    ax: Optional[axes.Axes] = None,
+    ax: axes.Axes | None = None,
 ):
+    import matplotlib.pyplot as plt
+    import numpy as np
+
     ax = ax or plt.gca()
 
     arr_sizes = np.asarray(sizes)
@@ -303,7 +299,7 @@ class SizedIterable(Sized, Iterable):
 
 def run_benchmark_nodiff(
     title: str,
-    base: Type[pydantic.BaseModel],
+    base: type[pydantic.BaseModel],
     sizes: SizedIterable,
     mimic_cached_property: bool,
     n_execution: int = 10_000,
@@ -345,7 +341,7 @@ def run_benchmark_nodiff(
 
 def run_benchmark_first_diff(
     title: str,
-    base: Type[pydantic.BaseModel],
+    base: type[pydantic.BaseModel],
     sizes: SizedIterable,
     mimic_cached_property: bool,
     n_execution: int = 10_000,
@@ -387,7 +383,7 @@ def run_benchmark_first_diff(
 
 def run_benchmark_last_diff(
     title: str,
-    base: Type[pydantic.BaseModel],
+    base: type[pydantic.BaseModel],
     sizes: SizedIterable,
     mimic_cached_property: bool,
     n_execution: int = 10_000,
@@ -430,13 +426,15 @@ def run_benchmark_last_diff(
 
 def run_benchmark_random_unequal(
     title: str,
-    base: Type[pydantic.BaseModel],
+    base: type[pydantic.BaseModel],
     sizes: SizedIterable,
     mimic_cached_property: bool,
     n_samples: int = 100,
     n_execution: int = 1_000,
     n_repeat: int = 5,
 ) -> np.ndarray:
+    import numpy as np
+
     setup = textwrap.dedent(
         f"""
         import pydantic
@@ -498,10 +496,13 @@ def run_benchmark(
     statement: str,
     n_execution: int = 10_000,
     n_repeat: int = 5,
-    globals: Optional[Dict[str, Any]] = None,
+    globals: dict[str, Any] | None = None,
     progress_bar: bool = True,
-    params: Optional[Dict[str, SizedIterable]] = None,
+    params: dict[str, SizedIterable] | None = None,
 ) -> np.ndarray:
+    import numpy as np
+    import tqdm.auto as tqdm
+
     namespace = globals or {}
     # fast-path
     if not params:
@@ -569,6 +570,8 @@ def run_benchmark(
 
     args = parser.parse_args()
 
+    import matplotlib.pyplot as plt
+
     sizes = list(range(args.min_n_fields, args.max_n_fields))
     fig = plot_all_benchmark(IMPLEMENTATIONS, sizes=sizes)
     plt.tight_layout()