From cabfb43834302cee7f7201fb86ce17beae25fa97 Mon Sep 17 00:00:00 2001
From: Fumiya Simada
Date: Tue, 13 Jun 2023 12:16:55 +0900
Subject: [PATCH] Simplify the type annotations in `benchmarks` (#4703)

* refactor: simplified typing annotations under

* Fix: fixed syntax error on python3.7 and 3.8
---
 benchmarks/asv/optimize.py                    |  8 +--
 benchmarks/bayesmark/optuna_optimizer.py      | 15 ++---
 benchmarks/bayesmark/report_bayesmark.py      | 60 +++++++++----------
 benchmarks/kurobako/problems/wfg/problem.py   | 52 ++++++++--------
 .../problems/wfg/transformation_functions.py  |  2 +
 benchmarks/naslib/problem.py                  |  9 +--
 benchmarks/run_bayesmark.py                   | 12 ++--
 benchmarks/run_mo_kurobako.py                 |  6 +-
 8 files changed, 84 insertions(+), 80 deletions(-)

diff --git a/benchmarks/asv/optimize.py b/benchmarks/asv/optimize.py
index fff081b273..7efd4b8e55 100644
--- a/benchmarks/asv/optimize.py
+++ b/benchmarks/asv/optimize.py
@@ -1,6 +1,6 @@
+from __future__ import annotations
+
 from typing import cast
-from typing import List
-from typing import Union
 
 import optuna
 from optuna.samplers import BaseSampler
@@ -10,8 +10,8 @@
 from optuna.testing.storages import StorageSupplier
 
 
-def parse_args(args: str) -> List[Union[int, str]]:
-    ret: List[Union[int, str]] = []
+def parse_args(args: str) -> list[int | str]:
+    ret: list[int | str] = []
     for arg in map(lambda s: s.strip(), args.split(",")):
         try:
             ret.append(int(arg))
diff --git a/benchmarks/bayesmark/optuna_optimizer.py b/benchmarks/bayesmark/optuna_optimizer.py
index 2286cafade..1b925b8afc 100644
--- a/benchmarks/bayesmark/optuna_optimizer.py
+++ b/benchmarks/bayesmark/optuna_optimizer.py
@@ -1,6 +1,7 @@
+from __future__ import annotations
+
 from typing import Any
 from typing import Dict
-from typing import List
 from typing import Union
 
 import numpy as np
@@ -50,14 +51,14 @@ def __init__(self, api_config: ApiConfig, **kwargs: Any) -> None:
 
         try:
             sampler = _SAMPLERS[kwargs["sampler"]]
-            sampler_kwargs: Dict[str, Any] = kwargs["sampler_kwargs"]
+            sampler_kwargs: dict[str, Any] = kwargs["sampler_kwargs"]
         except KeyError:
             raise ValueError("Unknown sampler passed to Optuna optimizer.")
 
         try:
             pruner = _PRUNERS[kwargs["pruner"]]
-            pruner_kwargs: Dict[str, Any] = kwargs["pruner_kwargs"]
+            pruner_kwargs: dict[str, Any] = kwargs["pruner_kwargs"]
         except KeyError:
             raise ValueError("Unknown pruner passed to Optuna optimizer.")
 
@@ -69,7 +70,7 @@ def __init__(self, api_config: ApiConfig, **kwargs: Any) -> None:
             sampler=sampler(**sampler_kwargs),
             pruner=pruner(**pruner_kwargs),
         )
-        self.current_trials: Dict[int, int] = dict()
+        self.current_trials: dict[int, int] = dict()
 
     def _suggest(self, trial: optuna.trial.Trial) -> Suggestion:
         suggestions: Suggestion = dict()
@@ -96,8 +97,8 @@ def _suggest(self, trial: optuna.trial.Trial) -> Suggestion:
 
         return suggestions
 
-    def suggest(self, n_suggestions: int) -> List[Suggestion]:
-        suggestions: List[Suggestion] = list()
+    def suggest(self, n_suggestions: int) -> list[Suggestion]:
+        suggestions: list[Suggestion] = list()
         for _ in range(n_suggestions):
             trial = self.study.ask()
             params = self._suggest(trial)
@@ -107,7 +108,7 @@ def suggest(self, n_suggestions: int) -> List[Suggestion]:
 
         return suggestions
 
-    def observe(self, X: List[Suggestion], y: List[float]) -> None:
+    def observe(self, X: list[Suggestion], y: list[float]) -> None:
         for params, objective_value in zip(X, y):
             sid = hash(frozenset(params.items()))
             trial = self.current_trials.pop(sid)
diff --git a/benchmarks/bayesmark/report_bayesmark.py b/benchmarks/bayesmark/report_bayesmark.py
index e558dd98bb..fd1de66027 100644
--- a/benchmarks/bayesmark/report_bayesmark.py
+++ b/benchmarks/bayesmark/report_bayesmark.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import abc
 from collections import defaultdict
 from dataclasses import dataclass
@@ -6,8 +8,6 @@
 from typing import Dict
 from typing import Generator
 from typing import List
-from typing import Optional
-from typing import Set
 from typing import Tuple
 
 from jinja2 import Environment
@@ -38,7 +38,7 @@ def precision(self) -> int:
         raise NotImplementedError
 
     @abc.abstractmethod
-    def calculate(self, data: pd.DataFrame) -> List[float]:
+    def calculate(self, data: pd.DataFrame) -> list[float]:
         """Calculates metric for each study in data frame."""
         raise NotImplementedError
 
@@ -48,7 +48,7 @@ class BestValueMetric(BaseMetric):
     name = "Best value"
     precision = 6
 
-    def calculate(self, data: pd.DataFrame) -> List[float]:
+    def calculate(self, data: pd.DataFrame) -> list[float]:
         return data.groupby("uuid").generalization.min().values
 
 
@@ -56,8 +56,8 @@ class AUCMetric(BaseMetric):
     name = "AUC"
    precision = 3
 
-    def calculate(self, data: pd.DataFrame) -> List[float]:
-        aucs: List[float] = list()
+    def calculate(self, data: pd.DataFrame) -> list[float]:
+        aucs: list[float] = list()
         for _, grp in data.groupby("uuid"):
             auc = np.sum(grp.generalization.cummin())
             aucs.append(auc / grp.shape[0])
@@ -68,7 +68,7 @@ class ElapsedMetric(BaseMetric):
     name = "Elapsed"
     precision = 3
 
-    def calculate(self, data: pd.DataFrame) -> List[float]:
+    def calculate(self, data: pd.DataFrame) -> list[float]:
         # Total time does not include evaluation of bayesmark
         # objective function (no Optuna APIs are called there).
         time_cols = ["suggest", "observe"]
@@ -80,7 +80,7 @@ def __init__(self, data: pd.DataFrame) -> None:
         self._data = data
 
     @property
-    def optimizers(self) -> List[str]:
+    def optimizers(self) -> list[str]:
         return list(self._data.opt.unique())
 
     @classmethod
@@ -97,7 +97,7 @@ def summarize_solver(self, solver: str, metric: BaseMetric) -> Moments:
         return np.mean(run_metrics).item(), np.var(run_metrics).item()
 
     def sample_performance(self, metric: BaseMetric) -> Samples:
-        performance: Dict[str, List[float]] = {}
+        performance: dict[str, list[float]] = {}
         for solver, data in self._data.groupby("opt"):
             run_metrics = metric.calculate(data)
             performance[solver] = run_metrics
@@ -105,16 +105,16 @@ class DewanckerRanker:
-    def __init__(self, metrics: List[BaseMetric]) -> None:
+    def __init__(self, metrics: list[BaseMetric]) -> None:
         self._metrics = metrics
-        self._ranking: Optional[List[str]] = None
-        self._borda: Optional[np.ndarray] = None
+        self._ranking: list[str] | None = None
+        self._borda: np.ndarray | None = None
 
-    def __iter__(self) -> Generator[Tuple[str, int], None, None]:
+    def __iter__(self) -> Generator[tuple[str, int], None, None]:
         yield from zip(self.solvers, self.borda)
 
     @property
-    def solvers(self) -> List[str]:
+    def solvers(self) -> list[str]:
         if self._ranking is None:
             raise ValueError("Call rank first.")
         return self._ranking
@@ -136,11 +136,11 @@ def pick_alpha(report: PartialReport) -> float:
                 return cand
         return candidates[-1]
 
-    def _set_ranking(self, wins: Dict[str, int]) -> None:
+    def _set_ranking(self, wins: dict[str, int]) -> None:
         sorted_wins = [k for k, _ in sorted(wins.items(), key=lambda x: x[1])]
         self._ranking = sorted_wins[::-1]
 
-    def _set_borda(self, wins: Dict[str, int]) -> None:
+    def _set_borda(self, wins: dict[str, int]) -> None:
         sorted_wins = np.array(sorted(wins.values()))
         num_wins, num_ties = np.unique(sorted_wins, return_counts=True)
         points = np.searchsorted(sorted_wins, num_wins)
@@ -149,7 +149,7 @@ def _set_borda(self, wins: Dict[str, int]) -> None:
     def rank(self, report: PartialReport) -> None:
         # Implements Section 2.1.1
         # https://proceedings.mlr.press/v64/dewancker_strategy_2016.pdf
-        wins: Dict[str, int] = defaultdict(int)
+        wins: dict[str, int] = defaultdict(int)
         alpha = DewanckerRanker.pick_alpha(report)
         for metric in self._metrics:
             samples = report.sample_performance(metric)
@@ -172,28 +172,28 @@ class Solver:
     rank: int
     name: str
-    results: List[str]
+    results: list[str]
 
 
 @dataclass
 class Problem:
     number: int
     name: str
-    metrics: List[BaseMetric]
-    solvers: List[Solver]
+    metrics: list[BaseMetric]
+    solvers: list[Solver]
 
 
 class BayesmarkReportBuilder:
     def __init__(self) -> None:
-        self.solvers: Set[str] = set()
-        self.datasets: Set[str] = set()
-        self.models: Set[str] = set()
-        self.firsts: Dict[str, int] = defaultdict(int)
-        self.borda: Dict[str, int] = defaultdict(int)
+        self.solvers: set[str] = set()
+        self.datasets: set[str] = set()
+        self.models: set[str] = set()
+        self.firsts: dict[str, int] = defaultdict(int)
+        self.borda: dict[str, int] = defaultdict(int)
         self.metric_precedence = ""
-        self.problems: List[Problem] = []
+        self.problems: list[Problem] = []
 
-    def set_precedence(self, metrics: List[BaseMetric]) -> None:
+    def set_precedence(self, metrics: list[BaseMetric]) -> None:
         self.metric_precedence = " -> ".join([m.name for m in metrics])
 
     def add_problem(
@@ -201,13 +201,13 @@ def add_problem(
         name: str,
         report: PartialReport,
         ranking: DewanckerRanker,
-        metrics: List[BaseMetric],
+        metrics: list[BaseMetric],
     ) -> "BayesmarkReportBuilder":
-        solvers: List[Solver] = list()
+        solvers: list[Solver] = list()
         positions = np.abs(ranking.borda - (max(ranking.borda) + 1))
         for pos, solver in zip(positions, ranking.solvers):
             self.solvers.add(solver)
-            results: List[str] = list()
+            results: list[str] = list()
             for metric in metrics:
                 mean, variance = report.summarize_solver(solver, metric)
                 precision = metric.precision
diff --git a/benchmarks/kurobako/problems/wfg/problem.py b/benchmarks/kurobako/problems/wfg/problem.py
index fd437e3732..86ae92e154 100644
--- a/benchmarks/kurobako/problems/wfg/problem.py
+++ b/benchmarks/kurobako/problems/wfg/problem.py
@@ -1,7 +1,7 @@
+from __future__ import annotations
+
 import math
 import sys
-from typing import List
-from typing import Union
 
 import numpy as np
 import shape_functions
@@ -16,8 +16,8 @@ def __init__(
         S: np.ndarray,
         A: np.ndarray,
         upper_bounds: np.ndarray,
-        shapes: List[shape_functions.BaseShapeFunction],
-        transformations: List[List[transformation_functions.BaseTransformations]],
+        shapes: list[shape_functions.BaseShapeFunction],
+        transformations: list[list[transformation_functions.BaseTransformations]],
     ) -> None:
         assert all(S > 0)
         assert all((A == 0) + (A == 1))
@@ -84,11 +84,11 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConvexShapeFunction(M) for _ in range(M - 1)]
         shapes.append(shape_functions.MixedConvexOrConcaveShapeFunction(M, 1, 5))
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(4)]
 
         transformations[0] = [transformation_functions.IdenticalTransformation() for _ in range(k)]
@@ -163,11 +163,11 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConvexShapeFunction(M) for _ in range(M - 1)]
         shapes.append(shape_functions.DisconnectedShapeFunction(M, 1, 1, 5))
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(3)]
 
         transformations[0] = [transformation_functions.IdenticalTransformation() for _ in range(k)]
@@ -244,10 +244,10 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.LinearShapeFunction(M) for _ in range(M)]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(3)]
 
         transformations[0] = [transformation_functions.IdenticalTransformation() for _ in range(k)]
@@ -322,10 +322,10 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(2)]
 
         transformations[0] = [
@@ -388,10 +388,10 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(2)]
 
         transformations[0] = [
@@ -455,10 +455,10 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(2)]
 
         transformations[0] = [transformation_functions.IdenticalTransformation() for _ in range(k)]
@@ -521,13 +521,13 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
         def _input_converter0(i: int, y: np.ndarray) -> np.ndarray:
             return y[i:n]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(3)]
 
         transformations[0] = [
@@ -604,13 +604,13 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
         def _input_converter0(i: int, y: np.ndarray) -> np.ndarray:
             return y[: i - 1]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(3)]
 
         transformations[0] = [transformation_functions.IdenticalTransformation() for _ in range(k)]
@@ -686,13 +686,13 @@ def __init__(self, n_arguments: int, n_objectives: int, k: int):
         self.domain = np.zeros((n, 2))
         self.domain[:, 1] = upper_bounds
 
-        shapes: List[shape_functions.BaseShapeFunction]
+        shapes: list[shape_functions.BaseShapeFunction]
         shapes = [shape_functions.ConcaveShapeFunction(M) for _ in range(M)]
 
         def _input_converter0(i: int, y: np.ndarray) -> np.ndarray:
             return y[i:n]
 
-        transformations: List[List[transformation_functions.BaseTransformations]]
+        transformations: list[list[transformation_functions.BaseTransformations]]
         transformations = [[] for _ in range(3)]
 
         transformations[0] = [
@@ -768,12 +768,12 @@ class WFGProblem(problem.Problem):
     def __init__(self) -> None:
         super().__init__()
 
-    def create_evaluator(self, params: List[problem.Var]) -> problem.Evaluator:
+    def create_evaluator(self, params: list[problem.Var]) -> problem.Evaluator:
         return WFGEvaluator(params)
 
 
 class WFGEvaluator(problem.Evaluator):
-    def __init__(self, params: List[problem.Var]) -> None:
+    def __init__(self, params: list[problem.Var]) -> None:
         self._n_wfg = int(sys.argv[1])
         self._n_dim = int(sys.argv[2])
         self._n_obj = int(sys.argv[3])
@@ -782,7 +782,7 @@ def __init__(self, params: List[problem.Var]) -> None:
         self._x = np.array(params)
         self._current_step = 0
 
-        self.wfg: Union[WFG1, WFG2, WFG3, WFG4, WFG5, WFG6, WFG7, WFG8, WFG9]
+        self.wfg: WFG1 | WFG2 | WFG3 | WFG4 | WFG5 | WFG6 | WFG7 | WFG8 | WFG9
         if self._n_wfg == 1:
             self.wfg = WFG1(n_arguments=self._n_dim, n_objectives=self._n_obj, k=self._k)
         elif self._n_wfg == 2:
@@ -807,7 +807,7 @@ def __init__(self, params: List[problem.Var]) -> None:
     def current_step(self) -> int:
         return self._current_step
 
-    def evaluate(self, next_step: int) -> List[float]:
+    def evaluate(self, next_step: int) -> list[float]:
         self._current_step = 1
         v = self.wfg(self._x)
         v = v.tolist()
diff --git a/benchmarks/kurobako/problems/wfg/transformation_functions.py b/benchmarks/kurobako/problems/wfg/transformation_functions.py
index 8654a13c73..26209df7d5 100644
--- a/benchmarks/kurobako/problems/wfg/transformation_functions.py
+++ b/benchmarks/kurobako/problems/wfg/transformation_functions.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import abc
 from typing import Callable
 from typing import Union
diff --git a/benchmarks/naslib/problem.py b/benchmarks/naslib/problem.py
index 14e864ffd7..0dc2c42c33 100644
--- a/benchmarks/naslib/problem.py
+++ b/benchmarks/naslib/problem.py
@@ -1,6 +1,7 @@
+from __future__ import annotations
+
 import sys
 from typing import Any
-from typing import List
 
 from kurobako import problem
 from naslib.utils import get_dataset_api
@@ -55,7 +56,7 @@ def __init__(self, dataset: str, dataset_api: Any) -> None:
         self._dataset = dataset
         self._dataset_api = dataset_api
 
-    def create_evaluator(self, params: List[float]) -> problem.Evaluator:
+    def create_evaluator(self, params: list[float]) -> problem.Evaluator:
         ops = [op_names[int(x)] for x in params]
         arch_str = "|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|".format(*ops)
         return NASLibEvaluator(
@@ -64,14 +65,14 @@ def create_evaluator(self, params: List[float]) -> problem.Evaluator:
 
 
 class NASLibEvaluator(problem.Evaluator):
-    def __init__(self, learning_curve: List[float]) -> None:
+    def __init__(self, learning_curve: list[float]) -> None:
         self._current_step = 0
         self._lc = learning_curve
 
     def current_step(self) -> int:
         return self._current_step
 
-    def evaluate(self, next_step: int) -> List[float]:
+    def evaluate(self, next_step: int) -> list[float]:
         self._current_step = next_step
         return [-self._lc[next_step]]
 
diff --git a/benchmarks/run_bayesmark.py b/benchmarks/run_bayesmark.py
index 953beb04c4..66d8e0f8f3 100644
--- a/benchmarks/run_bayesmark.py
+++ b/benchmarks/run_bayesmark.py
@@ -1,9 +1,9 @@
+from __future__ import annotations
+
 import argparse
 import json
 import os
 import subprocess
-from typing import Dict
-from typing import List
 
 from matplotlib import cm
 from matplotlib import colors
@@ -132,7 +132,7 @@ def make_plot(
     ax.grid(alpha=0.2)
 
 
-def build_color_dict(names: List[str]) -> Dict[str, np.ndarray]:
+def build_color_dict(names: list[str]) -> dict[str, np.ndarray]:
     norm = colors.Normalize(vmin=0, vmax=1)
     m = cm.ScalarMappable(norm, cm.tab20)
     color_dict = m.to_rgba(np.linspace(0, 1, len(names)))
@@ -145,11 +145,11 @@ def partial_report(args: argparse.Namespace) -> None:
     eval_path = os.path.join("runs", _DB, "eval")
     time_path = os.path.join("runs", _DB, "time")
     studies = os.listdir(eval_path)
-    summaries: List[pd.DataFrame] = []
+    summaries: list[pd.DataFrame] = []
 
     for study in studies:
-        table_buffer: List[pd.DataFrame] = []
-        column_buffer: List[str] = []
+        table_buffer: list[pd.DataFrame] = []
+        column_buffer: list[str] = []
         for path in [eval_path, time_path]:
             with open(os.path.join(path, study), "r") as file:
                 data = json.load(file)
diff --git a/benchmarks/run_mo_kurobako.py b/benchmarks/run_mo_kurobako.py
index b6e78249a6..9b3f4f86c9 100644
--- a/benchmarks/run_mo_kurobako.py
+++ b/benchmarks/run_mo_kurobako.py
@@ -1,8 +1,8 @@
+from __future__ import annotations
+
 import argparse
 import os
 import subprocess
-from typing import Dict
-from typing import Union
 
 
 def run(args: argparse.Namespace) -> None:
@@ -99,7 +99,7 @@ def run(args: argparse.Namespace) -> None:
     subprocess.run(cmd, shell=True)
 
     # Plot pareto-front.
-    plot_args: Dict[str, Dict[str, Union[int, float]]]
+    plot_args: dict[str, dict[str, int | float]]
     plot_args = {
         "NASBench": {"xmin": 0, "xmax": 25000000, "ymin": 0, "ymax": 0.2},
        "ZDT1": {"xmin": 0, "xmax": 1, "ymin": 1, "ymax": 7},
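
The conversion pattern applied across all eight files is summarized below. This is an illustrative sketch, not code taken from the Optuna benchmarks: the `bucket` helper and its parameters are invented for the example. With `from __future__ import annotations` at the top of a module, annotations are treated as string literals and are not evaluated at definition time, so the PEP 585/604 spellings (`list[...]`, `dict[...]`, `X | None`) are accepted on Python 3.7 and 3.8 as well, which is presumably what the "fixed syntax error on python3.7 and 3.8" commit refers to.

    # Hypothetical example, not part of this patch: the annotation style before and after.
    from __future__ import annotations  # must come before any other statement

    # Before the change, the same signature would have been written as:
    #   from typing import Dict, List, Optional, Union
    #   def bucket(values: List[Union[int, float]], cutoff: Optional[float] = None) -> Dict[str, List[float]]: ...


    def bucket(values: list[int | float], cutoff: float | None = None) -> dict[str, list[float]]:
        """Split values into entries below and at/above the cutoff (mean of values by default)."""
        if cutoff is None:
            cutoff = sum(values) / len(values)
        out: dict[str, list[float]] = {"low": [], "high": []}
        for v in values:
            out["low" if v < cutoff else "high"].append(float(v))
        return out


    # bucket([1, 2.5, 3]) == {"low": [1.0], "high": [2.5, 3.0]}

One caveat is visible in report_bayesmark.py above: the future import only affects annotations. Type aliases such as the `Samples` and `Moments` aliases referenced in that file's context lines are ordinary assignments evaluated at runtime, which is presumably why its `from typing import Dict`, `List`, `Generator`, and `Tuple` lines are kept rather than rewritten.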