Doc: Prettify petab.select docs #1255

Merged 1 commit, Dec 17, 2023
49 changes: 22 additions & 27 deletions pypesto/select/method.py
@@ -2,7 +2,7 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Callable, Optional, Union

import numpy as np
import petab_select
@@ -55,7 +55,7 @@ class MethodLogger:
level:
The logging level.
logger:
A logger from the `logging` module.
A logger from the :mod:`logging` module.
"""

column_width: int = 12
@@ -138,8 +138,7 @@ def get_model_id(model: Model) -> str:

Returns
-------
str
The ID.
The ID.
"""
model_subspace_id = model.model_subspace_id or ''
original_model_id = model.model_id or model.get_hash()
@@ -193,11 +192,11 @@ class MethodCaller:
petab_select_problem:
The PEtab Select problem.
candidate_space:
A `petab_select.CandidateSpace`, used to generate candidate models.
A :class:`petab_select.CandidateSpace`, used to generate candidate models.
criterion:
The criterion by which models will be compared.
criterion_threshold:
The minimum improvement in criterion that a test model must have to
The minimum improvement in `criterion` that a test model must have to
be selected. The comparison is made according to the method. For
example, in `ForwardSelector`, test models are compared to the
previously selected model.
@@ -208,7 +207,7 @@ class MethodCaller:
Limit the number of calibrated models. NB: the number of accepted
models may (likely) be fewer.
logger:
A `MethodLogger`, used to log results.
A :class:`MethodLogger`, used to log results.
minimize_options:
A dictionary that will be passed to `pypesto.minimize` as keyword
arguments for model optimization.
@@ -219,24 +218,24 @@
objective is initialized, before calibration.
predecessor_model:
Specify the predecessor (initial) model for the model selection
algorithm. If `None`, then the algorithm will generate an
algorithm. If ``None``, then the algorithm will generate an initial
predecessor model if required.
select_first_improvement:
If `True`, model selection will terminate as soon as a better model
If ``True``, model selection will terminate as soon as a better model
is found. If `False`, all candidate models will be tested.
startpoint_latest_mle:
If `True`, one of the startpoints in the multistart optimization
If ``True``, one of the startpoints in the multistart optimization
will be the MLE of the latest model.
"""

def __init__(
self,
petab_select_problem: petab_select.Problem,
calibrated_models: Dict[str, Model],
calibrated_models: dict[str, Model],
# Arguments/attributes that can simply take the default value here.
criterion_threshold: float = 0.0,
limit: int = np.inf,
minimize_options: Dict = None,
minimize_options: dict = None,
model_postprocessor: TYPE_POSTPROCESSOR = None,
objective_customizer: Callable = None,
select_first_improvement: bool = False,
@@ -318,15 +317,15 @@ def __init__(
def __call__(
self,
predecessor_model: Optional[Union[Model, None]] = None,
newly_calibrated_models: Optional[Dict[str, Model]] = None,
) -> Tuple[List[Model], Dict[str, Model]]:
newly_calibrated_models: Optional[dict[str, Model]] = None,
) -> tuple[list[Model], dict[str, Model]]:
"""Run a single iteration of the model selection method.

A single iteration here refers to calibration of all candidate models.
For example, given a predecessor model with 3 estimated parameters,
with the forward method, a single iteration would involve calibration
of all models that have both: the same 3 estimated parameters; and 1
additional estimated paramenter.
additional estimated parameter.

The input `newly_calibrated_models` is from the previous iteration. The
output `newly_calibrated_models` is from the current iteration.
@@ -342,12 +341,11 @@ def __call__(

Returns
-------
tuple
A 2-tuple, with the following values:
A 2-tuple, with the following values:

1. the predecessor model for the newly calibrated models; and
2. the newly calibrated models, as a `dict` where keys are model
hashes and values are models.
1. the predecessor model for the newly calibrated models; and
2. the newly calibrated models, as a `dict` where keys are model
hashes and values are models.
"""
# All calibrated models in this iteration (see second return value).
self.logger.new_selection()
@@ -406,8 +404,7 @@ def handle_calibrated_model(

Returns
-------
MethodSignal
A `MethodSignal` that describes the result.
A :class:`MethodSignal` that describes the result.
"""
# Use the predecessor model from `__init__` if an iteration-specific
# predecessor model was not supplied to `__call__`.
@@ -465,9 +462,8 @@ def model1_gt_model0(

Returns
-------
bool
`True`, if `model1` is superior to `model0` by the criterion,
else `False`.
``True``, if `model1` is superior to `model0` by the criterion,
else ``False``.
"""
if self.criterion in [
Criterion.AIC,
@@ -509,8 +505,7 @@ def new_model_problem(

Returns
-------
ModelProblem
The model selection problem.
The model selection problem.
"""
x_guess = None
if (
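Besides the Sphinx cross-reference roles, the main change in this file is dropping the `typing.Dict`, `List` and `Tuple` aliases in favour of the built-in generics available since Python 3.9 (PEP 585). A minimal sketch of the new annotation style; the `summarize` function below is hypothetical and not part of pyPESTO:

```python
from typing import Optional  # Optional/Union stay; only Dict/List/Tuple are dropped


def summarize(models: dict[str, str], limit: Optional[int] = None) -> tuple[list[str], int]:
    """Return up to `limit` model IDs and the total number of models."""
    ids = list(models)[:limit]
    return ids, len(models)


print(summarize({"M_0": "base", "M_1": "extended"}, limit=1))  # (['M_0'], 2)
```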
11 changes: 5 additions & 6 deletions pypesto/select/misc.py
@@ -1,6 +1,6 @@
"""Miscellaneous methods."""
import logging
from typing import Dict, Iterable
from typing import Iterable

import pandas as pd
import petab
@@ -19,7 +19,7 @@
def model_to_pypesto_problem(
model: Model,
objective: Objective = None,
x_guesses: Iterable[Dict[str, float]] = None,
x_guesses: Iterable[dict[str, float]] = None,
hierarchical: bool = False,
) -> Problem:
"""Create a pyPESTO problem from a PEtab Select model.
Expand All @@ -43,8 +43,7 @@ def model_to_pypesto_problem(

Returns
-------
Problem
The pyPESTO select problem.
The pyPESTO select problem.
"""
petab_problem = petab_select.ui.model_to_petab(model=model)[PETAB_PROBLEM]

@@ -78,7 +77,7 @@ def model_to_pypesto_problem(
def model_to_hierarchical_pypesto_problem(*args, **kwargs) -> Problem:
"""Create a hierarchical pyPESTO problem from a PEtab Select model.

See `model_to_pypesto_problem`.
See :func:`model_to_pypesto_problem`.
"""
pypesto_problem = model_to_pypesto_problem(
*args,
@@ -89,7 +88,7 @@ def model_to_hierarchical_pypesto_problem(*args, **kwargs) -> Problem:


def correct_x_guesses(
x_guesses: Iterable[Dict[str, float]],
x_guesses: Iterable[dict[str, float]],
model: Model,
petab_problem: petab.Problem = None,
hierarchical: bool = False,
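The misc.py changes apply the same docstring conventions: Sphinx roles such as :func:`model_to_pypesto_problem` for cross-references, and a numpydoc ``Returns`` section that gives only the description, since the type already appears in the annotation. A brief sketch of the convention; `make_problem` is a hypothetical stub, not pyPESTO code:

```python
def make_problem(model) -> "Problem":
    """Create a problem from a PEtab Select model.

    See also :func:`model_to_pypesto_problem` and
    :class:`petab_select.CandidateSpace`.

    Returns
    -------
    The pyPESTO problem.
    """
    ...
```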
10 changes: 5 additions & 5 deletions pypesto/select/model_problem.py
@@ -1,6 +1,6 @@
"""Calibrate a PEtab Select model with pyPESTO."""
import time
from typing import Any, Callable, Dict, List, Optional
from typing import Any, Callable, Optional

from petab_select import Criterion, Model

@@ -33,7 +33,7 @@ class ModelProblem:
Keyword argument options that will be passed on to
:func:`pypesto.optimize.minimize`.
minimize_result:
A pyPESTO result with an optimize result.
A pyPESTO result with an `optimize` result.
model:
A PEtab Select model.
model_id:
@@ -61,8 +61,8 @@ def __init__(
criterion: Criterion,
valid: bool = True,
autorun: bool = True,
x_guess: List[float] = None,
minimize_options: Dict = None,
x_guess: list[float] = None,
minimize_options: dict = None,
objective_customizer: Optional[OBJECTIVE_CUSTOMIZER_TYPE] = None,
postprocessor: Optional["TYPE_POSTPROCESSOR"] = None,
model_to_pypesto_problem_method: Callable[[Any], Problem] = None,
@@ -141,7 +141,7 @@ def set_result(self, result: Result):
Parameters
----------
result:
A pyPESTO result with an optimize result.
A pyPESTO result with an `optimize` result.
"""
self.minimize_result = result
# TODO extract best parameter estimates, to use as start point for
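A note on `minimize_options`, which this file documents as keyword arguments forwarded to :func:`pypesto.optimize.minimize`: a caller might use it, for example, to limit the number of multistarts. The value below is purely illustrative:

```python
# Forwarded verbatim to pypesto.optimize.minimize by ModelProblem;
# the number of starts here is an arbitrary example value.
minimize_options = {
    "n_starts": 10,
}
```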
5 changes: 2 additions & 3 deletions pypesto/select/postprocessors.py
@@ -1,6 +1,5 @@
"""Process a model selection :class:`ModelProblem` after calibration."""
from pathlib import Path
from typing import List

import matplotlib.pyplot as plt
import numpy as np
@@ -20,7 +19,7 @@

def multi_postprocessor(
problem: ModelProblem,
postprocessors: List[TYPE_POSTPROCESSOR] = None,
postprocessors: list[TYPE_POSTPROCESSOR] = None,
):
"""Combine multiple postprocessors into a single postprocessor.

@@ -118,7 +117,7 @@ def model_id_binary_postprocessor(problem: ModelProblem):
def report_postprocessor(
problem: ModelProblem,
output_filepath: TYPE_PATH,
criteria: List[Criterion] = None,
criteria: list[Criterion] = None,
):
"""Create a TSV table of model selection results.

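Since `multi_postprocessor` takes a list of postprocessors and `report_postprocessor` needs an output path, one way to combine them into the single callable that a :class:`MethodCaller` expects as its `model_postprocessor` is `functools.partial`. A rough sketch under those assumptions; `print_id_postprocessor` and the output filename are made up for illustration:

```python
from functools import partial

from pypesto.select.postprocessors import multi_postprocessor, report_postprocessor


def print_id_postprocessor(problem):
    """Hypothetical postprocessor: print the ID of the calibrated model."""
    print(problem.model_id)


# A single callable applying both postprocessors to each calibrated ModelProblem.
postprocessor = partial(
    multi_postprocessor,
    postprocessors=[
        print_id_postprocessor,
        partial(report_postprocessor, output_filepath="selection_report.tsv"),
    ],
)
```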
44 changes: 20 additions & 24 deletions pypesto/select/problem.py
@@ -1,5 +1,5 @@
"""Manage all components of a pyPESTO model selection problem."""
from typing import Any, Dict, Iterable, List, Optional, Tuple
from typing import Any, Iterable, Optional

import petab_select
from petab_select import Model
@@ -57,8 +57,7 @@ def create_method_caller(self, **kwargs) -> MethodCaller:

Returns
-------
MethodCaller
A `MethodCaller` instance.
A :class:`MethodCaller` instance.
"""
return MethodCaller(
petab_select_problem=self.petab_select_problem,
@@ -69,8 +68,8 @@ def create_method_caller(self, **kwargs) -> MethodCaller:

def set_state(
self,
calibrated_models: Dict[str, Model],
newly_calibrated_models: Dict[str, Model],
calibrated_models: dict[str, Model],
newly_calibrated_models: dict[str, Model],
) -> None:
"""Set the state of the problem.

@@ -81,7 +80,7 @@ def set_state(

def update_with_newly_calibrated_models(
self,
newly_calibrated_models: Optional[Dict[str, Model]] = None,
newly_calibrated_models: Optional[dict[str, Model]] = None,
) -> None:
"""Update the state of the problem with newly calibrated models.

@@ -94,7 +93,7 @@ def update_with_newly_calibrated_models(

def handle_select_kwargs(
self,
kwargs: Dict[str, Any],
kwargs: dict[str, Any],
):
"""Check keyword arguments to select calls."""
if "newly_calibrated_models" in kwargs:
@@ -111,7 +110,7 @@ def handle_select_kwargs(
def select(
self,
**kwargs,
) -> Tuple[Model, Dict[str, Model], Dict[str, Model]]:
) -> tuple[Model, dict[str, Model], dict[str, Model]]:
"""Run a single iteration of a model selection algorithm.

The result is the selected model for the current run, independent of
@@ -121,14 +120,13 @@ def select(

Returns
-------
tuple
A 3-tuple, with the following values:

1. the best model;
2. all candidate models in this iteration, as a `dict` with
model hashes as keys and models as values; and
3. all candidate models from all iterations, as a `dict` with
model hashes as keys and models as values.
A 3-tuple, with the following values:

1. the best model;
2. all candidate models in this iteration, as a `dict` with
model hashes as keys and models as values; and
3. all candidate models from all iterations, as a `dict` with
model hashes as keys and models as values.
"""
# TODO move some options to PEtab Select? e.g.:
# - startpoint_latest_mle
@@ -159,7 +157,7 @@ def select(
def select_to_completion(
self,
**kwargs,
) -> List[Model]:
) -> list[Model]:
"""Run an algorithm until an exception `StopIteration` is raised.

``kwargs`` are passed to the :class:`MethodCaller` constructor.
@@ -170,8 +168,7 @@ def select_to_completion(

Returns
-------
list
The best models (the best model at each iteration).
The best models (the best model at each iteration).
"""
best_models = []
self.handle_select_kwargs(kwargs)
@@ -208,7 +205,7 @@ def multistart_select(
self,
predecessor_models: Iterable[Model] = None,
**kwargs,
) -> Tuple[Model, List[Model]]:
) -> tuple[Model, list[Model]]:
"""Run an algorithm multiple times, with different predecessor models.

Note that the same method caller is currently shared between all calls.
@@ -228,11 +225,10 @@

Returns
-------
tuple
A 2-tuple, with the following values:
A 2-tuple, with the following values:

1. the best model; and
2. the best models (the best model at each iteration).
1. the best model; and
2. the best models (the best model at each iteration).
"""
self.handle_select_kwargs(kwargs)
model_lists = []
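A hedged usage sketch of the return values documented above, assuming a pyPESTO select `Problem` instance named `select_problem` has already been constructed (its construction is not part of this diff):

```python
# One iteration: the best model, this iteration's candidate models, and all
# candidate models so far; the latter two are dicts keyed by model hash.
best_model, newly_calibrated, all_calibrated = select_problem.select()

# Alternatively, iterate until petab_select signals completion via
# StopIteration; this returns the best model of each iteration.
best_models = select_problem.select_to_completion()
```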