diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py
index 9987da468..00afd3257 100644
--- a/adaptive/learner/learner1D.py
+++ b/adaptive/learner/learner1D.py
@@ -6,7 +6,7 @@
 from copy import copy, deepcopy
 from numbers import Integral as Int
 from numbers import Real
-from typing import Any, Callable, Dict, List, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Sequence, Tuple, Union
 
 import cloudpickle
 import numpy as np
@@ -24,12 +24,22 @@
     partial_function_from_dataframe,
 )
 
+if TYPE_CHECKING:
+    import holoviews
+
 try:
     from typing import TypeAlias
 except ImportError:
     # Remove this when we drop support for Python 3.9
     from typing_extensions import TypeAlias
 
+try:
+    from typing import Literal
+except ImportError:
+    # Remove this when we drop support for Python 3.7
+    from typing_extensions import Literal
+
+
 try:
     import pandas
 
@@ -145,7 +155,7 @@ def resolution_loss_function(
 
     Returns
     -------
-    loss_function : callable
+    loss_function
 
     Examples
     --------
@@ -230,12 +240,12 @@ class Learner1D(BaseLearner):
 
     Parameters
     ----------
-    function : callable
+    function
         The function to learn. Must take a single real parameter and
         return a real number or 1D array.
-    bounds : pair of reals
+    bounds
         The bounds of the interval on which to learn 'function'.
-    loss_per_interval: callable, optional
+    loss_per_interval
         A function that returns the loss for a single interval of the domain.
         If not provided, then a default is used, which uses the scaled distance
         in the x-y plane as the loss. See the notes for more details.
@@ -356,15 +366,15 @@ def to_dataframe(
 
         Parameters
         ----------
-        with_default_function_args : bool, optional
+        with_default_function_args
             Include the ``learner.function``'s default arguments as a
             column, by default True
-        function_prefix : str, optional
+        function_prefix
             Prefix to the ``learner.function``'s default arguments' names,
             by default "function."
-        x_name : str, optional
+        x_name
             Name of the input value, by default "x"
-        y_name : str, optional
+        y_name
             Name of the output value, by default "y"
 
         Returns
@@ -403,16 +413,16 @@ def load_dataframe(
 
         Parameters
         ----------
-        df : pandas.DataFrame
+        df
             The data to load.
-        with_default_function_args : bool, optional
+        with_default_function_args
             The ``with_default_function_args`` used in ``to_dataframe()``,
             by default True
-        function_prefix : str, optional
+        function_prefix
             The ``function_prefix`` used in ``to_dataframe``, by default "function."
-        x_name : str, optional
+        x_name
             The ``x_name`` used in ``to_dataframe``, by default "x"
-        y_name : str, optional
+        y_name
             The ``y_name`` used in ``to_dataframe``, by default "y"
         """
         self.tell_many(df[x_name].values, df[y_name].values)
@@ -795,17 +805,19 @@ def _loss(
         loss = mapping[ival]
         return finite_loss(ival, loss, self._scale[0])
 
-    def plot(self, *, scatter_or_line: str = "scatter"):
+    def plot(
+        self, *, scatter_or_line: Literal["scatter", "line"] = "scatter"
+    ) -> holoviews.Overlay:
         """Returns a plot of the evaluated data.
 
         Parameters
         ----------
-        scatter_or_line : str, default: "scatter"
+        scatter_or_line
             Plot as a scatter plot ("scatter") or a line plot ("line").
 
         Returns
         -------
-        plot : `holoviews.Overlay`
+        plot
             Plot of the evaluated data.
         """
         if scatter_or_line not in ("scatter", "line"):
diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py
index 385f5b7d5..8944a8223 100644
--- a/adaptive/learner/learner2D.py
+++ b/adaptive/learner/learner2D.py
@@ -5,7 +5,7 @@
 from collections import OrderedDict
 from copy import copy
 from math import sqrt
-from typing import Callable, Iterable
+from typing import TYPE_CHECKING, Callable, Iterable
 
 import cloudpickle
 import numpy as np
@@ -22,6 +22,9 @@
     partial_function_from_dataframe,
 )
 
+if TYPE_CHECKING:
+    import holoviews
+
 try:
     import pandas
 
@@ -40,11 +43,11 @@ def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]:
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    deviations : list
+    deviations
         The deviation per triangle.
     """
     values = ip.values / (ip.values.ptp(axis=0).max() or 1)
@@ -79,11 +82,11 @@ def areas(ip: LinearNDInterpolator) -> np.ndarray:
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    areas : numpy.ndarray
+    areas
         The area per triangle in ``ip.tri``.
     """
     p = ip.tri.points[ip.tri.simplices]
@@ -99,11 +102,11 @@ def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray:
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    losses : numpy.ndarray
+    losses
         Loss per triangle in ``ip.tri``.
 
     Examples
@@ -136,7 +139,7 @@ def resolution_loss_function(
 
     Returns
     -------
-    loss_function : callable
+    loss_function
 
     Examples
     --------
@@ -173,11 +176,11 @@ def minimize_triangle_surface_loss(ip: LinearNDInterpolator) -> np.ndarray:
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    losses : numpy.ndarray
+    losses
         Loss per triangle in ``ip.tri``.
 
     Examples
@@ -217,11 +220,11 @@ def default_loss(ip: LinearNDInterpolator) -> np.ndarray:
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    losses : numpy.ndarray
+    losses
         Loss per triangle in ``ip.tri``.
     """
     dev = np.sum(deviations(ip), axis=0)
@@ -241,15 +244,15 @@ def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarr
 
     Parameters
     ----------
-    triangle : numpy.ndarray
+    triangle
         The coordinates of a triangle with shape (3, 2).
-    max_badness : int
+    max_badness
         The badness at which the point is either chosen on a edge or
         in the middle.
 
     Returns
     -------
-    point : numpy.ndarray
+    point
         The x and y coordinate of the suggested new point.
     """
     a, b, c = triangle
@@ -267,17 +270,17 @@ def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarr
     return point
 
 
-def triangle_loss(ip):
+def triangle_loss(ip: LinearNDInterpolator) -> list[float]:
     r"""Computes the average of the volumes of the simplex combined with each
     neighbouring point.
 
     Parameters
     ----------
-    ip : `scipy.interpolate.LinearNDInterpolator` instance
+    ip
 
     Returns
     -------
-    triangle_loss : list
+    triangle_loss
         The mean volume per triangle.
 
     Notes
@@ -311,13 +314,13 @@ class Learner2D(BaseLearner):
 
     Parameters
     ----------
-    function : callable
+    function
         The function to learn. Must take a tuple of two real
         parameters and return a real number.
-    bounds : list of 2-tuples
+    bounds
         A list ``[(a1, b1), (a2, b2)]`` containing bounds,
         one per dimension.
-    loss_per_triangle : callable, optional
+    loss_per_triangle
         A function that returns the loss for every triangle.
         If not provided, then a default is used, which uses
         the deviation from a linear estimate, as well as
@@ -424,19 +427,19 @@ def to_dataframe(
 
         Parameters
         ----------
-        with_default_function_args : bool, optional
+        with_default_function_args
             Include the ``learner.function``'s default arguments as a
             column, by default True
-        function_prefix : str, optional
+        function_prefix
             Prefix to the ``learner.function``'s default arguments' names,
             by default "function."
-        seed_name : str, optional
+        seed_name
             Name of the seed parameter, by default "seed"
-        x_name : str, optional
+        x_name
             Name of the input x value, by default "x"
-        y_name : str, optional
+        y_name
             Name of the input y value, by default "y"
-        z_name : str, optional
+        z_name
             Name of the output value, by default "z"
 
         Returns
@@ -475,18 +478,18 @@ def load_dataframe(
 
         Parameters
         ----------
-        df : pandas.DataFrame
+        df
             The data to load.
-        with_default_function_args : bool, optional
+        with_default_function_args
             The ``with_default_function_args`` used in ``to_dataframe()``,
             by default True
-        function_prefix : str, optional
+        function_prefix
             The ``function_prefix`` used in ``to_dataframe``, by default "function."
-        x_name : str, optional
+        x_name
             The ``x_name`` used in ``to_dataframe``, by default "x"
-        y_name : str, optional
+        y_name
             The ``y_name`` used in ``to_dataframe``, by default "y"
-        z_name : str, optional
+        z_name
             The ``z_name`` used in ``to_dataframe``, by default "z"
         """
         data = df.set_index([x_name, y_name])[z_name].to_dict()
@@ -538,7 +541,7 @@ def interpolated_on_grid(
 
         Parameters
         ----------
-        n : int, optional
+        n
             Number of points in x and y. If None (default) this number is
             evaluated by looking at the size of the smallest triangle.
 
@@ -611,14 +614,14 @@ def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator:
 
         Parameters
         ----------
-        scaled : bool
+        scaled
             Use True if all points are inside the
             unit-square [(-0.5, 0.5), (-0.5, 0.5)] or False if
             the data points are inside the ``learner.bounds``.
 
         Returns
         -------
-        interpolator : `scipy.interpolate.LinearNDInterpolator`
+        interpolator
 
         Examples
         --------
@@ -755,7 +758,9 @@ def remove_unfinished(self) -> None:
             if p not in self.data:
                 self._stack[p] = np.inf
 
-    def plot(self, n=None, tri_alpha=0):
+    def plot(
+        self, n: int | None = None, tri_alpha: float = 0
+    ) -> holoviews.Overlay | holoviews.HoloMap:
         r"""Plot the Learner2D's current state.
 
         This plot function interpolates the data on a regular grid.
@@ -764,16 +769,16 @@ def plot(self, n=None, tri_alpha=0):
 
         Parameters
         ----------
-        n : int
+        n
             Number of points in x and y. If None (default) this number is
             evaluated by looking at the size of the smallest triangle.
-        tri_alpha : float
+        tri_alpha
             The opacity ``(0 <= tri_alpha <= 1)`` of the triangles overlayed
             on top of the image. By default the triangulation is not visible.
 
         Returns
         -------
-        plot : `holoviews.core.Overlay` or `holoviews.core.HoloMap`
+        plot
             A `holoviews.core.Overlay` of
             ``holoviews.Image * holoviews.EdgePaths``. If the
             `learner.function` returns a vector output, a
diff --git a/adaptive/runner.py b/adaptive/runner.py
index ff3a137c3..812d287fb 100644
--- a/adaptive/runner.py
+++ b/adaptive/runner.py
@@ -104,49 +104,48 @@ class BaseRunner(metaclass=abc.ABCMeta):
 
     Parameters
     ----------
-    learner : `~adaptive.BaseLearner` instance
-    goal : callable, optional
+    learner
+        The learner on which to run the calculation.
+    goal
         The end condition for the calculation. This function must take
         the learner as its sole argument, and return True when we should
         stop requesting more points.
-    loss_goal : float, optional
+    loss_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the loss is smaller than this value.
-    npoints_goal : int, optional
+    npoints_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the number of points is larger or
         equal than this value.
-    end_time_goal : datetime, optional
+    end_time_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than this
         value.
-    duration_goal : timedelta or number, optional
+    duration_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than
         ``start_time + duration_goal``. ``duration_goal`` can be a number
         indicating the number of seconds.
-    executor : `concurrent.futures.Executor`, `distributed.Client`,\
-               `mpi4py.futures.MPIPoolExecutor`, `ipyparallel.Client` or\
-               `loky.get_reusable_executor`, optional
+    executor
         The executor in which to evaluate the function to be learned.
         If not provided, a new `~concurrent.futures.ProcessPoolExecutor` on
         Linux, and a `loky.get_reusable_executor` on MacOS and Windows.
-    ntasks : int, optional
+    ntasks
         The number of concurrent function evaluations. Defaults to the number
         of cores available in `executor`.
-    log : bool, default: False
+    log
         If True, record the method calls made to the learner by this runner.
-    shutdown_executor : bool, default: False
+    shutdown_executor
         If True, shutdown the executor when the runner has completed. If
         `executor` is not provided then the executor created internally
         by the runner is shut down, regardless of this parameter.
-    retries : int, default: 0
+    retries
         Maximum amount of retries of a certain point ``x`` in
         ``learner.function(x)``. After `retries` is reached for ``x``
         the point is present in ``runner.failed``.
-    raise_if_retries_exceeded : bool, default: True
+    raise_if_retries_exceeded
         Raise the error after a point ``x`` failed `retries`.
-    allow_running_forever : bool, default: False
+    allow_running_forever
         Allow the runner to run forever when the goal is None.
 
     Attributes
@@ -391,23 +390,23 @@ class BlockingRunner(BaseRunner):
 
     Parameters
     ----------
-    learner : `~adaptive.BaseLearner` instance
-    goal : callable, optional
+    learner
+    goal
         The end condition for the calculation. This function must take
         the learner as its sole argument, and return True when we should
         stop requesting more points.
-    loss_goal : float, optional
+    loss_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the loss is smaller than this value.
-    npoints_goal : int, optional
+    npoints_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the number of points is larger or
         equal than this value.
-    end_time_goal : datetime, optional
+    end_time_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than this
         value.
-    duration_goal : timedelta or number, optional
+    duration_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than
         ``start_time + duration_goal``. ``duration_goal`` can be a number
@@ -418,20 +417,20 @@ class BlockingRunner(BaseRunner):
         The executor in which to evaluate the function to be learned.
         If not provided, a new `~concurrent.futures.ProcessPoolExecutor` on
         Linux, and a `loky.get_reusable_executor` on MacOS and Windows.
-    ntasks : int, optional
+    ntasks
         The number of concurrent function evaluations. Defaults to the number
         of cores available in `executor`.
-    log : bool, default: False
+    log
         If True, record the method calls made to the learner by this runner.
-    shutdown_executor : bool, default: False
+    shutdown_executor
         If True, shutdown the executor when the runner has completed. If
         `executor` is not provided then the executor created internally
         by the runner is shut down, regardless of this parameter.
-    retries : int, default: 0
+    retries
         Maximum amount of retries of a certain point ``x`` in
         ``learner.function(x)``. After `retries` is reached for ``x``
         the point is present in ``runner.failed``.
-    raise_if_retries_exceeded : bool, default: True
+    raise_if_retries_exceeded
         Raise the error after a point ``x`` failed `retries`.
 
     Attributes
@@ -471,7 +470,7 @@ def __init__(
         npoints_goal: int | None = None,
         end_time_goal: datetime | None = None,
         duration_goal: timedelta | int | float | None = None,
-        executor: (ExecutorTypes | None) = None,
+        executor: ExecutorTypes | None = None,
         ntasks: int | None = None,
         log: bool = False,
         shutdown_executor: bool = False,
@@ -536,25 +535,25 @@ class AsyncRunner(BaseRunner):
 
     Parameters
     ----------
-    learner : `~adaptive.BaseLearner` instance
-    goal : callable, optional
+    learner
+    goal
         The end condition for the calculation. This function must take
         the learner as its sole argument, and return True when we should
         stop requesting more points.
         If not provided, the runner will run forever (or stop when no more
         points can be added), or until ``runner.task.cancel()`` is called.
-    loss_goal : float, optional
+    loss_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the loss is smaller than this value.
-    npoints_goal : int, optional
+    npoints_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the number of points is larger or
         equal than this value.
-    end_time_goal : datetime, optional
+    end_time_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than this
         value.
-    duration_goal : timedelta or number, optional
+    duration_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than
         ``start_time + duration_goal``. ``duration_goal`` can be a number
@@ -565,25 +564,25 @@ class AsyncRunner(BaseRunner):
         The executor in which to evaluate the function to be learned.
         If not provided, a new `~concurrent.futures.ProcessPoolExecutor` on
         Linux, and a `loky.get_reusable_executor` on MacOS and Windows.
-    ntasks : int, optional
+    ntasks
         The number of concurrent function evaluations. Defaults to the number
         of cores available in `executor`.
-    log : bool, default: False
+    log
         If True, record the method calls made to the learner by this runner.
-    shutdown_executor : bool, default: False
+    shutdown_executor
         If True, shutdown the executor when the runner has completed. If
         `executor` is not provided then the executor created internally
         by the runner is shut down, regardless of this parameter.
     ioloop : ``asyncio.AbstractEventLoop``, optional
         The ioloop in which to run the learning algorithm. If not provided,
         the default event loop is used.
-    retries : int, default: 0
+    retries
         Maximum amount of retries of a certain point ``x`` in
         ``learner.function(x)``. After `retries` is reached for ``x``
         the point is present in ``runner.failed``.
-    raise_if_retries_exceeded : bool, default: True
+    raise_if_retries_exceeded
         Raise the error after a point ``x`` failed `retries`.
-    allow_running_forever : bool, default: True
+    allow_running_forever
         If True, the runner will run forever if the goal is not provided.
 
     Attributes
@@ -630,7 +629,7 @@ def __init__(
         npoints_goal: int | None = None,
         end_time_goal: datetime | None = None,
         duration_goal: timedelta | int | float | None = None,
-        executor: (ExecutorTypes | None) = None,
+        executor: ExecutorTypes | None = None,
         ntasks: int | None = None,
         log: bool = False,
         shutdown_executor: bool = False,
@@ -736,22 +735,21 @@ def live_plot(
 
         Parameters
         ----------
-        runner : `~adaptive.Runner`
-        plotter : function
+        plotter
             A function that takes the learner as a argument and returns a
             holoviews object. By default ``learner.plot()`` will be called.
-        update_interval : int
+        update_interval
             Number of second between the updates of the plot.
-        name : hasable
+        name
             Name for the `live_plot` task in `adaptive.active_plotting_tasks`.
             By default the name is None and if another task with the same name
             already exists that other `live_plot` is canceled.
-        normalize : bool
+        normalize
             Normalize (scale to fit) the frame upon each update.
 
         Returns
         -------
-        dm : `holoviews.core.DynamicMap`
+        dm
             The plot that automatically updates every `update_interval`.
         """
         return live_plot(
@@ -811,12 +809,12 @@ def start_periodic_saving(
 
         Parameters
         ----------
-        save_kwargs : dict
+        save_kwargs
             Key-word arguments for ``learner.save(**save_kwargs)``.
             Only used if ``method=None``.
-        interval : int
+        interval
             Number of seconds between saving the learner.
-        method : callable
+        method
             The method to use for saving the learner. If None, the default
             saves the learner using "pickle" which calls
             ``learner.save(**save_kwargs)``. Otherwise provide a callable
@@ -874,23 +872,23 @@ def simple(
 
     Parameters
     ----------
-    learner : ~`adaptive.BaseLearner` instance
-    goal : callable, optional
+    learner
+    goal
         The end condition for the calculation. This function must take
         the learner as its sole argument, and return True when we should
         stop requesting more points.
-    loss_goal : float, optional
+    loss_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the loss is smaller than this value.
-    npoints_goal : int, optional
+    npoints_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the number of points is larger or
         equal than this value.
-    end_time_goal : datetime, optional
+    end_time_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than this
         value.
-    duration_goal : timedelta or number, optional
+    duration_goal
         Convenience argument, use instead of ``goal``. The end condition for the
         calculation. Stop when the current time is larger or equal than
         ``start_time + duration_goal``. ``duration_goal`` can be a number
@@ -922,9 +920,9 @@ def replay_log(
 
     Parameters
     ----------
-    learner : `~adaptive.BaseLearner` instance
+    learner
         New learner where the log will be applied.
-    log : list
+    log
         contains tuples: ``(method_name, *args)``.
     """
     for method, *args in log:
@@ -978,7 +976,9 @@ def _get_ncores(
 # --- Useful runner goals
 
 # TODO: deprecate
-def stop_after(*, seconds=0, minutes=0, hours=0) -> Callable[[BaseLearner], bool]:
+def stop_after(
+    *, seconds: float = 0.0, minutes: float = 0.0, hours: float = 0.0
+) -> Callable[[BaseLearner], bool]:
     """Stop a runner after a specified time.
 
     For example, to specify a runner that should stop after
@@ -998,7 +998,7 @@ def stop_after(*, seconds=0, minutes=0, hours=0) -> Callable[[BaseLearner], bool
 
     Returns
     -------
-    goal : callable
+    goal
         Can be used as the ``goal`` parameter when constructing
         a `Runner`.
 
@@ -1040,13 +1040,13 @@ def auto_goal(
 
     Parameters
     ----------
-    loss : float, optional
+    loss
         Stop when the loss is smaller than this value.
-    npoints : int, optional
+    npoints
         Stop when the number of points is larger or equal than this value.
-    end_time : datetime, optional
+    end_time
         Stop when the current time is larger or equal than this value.
-    duration : timedelta or number, optional
+    duration
         Stop when the current time is larger or equal than
         ``start_time + duration``. ``duration`` can be a number
         indicating the number of seconds.
diff --git a/docs/environment.yml b/docs/environment.yml
index fb5c30cc6..d63c37fef 100644
--- a/docs/environment.yml
+++ b/docs/environment.yml
@@ -6,21 +6,22 @@ channels:
 dependencies:
   - python
   - sortedcollections=2.1.0
-  - scikit-optimize=0.8.1
-  - scikit-learn=0.24.2
-  - scipy=1.9.1
+  - scikit-optimize=0.9.0
+  - scikit-learn=1.1.3
+  - scipy=1.9.3
   - holoviews=1.14.6
   - bokeh=2.4.0
   - panel=0.12.7
-  - pandas=1.4.4
-  - plotly=5.3.1
-  - ipywidgets=7.6.5
-  - myst-nb=0.16.0
+  - pandas=1.5.2
+  - plotly=5.11.0
+  - ipywidgets=8.0.2
+  - myst-nb=0.17.1
   - sphinx_fontawesome=0.0.6
-  - sphinx=4.2.0
-  - ffmpeg=5.1.1
-  - cloudpickle
-  - loky
+  - sphinx=5.3.0
+  - ffmpeg=5.1.2
+  - cloudpickle=2.2.0
+  - loky=3.3.0
   - furo
   - myst-parser
   - dask
+  - sphinx-autodoc-typehints
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 336484e3e..bb443241e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -36,6 +36,7 @@
     "sphinx.ext.napoleon",
     "myst_nb",
     "sphinx_fontawesome",
+    "sphinx_autodoc_typehints",
 ]
 source_parsers = {}
 templates_path = ["_templates"]