diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 000000000..22878ba2d
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,54 @@
+# From https://github.com/eeholmes/readthedoc-test/blob/main/.github/workflows/docs_pages.yml
+name: docs
+
+# execute this workflow automatically when we push to master
+on:
+  push:
+    branches: [ master ]
+  workflow_dispatch:
+
+jobs:
+
+  build_docs:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout main
+        uses: actions/checkout@v3
+        with:
+          path: master
+
+      - name: Checkout gh-pages
+        uses: actions/checkout@v3
+        with:
+          path: gh-pages
+          ref: gh-pages
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.11
+          cache: 'pip'
+
+      - name: Install dependencies
+        run: |
+          cd ./master
+          python -m pip install .[docs]
+      - name: Make the Sphinx docs
+        run: |
+          cd ./master/docsrc
+          make clean
+          make github
+      - name: Commit changes to docs
+        run: |
+          cd ./gh-pages
+          cp -R ../master/docs/* ./
+          git config --local user.email ""
+          git config --local user.name "github-actions"
+          git add -A
+          if ! git diff-index --quiet HEAD; then
+            git commit -m "auto: Rebuild docs."
+            git push
+          else
+            echo "No commit made because the docs have not changed."
+          fi
\ No newline at end of file
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index ac2348e23..2459e8d37 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -1,7 +1,8 @@
 name: Tests
 
 on:
-  - push
-  - pull_request
+  push:
+    branches: [master]
+  pull_request:
 
 jobs:
diff --git a/.gitignore b/.gitignore
index c5ac1b063..b7960eb55 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,11 +4,10 @@ __pycache__/
 */__pycache__/
 projects/
 */bayesflow.egg-info
-docs/build/
+docsrc/_build/
 build
+docs/
 
-# Notebooks
-docs/source/tutorial_notebooks/**
 
 # mypy
 .mypy_cache
@@ -31,3 +30,6 @@ docs/source/tutorial_notebooks/**
 
 # tox
 .tox
+
+# MacOS
+.DS_Store
\ No newline at end of file
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
deleted file mode 100644
index d0bbcf8d3..000000000
--- a/.readthedocs.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# .readthedocs.yaml
-# Read the Docs configuration file
-# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
-
-# Format version
-version: 2
-
-# Build documentation with Sphinx
-sphinx:
-  configuration: docs/source/conf.py
-
-# Additional build formats
-formats: all
-
-# Configure conda environment
-conda:
-  environment: environment.yaml
diff --git a/CONTRIBUTING.md b/CONTRIBUTING
similarity index 67%
rename from CONTRIBUTING.md
rename to CONTRIBUTING
index 9419fe477..6c250a1e5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING
@@ -1,5 +1,5 @@
 Contributing to BayesFlow
-==========
+=========================
 
 Workflow
 --------
@@ -65,3 +65,26 @@ You can run the all tests locally via:
 
 Or a specific test via:
 
     pytest -e test_[mytest]
+
+Tutorial Notebooks
+------------------
+
+New tutorial notebooks are always welcome! You can add your tutorial notebook file to `examples/` and add a reference
+to the list of notebooks in `docsrc/source/examples.rst`.
+Re-build the documentation (see below) and your notebook will be included.
+
+Documentation
+-------------
+
+The documentation uses [sphinx](https://www.sphinx-doc.org/) and relies on [numpy style docstrings](https://numpydoc.readthedocs.io/en/latest/format.html) in classes and functions (a minimal docstring example is given at the end of this section).
+The overall *structure* of the documentation is manually designed. This also applies to the API documentation. This has two implications for you:
+
+1. If you add to existing submodules, the documentation will update automatically (given that you use proper numpy docstrings).
+2. If you add a new submodule or subpackage, you need to add a file to `docsrc/source/api` and a reference to the new module in the appropriate section of `docsrc/source/api/bayesflow.rst`.
+
+You can re-build the documentation with
+
+    cd docsrc/
+    make clean && make github
+
+The entry point of the rendered documentation will be at `docs/index.html`.
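+
+For reference, a docstring in the expected numpy style looks roughly like this (a minimal, hypothetical example):
+
+    def scale(x, factor=1.0):
+        """Multiplies the input array by a constant factor.
+
+        Parameters
+        ----------
+        x : np.ndarray
+            The array to be scaled.
+        factor : float, optional, default: 1.0
+            The scaling factor.
+
+        Returns
+        -------
+        np.ndarray
+            The scaled array.
+        """
+
+        return x * factor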
\ No newline at end of file
diff --git a/INSTALL.rst b/INSTALL.rst
index a4747ec36..040bdfb64 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -1,5 +1,5 @@
-Install
-=======
+Full Installation Instructions
+==============================
 
 Requirements
 ------------
diff --git a/README.md b/README.md
index 0725215b8..f021b0988 100644
--- a/README.md
+++ b/README.md
@@ -7,13 +7,13 @@ Welcome to our BayesFlow library for efficient simulation-based Bayesian workflo
 
 For starters, check out some of our walk-through notebooks:
 
-1. [Quickstart amortized posterior estimation](docs/source/tutorial_notebooks/Intro_Amortized_Posterior_Estimation.ipynb)
-2. [Detecting model misspecification in posterior inference](docs/source/tutorial_notebooks/Model_Misspecification.ipynb)
-3. [Principled Bayesian workflow for cognitive models](docs/source/tutorial_notebooks/LCA_Model_Posterior_Estimation.ipynb)
-4. [Posterior estimation for ODEs](docs/source/tutorial_notebooks/Linear_ODE_system.ipynb)
-5. [Posterior estimation for SIR-like models](docs/source/tutorial_notebooks/Covid19_Initial_Posterior_Estimation.ipynb)
-6. [Model comparison for cognitive models](docs/source/tutorial_notebooks/Model_Comparison_MPT.ipynb)
-7. [Hierarchical model comparison for cognitive models](docs/source/tutorial_notebooks/Hierarchical_Model_Comparison_MPT.ipynb)
+1. [Quickstart amortized posterior estimation](examples/Intro_Amortized_Posterior_Estimation.ipynb)
+2. [Detecting model misspecification in posterior inference](examples/Model_Misspecification.ipynb)
+3. [Principled Bayesian workflow for cognitive models](examples/LCA_Model_Posterior_Estimation.ipynb)
+4. [Posterior estimation for ODEs](examples/Linear_ODE_system.ipynb)
+5. [Posterior estimation for SIR-like models](examples/Covid19_Initial_Posterior_Estimation.ipynb)
+6. [Model comparison for cognitive models](examples/Model_Comparison_MPT.ipynb)
+7. [Hierarchical model comparison for cognitive models](examples/Hierarchical_Model_Comparison_MPT.ipynb)
 
 ## Project Documentation
 
@@ -247,7 +247,7 @@ conf_matrix = bf.diagnostics.plot_confusion_matrix(sims["model_indices"], model_
 
 For the vast majority of simulated data sets, the "true" data-generating model is correctly identified. With these diagnostic results backing us up, we can proceed and apply our trained network to empirical data.
 
-BayesFlow is also able to conduct model comparison for hierarchical models. See this [tutorial notebook](docs/source/tutorial_notebooks/Hierarchical_Model_Comparison_MPT.ipynb) for an introduction to the associated workflow.
+BayesFlow is also able to conduct model comparison for hierarchical models. See this [tutorial notebook](examples/Hierarchical_Model_Comparison_MPT.ipynb) for an introduction to the associated workflow.
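+
+As a rough sketch (reusing the trained comparison `amortizer`, the `trainer`, and the multi-model generative wrapper `model` from the snippet above; treat the exact names as illustrative), posterior model probabilities for a fresh batch of simulations can be obtained via:
+
+```python
+import numpy as np
+
+# Simulate a new batch of data sets and configure them for the network
+new_sims = trainer.configurator(model(batch_size=100))
+
+# Approximate posterior model probabilities for each data set
+model_probs = amortizer.posterior_probs(new_sims)
+
+# The most plausible model per data set is simply the argmax
+best_models = np.argmax(model_probs, axis=1)
+```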
 
 ### References and Further Reading
diff --git a/bayesflow/amortizers.py b/bayesflow/amortizers.py
index 214dada03..c2a779dad 100644
--- a/bayesflow/amortizers.py
+++ b/bayesflow/amortizers.py
@@ -1103,8 +1103,8 @@ def sample(self, input_dict, n_samples, to_numpy=True, **kwargs):
         **kwargs : dict, optional, default: {}
             Additional keyword arguments passed to the summary network as the amortizers
 
-        Returns:
-        --------
+        Returns
+        -------
         samples_dict : dict
             A dictionary with keys `global_samples` and `local_samples`
             Local samples will hold an array-like of shape (num_replicas, num_samples, num_local)
diff --git a/bayesflow/computational_utilities.py b/bayesflow/computational_utilities.py
index 5c5c2ddc2..0e21764e0 100644
--- a/bayesflow/computational_utilities.py
+++ b/bayesflow/computational_utilities.py
@@ -59,8 +59,8 @@ def posterior_calibration_error(
     max_quantile : float in (0, 1), optional, default: 0.995
         The maximum posterior quantile to consider
 
-    Returns:
-    --------
+    Returns
+    -------
     calibration_errors : np.ndarray of shape (num_params, ) or (alpha_resolution, num_params),
         if ``aggregator_fun is None``.
         The aggregated calibration error per marginal posterior.
@@ -248,8 +248,8 @@ def expected_calibration_error(m_true, m_pred, num_bins=10):
        Obtaining well calibrated probabilities using bayesian binning.
        In Proceedings of the AAAI conference on artificial intelligence (Vol. 29, No. 1).
 
-    Important
-    ---------
+    Notes
+    -----
     Make sure that ``m_true`` are **one-hot encoded** classes!
 
     Parameters
diff --git a/bayesflow/coupling_networks.py b/bayesflow/coupling_networks.py
index 4a2c737b8..05970b645 100644
--- a/bayesflow/coupling_networks.py
+++ b/bayesflow/coupling_networks.py
@@ -601,8 +601,8 @@ def call(self, target_or_z, condition, inverse=False, **kwargs):
         target : tf.Tensor
             If inverse=True: The back-transformed z, shape (batch_size, inp_dim)
 
-        Important
-        ---------
+        Notes
+        -----
         If ``inverse=False``, the return is ``(z, log_det_J)``.\n
         If ``inverse=True``, the return is ``target``
         """
diff --git a/bayesflow/helper_networks.py b/bayesflow/helper_networks.py
index a0046839d..5f62e51c8 100644
--- a/bayesflow/helper_networks.py
+++ b/bayesflow/helper_networks.py
@@ -292,24 +292,30 @@ def call(self, inputs):
 class ActNorm(tf.keras.Model):
     """Implements an Activation Normalization (ActNorm) Layer.
     Activation Normalization is learned invertible normalization, using
-    a Scale (s) and Bias (b) vector [1].
-        y = s * x + b (forward)
-        x = (y - b)/s (inverse)
+    a Scale (s) and Bias (b) vector::
 
-    The scale and bias can be data dependent initalized, such that the
-    output has a mean of zero and standard deviation of one [1,2].
+        y = s * x + b (forward)
+        x = (y - b)/s (inverse)
+
+    Notes
+    -----
+
+    The scale and bias can be initialized in a data-dependent manner, such that the
+    output has a mean of zero and standard deviation of one [1]_, [2]_.
     Alternatively, it is initialized with vectors of ones (scale) and
     zeros (bias).
 
-    [1] - Kingma, Diederik P., and Prafulla Dhariwal.
-    "Glow: Generative flow with invertible 1x1 convolutions."
-    arXiv preprint arXiv:1807.03039 (2018).
+    References
+    ----------
+
+    .. [1] Kingma, Diederik P., and Prafulla Dhariwal.
+       "Glow: Generative flow with invertible 1x1 convolutions."
+       arXiv preprint arXiv:1807.03039 (2018).
 
-    [2] - Salimans, Tim, and Durk P. Kingma.
-    "Weight normalization: A simple reparameterization to accelerate
-    training of deep neural networks."
-    Advances in neural information processing systems 29
-    (2016): 901-909.
+    .. [2] Salimans, Tim, and Durk P. Kingma.
+       "Weight normalization: A simple reparameterization to accelerate
+       training of deep neural networks."
+       Advances in neural information processing systems 29 (2016): 901-909.
     """
 
     def __init__(self, latent_dim, act_norm_init, **kwargs):
@@ -353,8 +359,8 @@ def call(self, target, inverse=False):
         target : tf.Tensor
             If inverse=True: The inversly transformed targets, shape == target.shape
 
-        Important
-        ---------
+        Notes
+        -----
         If ``inverse=False``, the return is ``(z, log_det_J)``.\n
         If ``inverse=True``, the return is ``target``.
         """
diff --git a/bayesflow/inference_networks.py b/bayesflow/inference_networks.py
index 6420be85e..a86b4afc8 100644
--- a/bayesflow/inference_networks.py
+++ b/bayesflow/inference_networks.py
@@ -167,8 +167,8 @@ def call(self, targets, condition, inverse=False, **kwargs):
         target : tf.Tensor
             If inverse=True: The transformed out, shape (batch_size, ...)
 
-        Important
-        ---------
+        Notes
+        -----
         If ``inverse=False``, the return is ``(z, log_det_J)``.\n
         If ``inverse=True``, the return is ``target``.
         """
diff --git a/bayesflow/simulation.py b/bayesflow/simulation.py
index 1f5716bed..b2e776319 100644
--- a/bayesflow/simulation.py
+++ b/bayesflow/simulation.py
@@ -52,10 +52,13 @@ class ContextGenerator:
     While the latter can also be considered batchable in principle, batching them would require
     non-Tensor (i.e., non-rectangular) data structures, which usually means inefficient computations.
 
+    Examples
+    --------
     Example for a simulation context which will generate a random number of observations between
     1 and 100 for each training batch:
 
     >>> gen = ContextGenerator(non_batchable_context_fun=lambda : np.random.randint(1, 101))
+
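+    A hypothetical sketch for a batchable context, here a two-dimensional design vector
+    drawn anew for each simulation in a batch:
+
+    >>> gen = ContextGenerator(batchable_context_fun=lambda: np.random.normal(size=2))
+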
+ """Generates ``batch_size`` draws from the prior given optional context generator. Parameters ---------- @@ -313,12 +316,12 @@ def __call__(self, batch_size, *args, **kwargs): def plot_prior2d(self, **kwargs): """Generates a 2D plot representing bivariate prior ditributions. Uses the function - `bayesflow.diagnostics.plot_prior2d() internally for generating the plot. + ``bayesflow.diagnostics.plot_prior2d()`` internally for generating the plot. Parameters ---------- **kwargs : dict - Optional keyword arguments passed to the `plot_prior2d` function. + Optional keyword arguments passed to the ``plot_prior2d`` function. Returns ------- @@ -400,9 +403,10 @@ def __init__( An optional function (ideally an instance of ``ContextGenerator``) for generating control variables for the local_prior_fun. - Example: Varying number of local factors (e.g., groups, participants) between 1 and 100: + Examples + -------- + Varying number of local factors (e.g., groups, participants) between 1 and 100:: - `` def draw_hyper(): # Draw location for 2D conditional prior return np.random.normal(size=2) @@ -415,6 +419,7 @@ def draw_prior(means, num_groups, sigma=1.): context = ContextGenerator(non_batchable_context_fun=lambda : np.random.randint(1, 101)) prior = TwoLevelPrior(draw_hyper, draw_prior, local_context_generator=context) prior_dict = prior(batch_size=32) + """ self.hyper_prior = hyper_prior_fun @@ -512,19 +517,19 @@ class Simulator: An optional context generator (i.e., an instance of ContextGenerator) or a user-defined callable object implementing the following two methods can be provided: - - context_generator.batchable_context(batch_size) - - context_generator.non_batchable_context() + - ``context_generator.batchable_context(batch_size)`` + - ``context_generator.non_batchable_context()`` """ def __init__(self, batch_simulator_fun=None, simulator_fun=None, context_generator=None): """Instantiates a data generator which will perform randomized simulations given a set of parameters and optional context. - Either a batch_simulator_fun or simulator_fun, but not both, should be provided to instantiate a Simulator object. + Either a ``batch_simulator_fun`` or ``simulator_fun``, but not both, should be provided to instantiate a ``Simulator`` object. - If a batch_simulator_fun is provided, the interface will assume that the function operates on batches of parameter + If a ``batch_simulator_fun`` is provided, the interface will assume that the function operates on batches of parameter vectors and context variables and will pass the latter directly to the function. Power users should attempt to provide optimized batched simulators. - If a simulator_fun is provided, the interface will assume thatthe function operates on single parameter vectors and + If a ``simulator_fun`` is provided, the interface will assume thatthe function operates on single parameter vectors and context variables and will wrap the simulator internally to allow batched functionality. Parameters @@ -535,8 +540,8 @@ def __init__(self, batch_simulator_fun=None, simulator_fun=None, context_generat simulator_fun : callable A function (callable object) with optional control arguments responsible for generating a simulaiton given a single parameter vector and optional variables. - context generator : callable (default None, recommended instance of ContextGenerator) - An optional function (ideally an instance of ContextGenerator) for generating prior context variables. 
     """
 
     def __init__(self, batch_simulator_fun=None, simulator_fun=None, context_generator=None):
         """Instantiates a data generator which will perform randomized simulations given a set of parameters and optional context.
 
-        Either a batch_simulator_fun or simulator_fun, but not both, should be provided to instantiate a Simulator object.
+        Either a ``batch_simulator_fun`` or ``simulator_fun``, but not both, should be provided to instantiate a ``Simulator`` object.
 
-        If a batch_simulator_fun is provided, the interface will assume that the function operates on batches of parameter
+        If a ``batch_simulator_fun`` is provided, the interface will assume that the function operates on batches of parameter
         vectors and context variables and will pass the latter directly to the function. Power users should attempt to
         provide optimized batched simulators.
 
-        If a simulator_fun is provided, the interface will assume thatthe function operates on single parameter vectors and
+        If a ``simulator_fun`` is provided, the interface will assume that the function operates on single parameter vectors and
         context variables and will wrap the simulator internally to allow batched functionality.
 
         Parameters
         ----------
         batch_simulator_fun : callable
             A function (callable object) with optional control arguments responsible for generating a batch of simulations
             given a batch of parameters and optional context variables.
@@ -535,8 +540,8 @@ def __init__(self, batch_simulator_fun=None, simulator_fun=None, context_generat
         simulator_fun : callable
             A function (callable object) with optional control arguments responsible for generating a simulaiton given
             a single parameter vector and optional variables.
-        context generator : callable (default None, recommended instance of ContextGenerator)
-            An optional function (ideally an instance of ContextGenerator) for generating prior context variables.
+        context_generator : callable (default None, recommended instance of ContextGenerator)
+            An optional function (ideally an instance of ``ContextGenerator``) for generating prior context variables.
         """
 
         if (batch_simulator_fun is None) is (simulator_fun is None):
@@ -562,9 +567,9 @@ def __call__(self, params, *args, **kwargs):
         out_dict : dictionary
             An output dictionary with randomly simulated variables, the following keys are mandatory, if default keys not modified:
-            `sim_data` : value
-            `non_batchable_context` : value
-            `batchable_context` : value
+            ``sim_data`` : value
+            ``non_batchable_context`` : value
+            ``batchable_context`` : value
         """
 
         # Always assume first dimension is batch dimension
@@ -728,8 +733,8 @@ def __init__(
         name : str (default - "anonoymous")
             An optional name for the generative model.
             If kept default (None), 'anonymous' is set as name.
 
-        Important
-        ----------
+        Notes
+        -----
         If you are not using the provided ``Prior`` and ``Simulator`` wrappers for your prior and data generator,
         only functions returning a ``np.ndarray`` in the correct format will be accepted, since these will be
         wrapped internally. In addition, you need to indicate whether your simulator operates on batched of
@@ -761,7 +766,7 @@ def __init__(
         self._test()
 
     def __call__(self, batch_size, **kwargs):
-        """Carries out forward inference 'batch_size' times."""
+        """Carries out forward inference ``batch_size`` times."""
 
         # Forward inference
         prior_out = self.prior(batch_size, **kwargs.pop("prior_args", {}))
@@ -780,7 +785,7 @@ def __call__(self, batch_size, **kwargs):
         return out_dict
 
     def _config_custom_simulator(self, sim_fun, is_batched):
-        """Only called if user has provided a custom simulator not using the Simulator wrapper."""
+        """Only called if user has provided a custom simulator not using the ``Simulator`` wrapper."""
 
         if is_batched is None:
             raise ConfigurationError(
@@ -796,8 +801,8 @@ def _config_custom_simulator(self, sim_fun, is_batched):
     def plot_pushforward(
         self, parameter_draws=None, funcs_list=None, funcs_labels=None, batch_size=1000, show_raw_sims=True
     ):
-        """Creates simulations from parameter_draws (generated from self.prior if they are not passed as an argument)
-        and plots visualizations for them.
+        """Creates simulations from ``parameter_draws`` (generated from ``self.prior`` if they are not passed as
+        an argument) and plots visualizations for them.
 
         Parameters
         ----------
@@ -959,8 +964,8 @@ def presimulate_and_save(
         disable_user_input: bool, optional, default: False
             If True, user will not be asked if memory space is sufficient for presimulation.
 
-        Important
-        ----------
+        Notes
+        -----
         One of the following pairs of parameters has to be provided:
 
         - (iterations_per_epoch, epochs),
         - (total_iterations, batch_size)
         - (total_iterations, epochs)
 
         Providing all three of the parameters in these pairs leads to a consistency check,
-        since incompatible combinations are possible. 
+        since incompatible combinations are possible.
         """
         # Ensure that the combination of parameters provided is sufficient to perform presimulation
         # and does not contain internal contradictions
@@ -1117,6 +1122,7 @@ def presimulate_and_save(
 
 class TwoLevelGenerativeModel:
     """Basic interface for a generative model in a simulation-based context.
+
     Generally, a generative model consists of two mandatory components:
     - MultilevelPrior : A randomized function returning random parameter draws from a two-level prior distribution;
     - Simulator : A function which transforms the parameters into observables in a non-deterministic manner.
@@ -1149,8 +1155,8 @@ def __init__(
         name : str (default - "anonymous")
             An optional name for the generative model.
 
-        Important
-        ----------
+        Notes
+        -----
         If you are not using the provided ``TwoLevelPrior`` and ``Simulator`` wrappers for your prior and data
         generator, only functions returning a ``np.ndarray`` in the correct format will be accepted, since these
         will be wrapped internally. In addition, you need to indicate whether your simulator operates on batched of
@@ -1205,7 +1211,7 @@ def __call__(self, batch_size, **kwargs):
         return out_dict
 
     def _config_custom_simulator(self, sim_fun, is_batched):
-        """Only called if user has provided a custom simulator not using the Simulator wrapper."""
+        """Only called if user has provided a custom simulator not using the ``Simulator`` wrapper."""
 
         if is_batched is None:
             raise ConfigurationError(
diff --git a/bayesflow/trainers.py b/bayesflow/trainers.py
index 812196021..d4779befb 100644
--- a/bayesflow/trainers.py
+++ b/bayesflow/trainers.py
@@ -21,14 +21,11 @@
 import logging
 import os
 from pickle import load as pickle_load
+import tensorflow as tf
 
 import numpy as np
 from tqdm.autonotebook import tqdm
 
-logging.basicConfig()
-
-import tensorflow as tf
-
 from bayesflow.amortizers import (
     AmortizedLikelihood,
     AmortizedModelComparison,
@@ -51,60 +48,69 @@
 from bayesflow.helper_functions import backprop_step, extract_current_lr, format_loss_string, loss_to_string
 from bayesflow.simulation import GenerativeModel, MultiGenerativeModel
 
+logging.basicConfig()
+
 
 class Trainer:
     """This class connects a generative model (or, already simulated data from a model) with
     a configurator and a neural inference architecture for amortized inference (amortizer). A Trainer
     instance is responsible for optimizing the amortizer via various forms of simulation-based training.
 
-    At the very minimum, the trainer must be initialized with an ``amortizer`` instance, which is capable
-    of processing the (configured) outputs of a generative model. A ``configurator`` will then process
+    At the very minimum, the trainer must be initialized with an `amortizer` instance, which is capable
+    of processing the (configured) outputs of a generative model. A `configurator` will then process
     the outputs of the generative model and convert them into suitable inputs for the amortizer. Users
     can choose from a palette of default configurators or create their own configurators, essentially
-    building a modularized pipeline GenerativeModel -> Configurator -> Amortizer. Most complex models
-    wtill require custom configurators.
+    building a modularized pipeline `GenerativeModel` -> `Configurator` -> `Amortizer`. Most complex models
+    will require custom configurators.
+
+    Notes
+    -----
 
     Currently, the trainer supports the following simulation-based training regimes, based on efficiency
    considerations:
 
-    - Online training
-        Usage:
-        >>> trainer.train_online(epochs, iterations_per_epoch, batch_size, **kwargs)
+    * Online training
+
+        >>> trainer.train_online(epochs, iterations_per_epoch, batch_size, **kwargs)
+
+        This training regime is optimal for fast generative models which can efficiently simulate data on-the-fly.
+        In order for this training regime to be efficient, on-the-fly batch simulations should not take longer
+        than 2-3 seconds.
+
+    * Experience replay training
+
+        >>> trainer.train_experience_replay(epochs, iterations_per_epoch, batch_size, **kwargs)
+
+        This training regime is also good for fast generative models capable of efficiently simulating data on-the-fly.
+        Compared to pure online training, this training will keep an experience replay buffer from which simulations
+        are randomly sampled, so the networks will likely see some simulations multiple times.
 
-        This training regime is optimal for fast generative models which can efficiently simulated data on-the-fly.
-        In order for this training regime to be efficient, on-the-fly batch simulations should not take longer than 2-3 seconds.
+    * Round-based training
 
-    - Experience replay training
-        Usage:
-        >>> trainer.train_experience_replay(epochs, iterations_per_epoch, batch_size, **kwargs)
+        >>> trainer.train_rounds(rounds, sim_per_round, epochs, batch_size, **kwargs)
 
-        This training regime is also good for fast generative models capable of efficiently simulating data on-the-fly.
-        Compare to pure online training, this training will keep an experience replay buffer from which simulations
-        are randomly sampled, so the networks will likely see some simulations multiple times.
+        This training regime is optimal for slow, but still reasonably performant generative models.
+        In order for this training regime to be efficient, on-the-fly batch simulations should not take
+        longer than 2-3 minutes.
 
-    - Round-based training
-        Usage:
-        >>> trainer.train_rounds(rounds, sim_per_round, epochs, batch_size, **kwargs)
+        .. note:: Overfitting presents a danger when using small numbers of simulated data sets, so it is recommended
+           to use some amount of regularization for the neural amortizer(s).
 
-        This training regime is optimal for slow, but still reasonably performant generative models.
-        In order for this training regime to be efficient, on-the-fly batch simulations should not take longer than one 2-3 minutes.
+    * Offline training
 
-        Important: overfitting presents a danger when using small numbers of simulated data sets, so it is recommended to use
-        some amount of regularization for the neural amortizer(s).
+        >>> trainer.train_offline(simulations_dict, epochs, batch_size, **kwargs)
 
-    - Offline taining
-        Usage:
-        >>> trainer.train_offline(simulations_dict, epochs, batch_size, **kwargs)
+        This training regime is optimal for very slow, external simulators, which take several minutes for a
+        single simulation. It assumes that all training data has been already simulated and stored on disk.
 
-        This training regime is optimal for very slow, external simulators, which take several minutes for a single simulation.
-        It assumes that all training data has been already simulated and stored on disk.
+        .. warning:: Overfitting presents a danger when using a small simulated data set, so it is recommended to use
+           some amount of regularization for the neural amortizer(s).
 
-        Important: overfitting presents a danger when using a small simulated data set, so it is recommended to use
-        some amount of regularization for the neural amortizer(s).
+        .. note::
+           For extremely slow simulators (i.e., more than an hour for a single simulation), the BayesFlow framework
+           might not be the ideal choice and should probably be considered in combination with a black-box surrogate
+           optimization method, such as Bayesian optimization.
-        Note: For extremely slow simulators (i.e., more than an hour of a single simulation), the BayesFlow framework
-        might not be the ideal choice and should probably be considered in combination with a black-box surrogate optimization method,
-        such as Bayesian optimization.
     """
 
     def __init__(
@@ -124,9 +130,9 @@ def __init__(
 
         Parameters
         ----------
-        amortizer : bayesflow.amortizers.Amortizer
+        amortizer : `bayesflow.amortizers.Amortizer`
             The neural architecture to be optimized.
-        generative_model : bayesflow.forward_inference.GenerativeModel
+        generative_model : `bayesflow.forward_inference.GenerativeModel`
             A generative model returning a dictionary with randomly sampled parameters, data, and optional context
         configurator : callable or None, optional, default: None
             A callable object transforming and combining the outputs of the generative model into inputs for a BayesFlow
@@ -141,15 +147,18 @@ def __init__(
             If True, do not perform consistency checks, i.e., simulator runs and passed through nets
         memory : bool or bayesflow.SimulationMemory, optional, default: False
             If ``True``, store a pre-defined amount of simulations for later use (validation, etc.).
-            If ``SimulationMemory`` instance provided, stores a reference to the instance.
+            If `SimulationMemory` instance provided, stores a reference to the instance.
             Otherwise the corresponding attribute will be set to None.
-        **kwargs : dict, optional, default: {}
-            Optional keyword arguments for controling the behavior of the Trainer instance. As of now, these could be:
-            memory_kwargs : dict
-                Keyword arguments to be passed to the ``SimulationMemory`` instance, if ``memory=True``
-            num_models : int
-                The number of models in an amortized model comparison scenario, in case of a custom model comparison
-                amortizer which does not have a num_models attribute.
+
+        Other Parameters
+        ----------------
+
+        memory_kwargs : dict
+            Keyword arguments to be passed to the `SimulationMemory` instance, if ``memory=True``
+        num_models : int
+            The number of models in an amortized model comparison scenario, in case of a custom model comparison
+            amortizer which does not have a num_models attribute.
+
         """
 
         # Set-up logging
@@ -206,7 +215,7 @@ def __init__(
         self.manager = None
         self.checkpoint_path = checkpoint_path
 
-        # Perform a sanity check wiuth provided components
+        # Perform a sanity check with provided components
         if not skip_checks:
             self._check_consistency()
 
@@ -214,23 +223,28 @@ def diagnose_latent2d(self, inputs=None, **kwargs):
         """Performs visual pre-inference diagnostics of latent space on either provided validation data
         (new simulations) or internal simulation memory.
         If ``inputs is not None``, then diagnostics will be performed on the inputs, regardless
-        whether the ``simulation_memory`` of the trainer is empty or not. If ``inputs is None``, then
-        the trainer will try to access is memory or raise a ``ConfigurationError``.
+        whether the `simulation_memory` of the trainer is empty or not. If ``inputs is None``, then
+        the trainer will try to access its memory or raise a `ConfigurationError`.
 
         Parameters
         ----------
         inputs : None, list, or dict, optional, default: None
             The optional inputs to use
-        **kwargs : dict, optional, default: {}
-            Optional keyword arguments, which could be:
-            ``conf_args`` - optional keyword arguments passed to the configurator
-            ``net_args`` - optional keyword arguments passed to the amortizer
-            ``plot_args`` - optional keyword arguments passed to ``plot_latent_space_2d``
+
+        Other Parameters
+        ----------------
+
+        conf_args :
+            optional keyword arguments passed to the configurator
+        net_args :
+            optional keyword arguments passed to the amortizer
+        plot_args :
+            optional keyword arguments passed to `plot_latent_space_2d`
 
         Returns
         -------
         fig : plt.Figure
-            The figure object which can be readily saved to disk using ``fig.savefig()``.
+            The figure object which can be readily saved to disk using `fig.savefig()`.
         """
 
         if type(self.amortizer) is AmortizedPosterior:
@@ -238,7 +252,7 @@ def diagnose_latent2d(self, inputs=None, **kwargs):
             if inputs is None:
                 if self.simulation_memory is None:
                     raise ConfigurationError(
-                        "You should either enable ``simulation memory`` or supply the ``inputs`` argument."
+                        "You should either enable simulation memory or supply the inputs argument."
                     )
                 else:
                     inputs = self.simulation_memory.get_memory()
@@ -258,8 +272,8 @@ def diagnose_sbc_histograms(self, inputs=None, n_samples=None, **kwargs):
         """Performs visual pre-inference diagnostics via simulation-based calibration (SBC)
         (new simulations) or internal simulation memory.
         If ``inputs is not None``, then diagnostics will be performed on the inputs, regardless
-        whether the ``simulation_memory`` of the trainer is empty or not. If ``inputs is None``, then
-        the trainer will try to access is memory or raise a ``ConfigurationError``.
+        whether the `simulation_memory` of the trainer is empty or not. If ``inputs is None``, then
+        the trainer will try to access its memory or raise a `ConfigurationError`.
 
         Parameters
         ----------
         inputs : None, list, or dict, optional, default: None
             The optional inputs to use
         n_samples : int or None, optional, default: None
             The number of posterior samples to draw for each simulated data set.
-            If None, the number will be heuristically determined so n_sim / n_draws ~= 20
-        **kwargs : dict, optional, default: {}
-            Optional keyword arguments, which could be:
-            ``conf_args`` - optional keyword arguments passed to the configurator
-            `net_args`` - optional keyword arguments passed to the amortizer
-            ``plot_args`` - optional keyword arguments passed to ``plot_sbc``
+            If None, the number will be heuristically determined so that n_sim / n_draws is approximately equal to 20
+
+        Other Parameters
+        ----------------
+        conf_args :
+            optional keyword arguments passed to the configurator
+        net_args :
+            optional keyword arguments passed to the amortizer
+        plot_args :
+            optional keyword arguments passed to `plot_sbc()`
 
         Returns
         -------
         fig : plt.Figure
-            The figure object which can be readily saved to disk using ``fig.savefig()``.
+            The figure object which can be readily saved to disk using `fig.savefig()`.
         """
 
         if type(self.amortizer) is AmortizedPosterior:
@@ -347,7 +365,7 @@ def train_online(
             Number of batch simulations to perform per epoch
         batch_size : int
             Number of simulations to perform at each backprop step
-        save_checkpoint : bool (default - True)
+        save_checkpoint : bool, default: True
             A flag to decide whether to save checkpoints after each epoch,
             if a checkpoint_path provided during initialization, otherwise ignored.
         optimizer : tf.keras.optimizer.Optimizer or None
@@ -362,29 +380,33 @@ def train_online(
             Whether to use optional stopping or not during training. Could speed up training.
             Only works if ``validation_sims is not None``, i.e., validation data has been provided.
         use_autograph : bool, optional, default: True
-            Whether to use autograph for the backprop step. Could lead to enourmous speed-ups but
+            Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
             could also be harder to debug.
         validation_sims : dict or None, optional, default: None
             Simulations used as a "validation set".
             If ``dict``, will assume it's the output of a generative model and try
-            ``amortizer.compute_loss(configurator(validation_sims))''
+            ``amortizer.compute_loss(configurator(validation_sims))``
             after each epoch.
             If ``int``, will assume it's the number of sims to generate from the generative
             model before starting training. Only considered if a generative model has been
             provided during initialization.
             If ``None`` (default), no validation set will be used.
-        **kwargs : dict, optional
-            Optional keyword arguments, which can be:
-            ``model_args`` - optional kwargs passed to the generative model
-            ``val_model_args`` - optional kwargs passed to the generative model
-                                 for generating validation data. Only useful if
-                                 ``type(validation_sims) is int``.
-            ``conf_args`` - optional kwargs passed to the configurator
-                            before each backprop (update) step.
-            ``val_conf_args`` - optional kwargs passed to the configurator
-                                then configuring the validation data.
-            ``net_args`` - optional kwargs passed to the amortizer
-            ``early_stopping_args`` - optional kwargs passed to the ``EarlyStopper``
+
+        Other Parameters
+        ----------------
+        model_args :
+            optional kwargs passed to the generative model
+        val_model_args :
+            optional kwargs passed to the generative model for generating validation data. Only useful if
+            ``type(validation_sims) is int``.
+        conf_args :
+            optional kwargs passed to the configurator before each backprop (update) step.
+        val_conf_args :
+            optional kwargs passed to the configurator when configuring the validation data.
+        net_args :
+            optional kwargs passed to the amortizer
+        early_stopping_args :
+            optional kwargs passed to the `EarlyStopper`
 
         Returns
         -------
@@ -463,54 +485,57 @@ def train_offline(
 
         Parameters
         ----------
         simulations_dict : dict
-                A dictionaty containing the simulated data / context, if using the default keys,
-                the method expects at least the mandatory keys ``sim_data`` and ``prior_draws`` to be present
+            A dictionary containing the simulated data / context, if using the default keys,
+            the method expects at least the mandatory keys ``sim_data`` and ``prior_draws`` to be present
         epochs : int
-                Number of epochs (and number of times a checkpoint is stored)
+            Number of epochs (and number of times a checkpoint is stored)
         batch_size : int
-                Number of simulations to perform at each backpropagation step
-        save_checkpoint : bool (default - True)
-                Determines whether to save checkpoints after each epoch,
-                if a checkpoint_path provided during initialization, otherwise ignored.
+            Number of simulations to perform at each backpropagation step
+        save_checkpoint : bool, default: True
+            Determines whether to save checkpoints after each epoch,
+            if a checkpoint_path provided during initialization, otherwise ignored.
         optimizer : tf.keras.optimizer.Optimizer or None
-                Optimizer for the neural network. ``None`` will result in ``tf.keras.optimizers.Adam``
-                using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
-                will override default learning rate and schedule settings.
+            Optimizer for the neural network. ``None`` will result in ``tf.keras.optimizers.Adam``
+            using a learning rate of 5e-4 and a cosine decay from 5e-4 to 0. A custom optimizer
+            will override default learning rate and schedule settings.
         reuse_optimizer : bool, optional, default: False
-                A flag indicating whether the optimizer instance should be treated as persistent or not.
-                If ``False``, the optimizer and its states are not stored after training has finished.
-                Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
+            A flag indicating whether the optimizer instance should be treated as persistent or not.
+            If ``False``, the optimizer and its states are not stored after training has finished.
+            Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
         early_stopping : bool, optional, default: False
-                Whether to use optional stopping or not during training. Could speed up training.
-                Only works if ``validation_sims is not None``, i.e., validation data has been provided.
+            Whether to use optional stopping or not during training. Could speed up training.
+            Only works if ``validation_sims is not None``, i.e., validation data has been provided.
         use_autograph : bool, optional, default: True
-                Whether to use autograph for the backprop step. Could lead to enourmous speed-ups but
-                could also be harder to debug.
+            Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
+            could also be harder to debug.
         validation_sims : dict, int, or None, optional, default: None
-                Simulations used as a "validation set".
-                If ``dict``, will assume it's the output of a generative model and try
-                ``amortizer.compute_loss(configurator(validation_sims))''
-                after each epoch.
-                If ``int``, will assume it's the number of sims to generate from the generative
-                model before starting training. Only considered if a generative model has been
-                provided during initialization.
-                If ``None`` (default), no validation set will be used.
-        **kwargs : dict, optional
-            Optional keyword arguments, which can be:
-            ``val_model_args`` - optional kwargs passed to the generative model
-                                 for generating validation data. Only useful if
-                                 ``type(validation_sims) is int``.
-            ``conf_args`` - optional kwargs passed to the configurator
-                            before each backprop (update) step.
-            ``val_conf_args`` - optional kwargs passed to the configurator
-                                then configuring the validation data.
-            ``net_args`` - optional kwargs passed to the amortizer
-            ``early_stopping_args`` - optional kwargs passed to the ``EarlyStopper``
+            Simulations used as a "validation set".
+            If ``dict``, will assume it's the output of a generative model and try
+            ``amortizer.compute_loss(configurator(validation_sims))`` after each epoch.
+            If ``int``, will assume it's the number of sims to generate from the generative
+            model before starting training. Only considered if a generative model has been
+            provided during initialization.
+            If ``None`` (default), no validation set will be used.
+
+        Other Parameters
+        ----------------
+        val_model_args :
+            optional kwargs passed to the generative model for generating validation data.
+            Only useful if ``type(validation_sims) is int``.
+        conf_args :
+            optional kwargs passed to the configurator before each backprop (update) step.
+        val_conf_args :
+            optional kwargs passed to the configurator when configuring the validation data.
+        net_args :
+            optional kwargs passed to the amortizer
+        early_stopping_args :
+            optional kwargs passed to the `EarlyStopper`
 
         Returns
         -------
         losses : ``dict`` or ``pandas.DataFrame``
-                A dictionary or a data frame storing the losses across epochs and iterations
+            A dictionary or a data frame storing the losses across epochs and iterations
+
         """
 
         # Compile update function, if specified
@@ -600,13 +625,14 @@ def train_from_presimulation(
         presimulation_path : str
             File path to the folder containing the files from the precomputed simulation.
             Ideally generated using a GenerativeModel's presimulate_and_save method, otherwise must match
-            the structure produced by that method:
-
+            the structure produced by that method.
             Each file contains the data for one epoch (i.e. a number of batches), and must be compatible
             with the custom_loader provided.
-            The custom_loader must read each file into a collection (either a dictionary or a list) of simulation_dict objects.
-            This is easily achieved with the pickle library: if the files were generated from collections of simulation_dict objects
-            using pickle.dump, the _default_loader (default for custom_load) will load them using pickle.load.
+            The custom_loader must read each file into a collection (either a dictionary or a list) of simulation_dict
+            objects.
+            This is easily achieved with the pickle library: if the files were generated from collections of
+            simulation_dict objects using pickle.dump, the _default_loader (default for custom_loader) will
+            load them using pickle.load.
             Training parameters like number of iterations and batch size are inferred from the files during training.
         optimizer : tf.keras.optimizer.Optimizer
             Optimizer for the neural network training. Since for this training, it is impossible to guess the number of
@@ -622,33 +648,33 @@ def train_from_presimulation(
             If ``False``, the optimizer and its states are not stored after training has finished.
             Otherwise, the optimizer will be stored as ``self.optimizer`` and re-used in further training runs.
         custom_loader : callable, optional, default: self._default_loader
-            Must take a string file_path as an input and output a collection (dictionary or list) of simulation_dict objects.
-            A simulation_dict has the keys
-            - ``prior_non_batchable_context``,
-            - ``prior_batchable_context``,
-            - ``prior_draws``,
-            - ``sim_non_batchable_context``,
-            - ``sim_batchable_context``,
-            - ``sim_data``.
-            ``prior_draws`` and ``sim_data`` must have actual data as values, the rest are optional.
+            Must take a string file_path as an input and output a collection (dictionary or list) of
+            simulation_dict objects. A simulation_dict has the keys ``prior_non_batchable_context``,
+            ``prior_batchable_context``, ``prior_draws``, ``sim_non_batchable_context``, ``sim_batchable_context``, and
+            ``sim_data``.
+            Here, ``prior_draws`` and ``sim_data`` must have actual data as values, the rest are optional.
         early_stopping : bool, optional, default: False
             Whether to use optional stopping or not during training. Could speed up training.
         validation_sims : dict, int, or None, optional, default: None
-            Simulations used as a "validation set".
+            Simulations used as a validation set.
             If ``dict``, will assume it's the output of a generative model and try
-            ``amortizer.compute_loss(configurator(validation_sims))''
+            ``amortizer.compute_loss(configurator(validation_sims))``
             after each epoch.
             If ``int``, will assume it's the number of sims to generate from the generative
             model before starting training. Only considered if a generative model has been
             provided during initialization.
             If ``None`` (default), no validation set will be used.
         use_autograph : bool, optional, default: True
-            Whether to use autograph for the backprop step. Could lead to enourmous speed-ups but
+            Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
             could also be harder to debug.
-        **kwargs : dict, optional
-            Optional keyword arguments, which can be:
-            ``conf_args`` - optional keyword arguments passed to the configurator
-            ``net_args`` - optional keyword arguments passed to the amortizer
+
+        Other Parameters
+        ----------------
+
+        conf_args :
+            optional keyword arguments passed to the configurator
+        net_args :
+            optional keyword arguments passed to the amortizer
 
         Returns
         -------
@@ -694,7 +720,8 @@ def train_from_presimulation(
             file_path = os.path.join(presimulation_path, current_filename)
             epoch_data = custom_loader(file_path)
 
-            # For each epoch, the number of iterations is inferred from the presimulated dictionary or list used for that epoch
+            # For each epoch, the number of iterations is inferred from the presimulated dictionary or
+            # list used for that epoch
             if isinstance(epoch_data, dict):
                 index_list = list(epoch_data.keys())
             elif isinstance(epoch_data, list):
@@ -789,29 +816,33 @@ def train_experience_replay(
             Whether to use optional stopping or not during training. Could speed up training.
             Only works if ``validation_sims is not None``, i.e., validation data has been provided.
         use_autograph : bool, optional, default: True
-            Whether to use autograph for the backprop step. Could lead to enourmous speed-ups but
+            Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
             could also be harder to debug.
         validation_sims : dict or None, optional, default: None
             Simulations used as a "validation set".
             If ``dict``, will assume it's the output of a generative model and try
-            ``amortizer.compute_loss(configurator(validation_sims))''
+            ``amortizer.compute_loss(configurator(validation_sims))``
             after each epoch.
             If ``int``, will assume it's the number of sims to generate from the generative
             model before starting training. Only considered if a generative model has been
             provided during initialization.
             If ``None`` (default), no validation set will be used.
-        **kwargs : dict, optional, default: {}
-            Optional keyword arguments, which can be:
-            ``model_args`` - optional kwargs passed to the generative model
-            ``val_model_args`` - optional kwargs passed to the generative model
-                                 for generating validation data. Only useful if
-                                 ``type(validation_sims) is int``.
-            ``conf_args`` - optional kwargs passed to the configurator
-                            before each backprop (update) step.
-            ``val_conf_args`` - optional kwargs passed to the configurator
-                                then configuring the validation data.
-            ``net_args`` - optional kwargs passed to the amortizer
-            ``early_stopping_args`` - optional kwargs passed to the ``EarlyStopper``
+
+        Other Parameters
+        ----------------
+        model_args :
+            optional kwargs passed to the generative model
+        val_model_args :
+            optional kwargs passed to the generative model for generating validation data. Only useful if
+            ``type(validation_sims) is int``.
+        conf_args :
+            optional kwargs passed to the configurator before each backprop (update) step.
+        val_conf_args :
+            optional kwargs passed to the configurator when configuring the validation data.
+        net_args :
+            optional kwargs passed to the amortizer
+        early_stopping_args :
+            optional kwargs passed to the `EarlyStopper`
 
         Returns
         -------
@@ -900,9 +931,10 @@ def train_rounds(
         are simulated from the generative model and added to the data sets simulated in previous round. Then, the
         networks are trained for ``epochs`` on the augmented set of data sets.
 
-        Important: Training time will increase from round to round, since the number of simulations
-        increases correspondingly. The final round will then train the networks on ``rounds * sim_per_round``
-        data sets, so make sure this number does not eat up all available memory.
+        .. note::
+           Training time will increase from round to round, since the number of simulations
+           increases correspondingly. The final round will then train the networks on ``rounds * sim_per_round``
+           data sets, so make sure this number does not eat up all available memory.
 
         Parameters
         ----------
         rounds : int
@@ -914,7 +946,7 @@ def train_rounds(
             Number of epochs (and number of times a checkpoint is stored, inner loop) within a round.
         batch_size : int
             Number of simulations to use at each backpropagation step
-        save_checkpoint : bool, optional, (default - True)
+        save_checkpoint : bool, optional, default: True
             A flag to decide whether to save checkpoints after each epoch,
             if a checkpoint_path provided during initialization, otherwise ignored.
         optimizer : tf.keras.optimizer.Optimizer or None
@@ -930,29 +962,34 @@ def train_rounds(
             Only works if ``validation_sims is not None``, i.e., validation data has been provided.
             Will be performed within rounds, not between rounds!
         use_autograph : bool, optional, default: True
-            Whether to use autograph for the backprop step. Could lead to enourmous speed-ups but
+            Whether to use autograph for the backprop step. Could lead to enormous speed-ups but
            could also be harder to debug.
         validation_sims : dict or None, optional, default: None
             Simulations used as a "validation set".
             If ``dict``, will assume it's the output of a generative model and try
-            ``amortizer.compute_loss(configurator(validation_sims))''
+            ``amortizer.compute_loss(configurator(validation_sims))``
             after each epoch.
             If ``int``, will assume it's the number of sims to generate from the generative
             model before starting training. Only considered if a generative model has been
             provided during initialization.
             If ``None`` (default), no validation set will be used.
-        **kwargs : dict, optional
-            Optional keyword arguments, which can be:
-            ``model_args`` - optional kwargs passed to the generative model
-            ``val_model_args`` - optional kwargs passed to the generative model
-                                 for generating validation data. Only useful if
-                                 ``type(validation_sims) is int``.
-            ``conf_args`` - optional kwargs passed to the configurator
-                            before each backprop (update) step.
-            ``val_conf_args`` - optional kwargs passed to the configurator
-                                then configuring the validation data.
-            ``net_args`` - optional kwargs passed to the amortizer
-            ``early_stopping_args`` - optional kwargs passed to the ``EarlyStopper``
+
+        Other Parameters
+        ----------------
+
+        model_args :
+            optional kwargs passed to the generative model
+        val_model_args :
+            optional kwargs passed to the generative model for generating validation data. Only useful if
+            ``type(validation_sims) is int``.
+        conf_args :
+            optional kwargs passed to the configurator before each backprop (update) step.
+        val_conf_args :
+            optional kwargs passed to the configurator when configuring the validation data.
+ net_args : + optional kwargs passed to the amortizer + early_stopping_args : + optional kwargs passed to the `EarlyStopper` Returns ------- @@ -1013,30 +1050,30 @@ def train_rounds( def mmd_hypothesis_test( self, observed_data, reference_data=None, num_reference_simulations=1000, num_null_samples=100, bootstrap=False ): - """ + """Performs a sampling-based hypothesis test for detecting Out-Of-Simulation (model misspecification). Parameters ---------- - observed_data: np.ndarray - Observed data, shape (num_observed, ...) - reference_data: np.ndarray - Reference data representing samples from the "well-specified model", shape (num_reference, ...) - num_reference_simulations: int, default: 1000 - Number of reference simulations (M) simulated from the trainer's generative model - if no `reference_data` are provided. - num_null_samples: int, default: 100 - Number of draws from the MMD sampling distribution under the null hypothesis "the trainer's generative - model is well-specified" - bootstrap: bool, default: False - If true, the reference data (see above) are bootstrapped for each sample from the MMD sampling distribution. - If false, a new data set is simulated for computing each draw from the MMD sampling distribution. + observed_data : np.ndarray + Observed data, shape (num_observed, ...) + reference_data : np.ndarray + Reference data representing samples from the well-specified model, shape (num_reference, ...) + num_reference_simulations : int, default: 1000 + Number of reference simulations (M) simulated from the trainer's generative model + if no `reference_data` are provided. + num_null_samples : int, default: 100 + Number of draws from the MMD sampling distribution under the null hypothesis "the trainer's generative + model is well-specified" + bootstrap : bool, default: False + If true, the reference data (see above) are bootstrapped for each sample from the MMD sampling distribution. + If false, a new data set is simulated for computing each draw from the MMD sampling distribution. Returns ------- - mmd_null_samples: np.ndarray - samples from the H0 sampling distribution ("well-specified model") - mmd_observed: float - summary MMD estimate for the observed data sets + mmd_null_samples : np.ndarray + samples from the H0 sampling distribution ("well-specified model") + mmd_observed : float + summary MMD estimate for the observed data sets """ if reference_data is None: @@ -1087,12 +1124,12 @@ def _config_validation(self, validation_sims, **kwargs): logger.info(f"Generated {validation_sims} simulations for validation.") return vals else: - logger.warn( + logger.warning( "Validation simulations can only be generated if the Trainer is initialized " + "with a generative model." ) return None - logger.warn('Type of argument "validation_sims" not understood. No validation simulations were created.') + logger.warning('Type of argument "validation_sims" not understood. 
No validation simulations were created.') def _config_early_stopping(self, early_stopping, validation_sims, **kwargs): """Helper method to configure early stopping or warn user for.""" @@ -1102,7 +1139,7 @@ def _config_early_stopping(self, early_stopping, validation_sims, **kwargs): early_stopper = EarlyStopper(**kwargs.pop("early_stopping_args", {})) else: logger = logging.getLogger() - logger.warn("No early stopping will be used, since validation_sims were not provided.") + logger.warning("No early stopping will be used, since validation_sims were not provided.") early_stopper = None return early_stopper return None @@ -1160,7 +1197,6 @@ def _train_step(self, batch_size, update_step, input_dict=None, **kwargs): Parameters ---------- - batch_size : int Number of simulations to perform at each backprop step update_step : callable @@ -1168,11 +1204,16 @@ def _train_step(self, batch_size, update_step, input_dict=None, **kwargs): ``update_step(input_dict, amortizer, optimizer, **kwargs)`` input_dict : dict The optional pre-configured forward dict from a generative model, simulated, if None - **kwargs : dict (default - {}) - Optional keyword arguments, which can be: - ``model_args`` - optional keyword arguments passed to the generative model - ``conf_args`` - optional keyword arguments passed to the configurator - ``net_args`` - optional keyword arguments passed to the amortizer + + Other Parameters + ---------------- + + model_args : + optional keyword arguments passed to the generative model + conf_args : + optional keyword arguments passed to the configurator + net_args : + optional keyword arguments passed to the amortizer """ diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d0c3cbf10..000000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/bayesflow.benchmarks.rst b/docs/source/bayesflow.benchmarks.rst deleted file mode 100644 index 47912d3c8..000000000 --- a/docs/source/bayesflow.benchmarks.rst +++ /dev/null @@ -1,93 +0,0 @@ -bayesflow.benchmarks package -============================ - -Submodules ----------- - -bayesflow.benchmarks.bernoulli\_glm module ------------------------------------------- - -.. automodule:: bayesflow.benchmarks.bernoulli_glm - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.bernoulli\_glm\_raw module ------------------------------------------------ - -.. automodule:: bayesflow.benchmarks.bernoulli_glm_raw - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.gaussian\_linear module --------------------------------------------- - -.. automodule:: bayesflow.benchmarks.gaussian_linear - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.gaussian\_linear\_uniform module ------------------------------------------------------ - -.. 
automodule:: bayesflow.benchmarks.gaussian_linear_uniform - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.gaussian\_mixture module ---------------------------------------------- - -.. automodule:: bayesflow.benchmarks.gaussian_mixture - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.lotka\_volterra module -------------------------------------------- - -.. automodule:: bayesflow.benchmarks.lotka_volterra - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.sir module -------------------------------- - -.. automodule:: bayesflow.benchmarks.sir - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.slcp module --------------------------------- - -.. automodule:: bayesflow.benchmarks.slcp - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.slcp\_distractors module ---------------------------------------------- - -.. automodule:: bayesflow.benchmarks.slcp_distractors - :members: - :undoc-members: - :show-inheritance: - -bayesflow.benchmarks.two\_moons module --------------------------------------- - -.. automodule:: bayesflow.benchmarks.two_moons - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: bayesflow.benchmarks - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/bayesflow.rst b/docs/source/bayesflow.rst deleted file mode 100644 index 869fe043c..000000000 --- a/docs/source/bayesflow.rst +++ /dev/null @@ -1,173 +0,0 @@ -bayesflow package -================= - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - bayesflow.benchmarks - -Submodules ----------- - -bayesflow.amortizers module ---------------------------- - -.. automodule:: bayesflow.amortizers - :members: - :undoc-members: - :show-inheritance: - -bayesflow.computational\_utilities module ------------------------------------------ - -.. automodule:: bayesflow.computational_utilities - :members: - :undoc-members: - :show-inheritance: - -bayesflow.configuration module ------------------------------- - -.. automodule:: bayesflow.configuration - :members: - :undoc-members: - :show-inheritance: - -bayesflow.coupling\_networks module ------------------------------------ - -.. automodule:: bayesflow.coupling_networks - :members: - :undoc-members: - :show-inheritance: - -bayesflow.default\_settings module ----------------------------------- - -.. automodule:: bayesflow.default_settings - :members: - :undoc-members: - :show-inheritance: - -bayesflow.diagnostics module ----------------------------- - -.. automodule:: bayesflow.diagnostics - :members: - :undoc-members: - :show-inheritance: - -bayesflow.exceptions module ---------------------------- - -.. automodule:: bayesflow.exceptions - :members: - :undoc-members: - :show-inheritance: - -bayesflow.helper\_classes module --------------------------------- - -.. automodule:: bayesflow.helper_classes - :members: - :undoc-members: - :show-inheritance: - -bayesflow.helper\_functions module ----------------------------------- - -.. automodule:: bayesflow.helper_functions - :members: - :undoc-members: - :show-inheritance: - -bayesflow.helper\_networks module ---------------------------------- - -.. automodule:: bayesflow.helper_networks - :members: - :undoc-members: - :show-inheritance: - -bayesflow.inference\_networks module ------------------------------------- - -.. 
automodule:: bayesflow.inference_networks - :members: - :undoc-members: - :show-inheritance: - -bayesflow.losses module ------------------------ - -.. automodule:: bayesflow.losses - :members: - :undoc-members: - :show-inheritance: - -bayesflow.mcmc module ---------------------- - -.. automodule:: bayesflow.mcmc - :members: - :undoc-members: - :show-inheritance: - -bayesflow.networks module -------------------------- - -.. automodule:: bayesflow.networks - :members: - :undoc-members: - :show-inheritance: - -bayesflow.simulation module ---------------------------- - -.. automodule:: bayesflow.simulation - :members: - :undoc-members: - :show-inheritance: - -bayesflow.summary\_networks module ----------------------------------- - -.. automodule:: bayesflow.summary_networks - :members: - :undoc-members: - :show-inheritance: - -bayesflow.trainers module -------------------------- - -.. automodule:: bayesflow.trainers - :members: - :undoc-members: - :show-inheritance: - -bayesflow.version module ------------------------- - -.. automodule:: bayesflow.version - :members: - :undoc-members: - :show-inheritance: - -bayesflow.wrappers module -------------------------- - -.. automodule:: bayesflow.wrappers - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: bayesflow - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index a599850fa..000000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,62 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath("../..")) - - -# -- Project information ----------------------------------------------------- - -project = "BayesFlow" -copyright = "2022, Stefan T. Radev" -author = "Stefan T. Radev" - -# The full version, including alpha/beta/rc tags -release = "beta" - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.viewcode", - "nbsphinx", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - -source_suffix = [".rst", ".md"] - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. 
They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 11cabf814..000000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. BayesFlow documentation master file, created by - sphinx-quickstart on Wed Nov 23 10:54:07 2022. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to BayesFlow's documentation! -===================================== - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - modules - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/source/modules.rst b/docs/source/modules.rst deleted file mode 100644 index 3616a09fe..000000000 --- a/docs/source/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -bayesflow -========= - -.. toctree:: - :maxdepth: 4 - - bayesflow diff --git a/docs/source/tutorial_notebooks/Diffusion_Model_Posterior_Estimation.ipynb b/docs/source/tutorial_notebooks/Diffusion_Model_Posterior_Estimation.ipynb deleted file mode 100644 index d5a7696e2..000000000 --- a/docs/source/tutorial_notebooks/Diffusion_Model_Posterior_Estimation.ipynb +++ /dev/null @@ -1,46 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "spatial-frank", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.1" - }, - "toc": { - "base_numbering": 1, - "nav_menu": {}, - "number_sections": true, - "sideBar": true, - "skip_h1_title": false, - "title_cell": "Table of Contents", - "title_sidebar": "Contents", - "toc_cell": false, - "toc_position": {}, - "toc_section_display": true, - "toc_window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docsrc/.nojekyll b/docsrc/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/docsrc/Makefile b/docsrc/Makefile new file mode 100644 index 000000000..c8c463d50 --- /dev/null +++ b/docsrc/Makefile @@ -0,0 +1,42 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +# from https://github.com/acerbilab/pyvbmc/blob/main/docsrc/Makefile +# copy example notebooks to source folder +# render docs +# copy rendered docs and .nojekyll (for github.io) to docs folder +# remove example notebooks from source folder +github: + @cp ../INSTALL.rst source/installation.rst + @cp ../CONTRIBUTING source/contributing.md + @cp -a ../examples/. source/_examples + @rm -r source/_examples/in_progress + @make html + @cp -a _build/html/. 
../docs + @cp .nojekyll ../docs/.nojekyll + @rm -r _build/html/ + @rm -r source/_examples + @rm source/installation.rst + @rm source/contributing.md + +clean: + @rm -rf ../docs/* + @rm -rf _build + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/make.bat b/docsrc/make.bat similarity index 52% rename from docs/make.bat rename to docsrc/make.bat index dc1312ab0..4537aaa15 100644 --- a/docs/make.bat +++ b/docsrc/make.bat @@ -8,7 +8,10 @@ if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source -set BUILDDIR=build +set BUILDDIR=_build + +if "%1" == "" goto help +if "%1" == "github" goto github %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( @@ -19,17 +22,35 @@ if errorlevel 9009 ( echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ + echo.http://sphinx-doc.org/ exit /b 1 ) -if "%1" == "" goto help - %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:github +mkdir source\_examples +xcopy /y /s ..\INSTALL.rst source\installation.rst +xcopy /y /s ..\CONTRIBUTING.md source\contributing.md +xcopy /y /s ..\examples source\_examples +rmdir /q /s source\_examples\in_progress +%SPHINXBUILD% -M html %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +xcopy /y /s "%BUILDDIR%\html" ..\docs +xcopy /y .nojekyll ..\docs\.nojekyll +rmdir /q /s source\_examples +rmdir /q /s _build\html\ +del /q /s source\installation.rst +del /q /s source\contributing.md +goto end + +:clean +del /q /s ..\docs\* +rmdir /q /s %BUILDDIR% :end -popd +popd \ No newline at end of file diff --git a/docsrc/source/_static/bayesflow_hex.ico b/docsrc/source/_static/bayesflow_hex.ico new file mode 100644 index 000000000..5b2b73c73 Binary files /dev/null and b/docsrc/source/_static/bayesflow_hex.ico differ diff --git a/docsrc/source/_static/bayesflow_hex.png b/docsrc/source/_static/bayesflow_hex.png new file mode 100644 index 000000000..279467755 Binary files /dev/null and b/docsrc/source/_static/bayesflow_hex.png differ diff --git a/docs/source/images/bayesflow_overview.png b/docsrc/source/_static/bayesflow_overview.png similarity index 100% rename from docs/source/images/bayesflow_overview.png rename to docsrc/source/_static/bayesflow_overview.png diff --git a/docsrc/source/_static/bayesflow_software_overview.png b/docsrc/source/_static/bayesflow_software_overview.png new file mode 100644 index 000000000..8a8e2035b Binary files /dev/null and b/docsrc/source/_static/bayesflow_software_overview.png differ diff --git a/docs/source/images/generative_model.png b/docsrc/source/_static/generative_model.png similarity index 100% rename from docs/source/images/generative_model.png rename to docsrc/source/_static/generative_model.png diff --git a/docsrc/source/about.rst b/docsrc/source/about.rst new file mode 100644 index 000000000..de57a0d2d --- /dev/null +++ b/docsrc/source/about.rst @@ -0,0 +1,22 @@ +About us +======== + +Core developers +--------------- + +Active developers, in order of joining: + +* `Stefan Radev `__, Heidelberg University +* `Ullrich Köthe `__, Heidelberg University +* `Marvin Schmitt `__, University of Stuttgart +* `Paul Bürkner `__, TU Dortmund University +* `Lukas Schumacher `__, 
Heidelberg University +* `Lasse Elsemüller `__, Heidelberg University +* `Valentin Pratz `__, Heidelberg University +* `Yannik Schälte `__, University of Bonn + +Join the team +------------- + +If you are interested in contributing to BayesFlow, feel free to participate via GitHub. +We welcome pull requests, bug reports, and feature requests. For other opportunities to work with us, contact `Stefan Radev `__. \ No newline at end of file diff --git a/docsrc/source/api/bayesflow.amortizers.rst b/docsrc/source/api/bayesflow.amortizers.rst new file mode 100644 index 000000000..6b466b5a2 --- /dev/null +++ b/docsrc/source/api/bayesflow.amortizers.rst @@ -0,0 +1,7 @@ +bayesflow.amortizers module +=========================== + +.. automodule:: bayesflow.amortizers + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.attention.rst b/docsrc/source/api/bayesflow.attention.rst new file mode 100644 index 000000000..1685b5eaa --- /dev/null +++ b/docsrc/source/api/bayesflow.attention.rst @@ -0,0 +1,7 @@ +bayesflow.attention module +========================== + +.. automodule:: bayesflow.attention + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm.rst b/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm.rst new file mode 100644 index 000000000..e61e3b9ba --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.bernoulli\_glm module +========================================== + +.. automodule:: bayesflow.benchmarks.bernoulli_glm + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm_raw.rst b/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm_raw.rst new file mode 100644 index 000000000..306694a0b --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.bernoulli_glm_raw.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.bernoulli\_glm\_raw module +=============================================== + +.. automodule:: bayesflow.benchmarks.bernoulli_glm_raw + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.gaussian_linear.rst b/docsrc/source/api/bayesflow.benchmarks.gaussian_linear.rst new file mode 100644 index 000000000..bbf4d4c07 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.gaussian_linear.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.gaussian\_linear module +============================================ + +.. automodule:: bayesflow.benchmarks.gaussian_linear + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.gaussian_linear_uniform.rst b/docsrc/source/api/bayesflow.benchmarks.gaussian_linear_uniform.rst new file mode 100644 index 000000000..faf0a6d47 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.gaussian_linear_uniform.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.gaussian\_linear\_uniform module +===================================================== + +.. automodule:: bayesflow.benchmarks.gaussian_linear_uniform + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.gaussian_mixture.rst b/docsrc/source/api/bayesflow.benchmarks.gaussian_mixture.rst new file mode 100644 index 000000000..79ca5e7f7 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.gaussian_mixture.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.gaussian\_mixture module +============================================= + +.. 
automodule:: bayesflow.benchmarks.gaussian_mixture + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.inverse_kinematics.rst b/docsrc/source/api/bayesflow.benchmarks.inverse_kinematics.rst new file mode 100644 index 000000000..2fc61fa0e --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.inverse_kinematics.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.inverse\_kinematics module +=============================================== + +.. automodule:: bayesflow.benchmarks.inverse_kinematics + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.lotka_volterra.rst b/docsrc/source/api/bayesflow.benchmarks.lotka_volterra.rst new file mode 100644 index 000000000..124e94bb6 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.lotka_volterra.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.lotka\_volterra module +=========================================== + +.. automodule:: bayesflow.benchmarks.lotka_volterra + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.rst b/docsrc/source/api/bayesflow.benchmarks.rst new file mode 100644 index 000000000..b7096f91e --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.rst @@ -0,0 +1,28 @@ +bayesflow.benchmarks package +============================ + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + bayesflow.benchmarks.bernoulli_glm + bayesflow.benchmarks.bernoulli_glm_raw + bayesflow.benchmarks.gaussian_linear + bayesflow.benchmarks.gaussian_linear_uniform + bayesflow.benchmarks.gaussian_mixture + bayesflow.benchmarks.inverse_kinematics + bayesflow.benchmarks.lotka_volterra + bayesflow.benchmarks.sir + bayesflow.benchmarks.slcp + bayesflow.benchmarks.slcp_distractors + bayesflow.benchmarks.two_moons + +Module contents +--------------- + +.. automodule:: bayesflow.benchmarks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.sir.rst b/docsrc/source/api/bayesflow.benchmarks.sir.rst new file mode 100644 index 000000000..d0fa22079 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.sir.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.sir module +=============================== + +.. automodule:: bayesflow.benchmarks.sir + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.slcp.rst b/docsrc/source/api/bayesflow.benchmarks.slcp.rst new file mode 100644 index 000000000..cbb1778b2 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.slcp.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.slcp module +================================ + +.. automodule:: bayesflow.benchmarks.slcp + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.slcp_distractors.rst b/docsrc/source/api/bayesflow.benchmarks.slcp_distractors.rst new file mode 100644 index 000000000..627e6e4cf --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.slcp_distractors.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.slcp\_distractors module +============================================= + +.. 
automodule:: bayesflow.benchmarks.slcp_distractors + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.benchmarks.two_moons.rst b/docsrc/source/api/bayesflow.benchmarks.two_moons.rst new file mode 100644 index 000000000..df74237f3 --- /dev/null +++ b/docsrc/source/api/bayesflow.benchmarks.two_moons.rst @@ -0,0 +1,7 @@ +bayesflow.benchmarks.two\_moons module +====================================== + +.. automodule:: bayesflow.benchmarks.two_moons + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.computational_utilities.rst b/docsrc/source/api/bayesflow.computational_utilities.rst new file mode 100644 index 000000000..4e139420c --- /dev/null +++ b/docsrc/source/api/bayesflow.computational_utilities.rst @@ -0,0 +1,7 @@ +bayesflow.computational\_utilities module +========================================= + +.. automodule:: bayesflow.computational_utilities + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.configuration.rst b/docsrc/source/api/bayesflow.configuration.rst new file mode 100644 index 000000000..e5137b51c --- /dev/null +++ b/docsrc/source/api/bayesflow.configuration.rst @@ -0,0 +1,7 @@ +bayesflow.configuration module +============================== + +.. automodule:: bayesflow.configuration + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.coupling_networks.rst b/docsrc/source/api/bayesflow.coupling_networks.rst new file mode 100644 index 000000000..71bea83dc --- /dev/null +++ b/docsrc/source/api/bayesflow.coupling_networks.rst @@ -0,0 +1,7 @@ +bayesflow.coupling\_networks module +=================================== + +.. automodule:: bayesflow.coupling_networks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.default_settings.rst b/docsrc/source/api/bayesflow.default_settings.rst new file mode 100644 index 000000000..d0213d190 --- /dev/null +++ b/docsrc/source/api/bayesflow.default_settings.rst @@ -0,0 +1,7 @@ +bayesflow.default\_settings module +================================== + +.. automodule:: bayesflow.default_settings + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.diagnostics.rst b/docsrc/source/api/bayesflow.diagnostics.rst new file mode 100644 index 000000000..2a4e379e5 --- /dev/null +++ b/docsrc/source/api/bayesflow.diagnostics.rst @@ -0,0 +1,7 @@ +bayesflow.diagnostics module +============================ + +.. automodule:: bayesflow.diagnostics + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.exceptions.rst b/docsrc/source/api/bayesflow.exceptions.rst new file mode 100644 index 000000000..da5a1c6b9 --- /dev/null +++ b/docsrc/source/api/bayesflow.exceptions.rst @@ -0,0 +1,7 @@ +bayesflow.exceptions module +=========================== + +.. automodule:: bayesflow.exceptions + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.experimental.rectifiers.rst b/docsrc/source/api/bayesflow.experimental.rectifiers.rst new file mode 100644 index 000000000..9301ff513 --- /dev/null +++ b/docsrc/source/api/bayesflow.experimental.rectifiers.rst @@ -0,0 +1,7 @@ +bayesflow.experimental.rectifiers module +======================================== + +.. 
automodule:: bayesflow.experimental.rectifiers + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.experimental.rst b/docsrc/source/api/bayesflow.experimental.rst new file mode 100644 index 000000000..8582f0d1f --- /dev/null +++ b/docsrc/source/api/bayesflow.experimental.rst @@ -0,0 +1,18 @@ +bayesflow.experimental package +============================== + +Submodules +---------- + +.. toctree:: + :maxdepth: 4 + + bayesflow.experimental.rectifiers + +Module contents +--------------- + +.. automodule:: bayesflow.experimental + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.helper_classes.rst b/docsrc/source/api/bayesflow.helper_classes.rst new file mode 100644 index 000000000..ae1ccbf80 --- /dev/null +++ b/docsrc/source/api/bayesflow.helper_classes.rst @@ -0,0 +1,7 @@ +bayesflow.helper\_classes module +================================ + +.. automodule:: bayesflow.helper_classes + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.helper_functions.rst b/docsrc/source/api/bayesflow.helper_functions.rst new file mode 100644 index 000000000..796f5e3b3 --- /dev/null +++ b/docsrc/source/api/bayesflow.helper_functions.rst @@ -0,0 +1,7 @@ +bayesflow.helper\_functions module +================================== + +.. automodule:: bayesflow.helper_functions + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.helper_networks.rst b/docsrc/source/api/bayesflow.helper_networks.rst new file mode 100644 index 000000000..47a749c4c --- /dev/null +++ b/docsrc/source/api/bayesflow.helper_networks.rst @@ -0,0 +1,7 @@ +bayesflow.helper\_networks module +================================= + +.. automodule:: bayesflow.helper_networks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.inference_networks.rst b/docsrc/source/api/bayesflow.inference_networks.rst new file mode 100644 index 000000000..eb7323b67 --- /dev/null +++ b/docsrc/source/api/bayesflow.inference_networks.rst @@ -0,0 +1,7 @@ +bayesflow.inference\_networks module +==================================== + +.. automodule:: bayesflow.inference_networks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.losses.rst b/docsrc/source/api/bayesflow.losses.rst new file mode 100644 index 000000000..5af3bb9b5 --- /dev/null +++ b/docsrc/source/api/bayesflow.losses.rst @@ -0,0 +1,7 @@ +bayesflow.losses module +======================= + +.. automodule:: bayesflow.losses + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.mcmc.rst b/docsrc/source/api/bayesflow.mcmc.rst new file mode 100644 index 000000000..c8c8959c7 --- /dev/null +++ b/docsrc/source/api/bayesflow.mcmc.rst @@ -0,0 +1,7 @@ +bayesflow.mcmc module +===================== + +.. automodule:: bayesflow.mcmc + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.networks.rst b/docsrc/source/api/bayesflow.networks.rst new file mode 100644 index 000000000..28b8e3ee0 --- /dev/null +++ b/docsrc/source/api/bayesflow.networks.rst @@ -0,0 +1,7 @@ +bayesflow.networks module +========================= + +.. automodule:: bayesflow.networks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.rst b/docsrc/source/api/bayesflow.rst new file mode 100644 index 000000000..917563169 --- /dev/null +++ b/docsrc/source/api/bayesflow.rst @@ -0,0 +1,58 @@ +.. 
currentmodule:: bayesflow + +Public API: bayesflow package +============================= + +Submodules +---------- + +.. toctree:: + :maxdepth: 1 + + bayesflow.benchmarks + bayesflow.amortizers + bayesflow.attention + bayesflow.coupling_networks + bayesflow.diagnostics + bayesflow.inference_networks + bayesflow.losses + bayesflow.networks + bayesflow.sensitivity + bayesflow.simulation + bayesflow.summary_networks + bayesflow.trainers + + +Configuration +------------- +.. toctree:: + :maxdepth: 1 + + bayesflow.configuration + bayesflow.default_settings + + +Helpers +------- +.. toctree:: + :maxdepth: 1 + + bayesflow.computational_utilities + bayesflow.helper_classes + bayesflow.helper_functions + bayesflow.helper_networks + +Miscellaneous +------------- +.. toctree:: + :maxdepth: 1 + + bayesflow.exceptions + bayesflow.mcmc + bayesflow.version + bayesflow.wrappers + +.. toctree:: + :hidden: + + bayesflow.experimental \ No newline at end of file diff --git a/docsrc/source/api/bayesflow.sensitivity.rst b/docsrc/source/api/bayesflow.sensitivity.rst new file mode 100644 index 000000000..3c40a7bda --- /dev/null +++ b/docsrc/source/api/bayesflow.sensitivity.rst @@ -0,0 +1,7 @@ +bayesflow.sensitivity module +============================ + +.. automodule:: bayesflow.sensitivity + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.simulation.rst b/docsrc/source/api/bayesflow.simulation.rst new file mode 100644 index 000000000..9a6ab6719 --- /dev/null +++ b/docsrc/source/api/bayesflow.simulation.rst @@ -0,0 +1,7 @@ +bayesflow.simulation module +=========================== + +.. automodule:: bayesflow.simulation + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.summary_networks.rst b/docsrc/source/api/bayesflow.summary_networks.rst new file mode 100644 index 000000000..f18a3d52e --- /dev/null +++ b/docsrc/source/api/bayesflow.summary_networks.rst @@ -0,0 +1,7 @@ +bayesflow.summary\_networks module +================================== + +.. automodule:: bayesflow.summary_networks + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.trainers.rst b/docsrc/source/api/bayesflow.trainers.rst new file mode 100644 index 000000000..d19768e38 --- /dev/null +++ b/docsrc/source/api/bayesflow.trainers.rst @@ -0,0 +1,7 @@ +bayesflow.trainers module +========================= + +.. automodule:: bayesflow.trainers + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.version.rst b/docsrc/source/api/bayesflow.version.rst new file mode 100644 index 000000000..c9ad2bd41 --- /dev/null +++ b/docsrc/source/api/bayesflow.version.rst @@ -0,0 +1,7 @@ +bayesflow.version module +======================== + +.. automodule:: bayesflow.version + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/api/bayesflow.wrappers.rst b/docsrc/source/api/bayesflow.wrappers.rst new file mode 100644 index 000000000..853cb8e4a --- /dev/null +++ b/docsrc/source/api/bayesflow.wrappers.rst @@ -0,0 +1,7 @@ +bayesflow.wrappers module +========================= + +.. automodule:: bayesflow.wrappers + :members: + :undoc-members: + :show-inheritance: diff --git a/docsrc/source/conf.py b/docsrc/source/conf.py new file mode 100644 index 000000000..8d3b2a431 --- /dev/null +++ b/docsrc/source/conf.py @@ -0,0 +1,116 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +import os +import sys + +sys.path.insert(0, os.path.abspath("../..")) + +# -- Project information ----------------------------------------------------- + +project = "BayesFlow" +copyright = "2023, BayesFlow authors (lead maintainer: Stefan T. Radev)" + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "numpydoc", + "sphinx.ext.autosummary", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "myst_nb", + "sphinx.ext.extlinks", + "sphinx.ext.intersphinx", + "sphinx_design" +] + +numpydoc_show_class_members = False +myst_enable_extensions = [ + "amsmath", + "colon_fence", + "deflist", + "dollarmath", + "html_image", +] +myst_url_schemes = ["http", "https", "mailto"] +autodoc_default_options = { + "members": "var1, var2", + "special-members": "__call__", + "undoc-members": True, + "exclude-members": "__weakref__", +} + +# Define shorthand for external links: +extlinks = { + "mainbranch": ("https://github.com/stefanradev93/BayesFlow/blob/master/%s", None), +} + +coverage_show_missing_items = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "sphinx_book_theme" +html_title = "BayesFlow: Amortized Bayesian Inference" + +# Add any paths that contain custom _static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin _static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+# html_static_path = ["css/custom.css"] +html_show_sourcelink = False +html_theme_options = { + "repository_url": "https://github.com/stefanradev93/BayesFlow", + "repository_branch": "master", + "use_edit_page_button": True, + "use_issues_button": True, + "use_repository_button": True, + "use_download_button": True, + "logo": {"alt-text": "BayesFlow"}, +} +html_logo = "_static/bayesflow_hex.png" +html_favicon = '_static/bayesflow_hex.ico' +html_baseurl = "https://www.bayesflow.org/" +html_js_files = [ + "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" +] + +todo_include_todos = True + +# do not execute jupyter notebooks when building docs +nb_execution_mode = "off" + +# download notebooks as .ipynb and not as .ipynb.txt +html_sourcelink_suffix = "" + +suppress_warnings = [ + f"autosectionlabel._examples/{filename.split('.')[0]}" + for filename in os.listdir("../../examples") + if os.path.isfile(os.path.join("../../examples", filename)) +] # Avoid duplicate label warnings for Jupyter notebooks. + +remove_from_toctrees = ["_autosummary/*"] diff --git a/docsrc/source/css/custom.css b/docsrc/source/css/custom.css new file mode 100644 index 000000000..e69de29bb diff --git a/docsrc/source/examples.rst b/docsrc/source/examples.rst new file mode 100644 index 000000000..bf59bffce --- /dev/null +++ b/docsrc/source/examples.rst @@ -0,0 +1,19 @@ +Examples +======== + +This page contains tutorial notebooks for BayesFlow, ranging from toy models to more complex applied modeling scenarios. +The corresponding Jupyter Notebooks are available :mainbranch:`here `. + +.. toctree:: + :maxdepth: 1 + :titlesonly: + :glob: + :numbered: + + _examples/Intro_Amortized_Posterior_Estimation.ipynb + _examples/Model_Misspecification.ipynb + _examples/LCA_Model_Posterior_Estimation.ipynb + _examples/Linear_ODE_system.ipynb + _examples/Covid19_Initial_Posterior_Estimation.ipynb + _examples/Model_Comparison_MPT.ipynb + _examples/Hierarchical_Model_Comparison_MPT.ipynb diff --git a/docsrc/source/index.rst b/docsrc/source/index.rst new file mode 100644 index 000000000..d1fc4d7d3 --- /dev/null +++ b/docsrc/source/index.rst @@ -0,0 +1,114 @@ +BayesFlow +========= + +Welcome to our BayesFlow library for efficient simulation-based Bayesian workflows! +Our library enables users to create specialized neural networks for amortized Bayesian inference, +which repay users with rapid statistical inference after a potentially longer simulation-based training phase. + +.. image:: _static/bayesflow_software_overview.png + :width: 100% + :align: center + :alt: BayesFlow defines a formal workflow for data generation, neural approximation, and model criticism. + +BayesFlow features four key capabilities to enhance Bayesian workflows: + +1. **Amortized posterior estimation:** Train a generative network to efficiently infer full posteriors (i.e., solve the inverse problem) for all existing and future data compatible with a simulation model. +2. **Amortized likelihood estimation:** Train a generative network to efficiently emulate a simulation model (i.e., solve the forward problem) for all possible parameter configurations or interact with external probabilistic programs. +3. **Amortized model comparison:** Train a neural classifier to recognize the "best" model in a set of competing candidates or combine amortized posterior and likelihood estimation to compute Bayesian evidence and out-of-sample predictive performance. +4. 
**Model misspecification detection:** Ensure that the resulting posteriors are faithful approximations of the otherwise intractable target posterior, even when simulations do not perfectly represent reality. +Installation +------------ + +.. tab-set:: + + .. tab-item:: Users (stable) + + .. code-block:: bash + + pip install bayesflow + + + .. tab-item:: Developers (nightly) + + .. code-block:: bash + + pip install git+https://github.com/stefanradev93/bayesflow@Development + + +BayesFlow requires Python version 3.9 or later. +The installer should automatically choose the appropriate TensorFlow version depending on your operating system. +However, if the installation fails, TensorFlow and TensorFlow Probability are likely to be the culprits, +and you might consider starting your bug hunt there. +You can find detailed installation instructions for developers :doc:`here <installation>`. + + +Citation +-------- + +You can cite BayesFlow along the lines of: + + - We estimated the approximate posterior distribution with neural posterior estimation (NPE; Papamakarios & Murray, 2016) via the BayesFlow software for amortized Bayesian workflows (Radev et al., 2023b). + - We trained a neural likelihood estimator (NLE; Papamakarios et al., 2019) via the BayesFlow software for amortized Bayesian workflows (Radev et al., 2023b). + - We sampled from the approximate joint distribution :math:`p(x, \theta)` using jointly amortized neural approximation (JANA; Radev et al., 2023a), as implemented in the BayesFlow software for amortized Bayesian workflows (Radev et al., 2023b). + +1. Radev, S. T., Schmitt, M., Schumacher, L., Elsemüller, L., Pratz, V., Schälte, Y., Köthe, U., & Bürkner, P.-C. (2023). BayesFlow: Amortized Bayesian Workflows With Neural Networks. *arXiv:2306.16015*. (`arXiv paper <https://arxiv.org/abs/2306.16015>`__) +2. Radev, S. T., Schmitt, M., Pratz, V., Picchini, U., Köthe, U., & Bürkner, P.-C. (2023). JANA: Jointly Amortized Neural Approximation of Complex Bayesian Models. *39th Conference on Uncertainty in Artificial Intelligence*. (`UAI Proceedings <https://openreview.net/forum?id=dS3wVICQrU0>`__) + + +:: + + @misc{radev2023bayesflow, + title = {BayesFlow: Amortized Bayesian Workflows With Neural Networks}, + author = {Stefan T Radev and Marvin Schmitt and Lukas Schumacher and Lasse Elsem\"{u}ller and Valentin Pratz and Yannik Sch\"{a}lte and Ullrich K\"{o}the and Paul-Christian B\"{u}rkner}, + year = {2023}, + publisher= {arXiv}, + url={https://arxiv.org/abs/2306.16015} + } + + @inproceedings{radev2023jana, + title={{JANA}: Jointly Amortized Neural Approximation of Complex Bayesian Models}, + author={Stefan T. Radev and Marvin Schmitt and Valentin Pratz and Umberto Picchini and Ullrich Koethe and Paul-Christian Buerkner}, + booktitle={The 39th Conference on Uncertainty in Artificial Intelligence}, + year={2023}, + url={https://openreview.net/forum?id=dS3wVICQrU0} + } + +Acknowledgments +--------------- + +We thank the `PyVBMC <https://github.com/acerbilab/pyvbmc>`__ team for their great open source documentation which heavily inspired our docs.
+The BayesFlow development team acknowledges support from: +Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) +under Germany’s Excellence Strategy -– EXC-2181 - 390900948 (the Heidelberg Cluster of Excellence STRUCTURES), +DFG EXC-2075 - 390740016 (the Stuttgart Cluster of Excellence SimTech), +DFG GRK 2277 via the research training group Statistical Modeling in Psychology (SMiP), +the Cyber Valley Research Fund (grant number: CyVy-RF-2021-16), +the Joachim Herz Foundation, +the EMUNE project ("Invertierbare Neuronale Netze für ein verbessertes Verständnis von Infektionskrankheiten", BMBF, 031L0293A-D), +and the Informatics for Life initiative funded by the Klaus Tschira Foundation. + +License and Source Code +----------------------- + +BayesFlow is released under :mainbranch:`MIT License `. +The source code is hosted on the public `GitHub repository `__. + +Indices +------- + +* :ref:`genindex` +* :ref:`modindex` + + +.. toctree:: + :maxdepth: 0 + :titlesonly: + :hidden: + + self + examples + api/bayesflow + installation + contributing + about diff --git a/environment.yaml b/environment.yaml index 3fdeeab30..2fb72a6aa 100644 --- a/environment.yaml +++ b/environment.yaml @@ -1,4 +1,4 @@ -# Read the docs +# Read the docsrc # run: conda env create --file environment.yaml diff --git a/docs/source/tutorial_notebooks/Covid19_Initial_Posterior_Estimation.ipynb b/examples/Covid19_Initial_Posterior_Estimation.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/Covid19_Initial_Posterior_Estimation.ipynb rename to examples/Covid19_Initial_Posterior_Estimation.ipynb index 8376533e8..c0c06d87a 100644 --- a/docs/source/tutorial_notebooks/Covid19_Initial_Posterior_Estimation.ipynb +++ b/examples/Covid19_Initial_Posterior_Estimation.ipynb @@ -1,5 +1,13 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "01fcb38f", + "metadata": {}, + "source": [ + "# Posterior Estimation for SIR-like Models" + ] + }, { "cell_type": "markdown", "id": "thirty-canada", @@ -58,8 +66,8 @@ "id": "polished-warning", "metadata": {}, "source": [ - "
<h2>Introduction</h2>\n", - "<hr>
\n", + "## Introduction\n", + "\n", "In this tutorial, we will illustrate how to perform posterior inference on simple, stationary SIR-like models (complex models will be tackled in a further notebook). SIR-like models comprise suitable illustrative examples, since they generate time-series and their outputs represent the results of solving a system of ordinary differential equations (ODEs).\n", "\n", "The details for tackling stochastic epidemiological models are described in our corresponding paper, which you can consult for a more formal exposition and a more comprehensive treatment of neural architectures:\n", @@ -1614,13 +1622,13 @@ "source": [ "That's it for this tutorial! You now know how to use the basic building blocks of `BayesFlow` to create amortized neural approximators. :)\n", "\n", - "In the [next tutorial](./PriorSensitivity_Covid19_Initial.ipynb), we will go through a prior sensitivity analysis with `BayesFlow`, which is as easy to perform as it is important for ascertaining the robustness of our inferences." + "" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1634,7 +1642,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.11" }, "toc": { "base_numbering": 1, @@ -1657,4 +1665,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/docs/source/tutorial_notebooks/Hierarchical_Model_Comparison_MPT.ipynb b/examples/Hierarchical_Model_Comparison_MPT.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/Hierarchical_Model_Comparison_MPT.ipynb rename to examples/Hierarchical_Model_Comparison_MPT.ipynb index 72f1d0ae7..ff3e768ce 100644 --- a/docs/source/tutorial_notebooks/Hierarchical_Model_Comparison_MPT.ipynb +++ b/examples/Hierarchical_Model_Comparison_MPT.ipynb @@ -1,31 +1,24 @@ { "cells": [ { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "toc": true - }, - "source": [ - "
<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n", - "<div class=\"toc\"><ul class=\"toc-item\"></ul></div>" - ] - }, - { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Amortized Hierarchical Model Comparison Workflow for Cognitive Modeling\n", + "# Hierarchical Model Comparison for Cognitive Models\n", + "\n", + "Part 2: Hierarchical Model Comparison\n", + "\n", "by Lasse Elsemüller" ] }, { - "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "toc": true + }, "source": [ - "# Part 2: Hierarchical Model Comparison" + "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", + "" ] }, { @@ -67,11 +60,10 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Introduction \n", + "## Introduction \n", "\n", "This is the second part of the tutorial series covering amortized model comparison with BayesFlow! The general workflow, the scenario and the cognitive models were introduced in [part 1](./Model_Comparison_MPT.ipynb) and are assumed to be known, so here we will focus on the new elements introduced when comparing hierarchical models.\n", "\n", @@ -79,7 +71,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -87,7 +78,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -95,7 +85,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -112,15 +101,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Hyperpriors and Priors" + "### Hyperpriors and Priors" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -138,7 +125,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -176,7 +162,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -238,15 +223,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating the Simulators" + "### Creating the Simulators" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -382,7 +365,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -427,15 +409,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Prior Predictive Checks" + "### Prior Predictive Checks" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -514,7 +494,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -573,7 +552,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -581,7 +559,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -589,7 +566,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -628,7 +604,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -764,7 +739,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -772,7 +746,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -780,7 +753,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -818,7 +790,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -852,7 +823,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -903,7 +873,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -913,7 +882,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -927,7 +896,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.10.11" }, "toc": { "base_numbering": "1", diff --git a/docs/source/tutorial_notebooks/Intro_Amortized_Posterior_Estimation.ipynb b/examples/Intro_Amortized_Posterior_Estimation.ipynb similarity index 97% rename from 
docs/source/tutorial_notebooks/Intro_Amortized_Posterior_Estimation.ipynb rename to examples/Intro_Amortized_Posterior_Estimation.ipynb index 6b4ce848a..1f2a02b43 100644 --- a/docs/source/tutorial_notebooks/Intro_Amortized_Posterior_Estimation.ipynb +++ b/examples/Intro_Amortized_Posterior_Estimation.ipynb @@ -1,5 +1,13 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "edb5a3e7", + "metadata": {}, + "source": [ + "# Quickstart: Amortized Posterior Estimation" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -9,39 +17,23 @@ "source": [ "import os\n", "import sys\n", - "\n", - "sys.path.append(os.path.abspath(os.path.join(\"../../..\")))\n", "import numpy as np\n", "import tensorflow as tf" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "attractive-radar", "metadata": {}, "outputs": [ { - "data": { - "image/jpeg": "[... base64-encoded JPEG data omitted ...]\n", - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/marvin/projects/BayesFlow/bayesflow/trainers.py:26: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n", + " from tqdm.autonotebook import tqdm\n" ] } ], "source": [ @@ -57,8 +49,8 @@ "id": "contemporary-arthritis", "metadata": {}, "source": [ - "
Introduction
\n", - "
\n", + "## Introduction\n", + "\n", "Welcome to the very first tutorial on using BayesFlow for amortized posterior estimation! In this notebook, we will estimate the means of a multivariate Gaussian model and illustrate some features of the library along the way. Above, we have already imported the core entities we will need for this notebook. In brief:\n", "\n", "* The module `simulations` contains high-level wrappers for gluing together priors, simulators, and context generators into a single `GenerateModel` object, which will generate all quantities of interest for a modeling scenario.\n", @@ -1192,7 +1184,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1206,7 +1198,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.11" }, "toc": { "base_numbering": 1, @@ -1234,4 +1226,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/docs/source/tutorial_notebooks/LCA_Model_Posterior_Estimation.ipynb b/examples/LCA_Model_Posterior_Estimation.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/LCA_Model_Posterior_Estimation.ipynb rename to examples/LCA_Model_Posterior_Estimation.ipynb index 7fdeccb83..525a7b3cc 100644 --- a/docs/source/tutorial_notebooks/LCA_Model_Posterior_Estimation.ipynb +++ b/examples/LCA_Model_Posterior_Estimation.ipynb @@ -2,20 +2,20 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "toc": true - }, + "metadata": {}, "source": [ - "
<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", - "" + "# Principled Amortized Bayesian Workflow for Cognitive Modeling\n", + "by Lukas Schumacher " ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "toc": true + }, "source": [ - "# Principled Amortized Bayesian Workflow for Cognitive Modeling\n", - "by Lukas Schumacher " + "
<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", + "" ] }, { @@ -58,7 +58,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "
Introduction
\n", + "## Introduction\n", "\n", "Psychological research often relies on mathematical models to explain and predict human behavior.\n", "Such models aim to formalize cognitive processes by mapping latent psychological constructs to model parameters and specifying how these generate manifest data. In this tutorial, we go through the steps of a principled [Bayesian workflow](https://betanalpha.github.io/assets/case_studies/principled_bayesian_workflow.html) that is imperative when developing and applying cognitive models.\n", @@ -1300,7 +1300,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.15" + "version": "3.10.11" }, "toc": { "base_numbering": 1, diff --git a/docs/source/tutorial_notebooks/Linear_ODE_system.ipynb b/examples/Linear_ODE_system.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/Linear_ODE_system.ipynb rename to examples/Linear_ODE_system.ipynb index 01b09dde5..0d8f87ee8 100644 --- a/docs/source/tutorial_notebooks/Linear_ODE_system.ipynb +++ b/examples/Linear_ODE_system.ipynb @@ -1,5 +1,13 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "bfb78b75", + "metadata": {}, + "source": [ + "# Posterior Estimation for ODEs" + ] + }, { "cell_type": "markdown", "id": "preliminary-breathing", @@ -71,7 +79,7 @@ "id": "06246a18", "metadata": {}, "source": [ - "# Introduction\n", + "## Introduction\n", "
\n", "In this tutorial, we will look at a simple linear ODE system:\n", "\n", @@ -100,7 +108,7 @@ "id": "97353c55", "metadata": {}, "source": [ - "## Analytical Solution\n", + "### Analytical Solution\n", "\n", "The advantage of such a simple ODE system is, that the analytical solutions for $u$ and $v$ are known:\n", "\n", @@ -159,7 +167,7 @@ "id": "5dc25c80", "metadata": {}, "source": [ - "## Stability of Solutions\n", + "### Stability of Solutions\n", "\n", "The solution $u$ and $v$ will be of the form:\n", "\n", @@ -187,7 +195,7 @@ "id": "f122ff9f", "metadata": {}, "source": [ - " # Generative Model Structure\n", + "## Generative Model Structure\n", " \n", "We have to generate some simulated data to train our BayesFlow architecture. The first step is to randomly draw combinations of $a$, $b$, $c$, $d$, $u_0$ and $v_0$ from a uniform prior distribution. By computing the eigenvalues, we can preemptively reject prior samples that will lead to unstable solutions. Additionally, we estimate the prior means and standard deviations for standardization later. The standardization step is just a convenience, since neural networks like well-scaled data." ] @@ -382,7 +390,7 @@ "id": "lined-hybrid", "metadata": {}, "source": [ - "# BayesFlow Architecture" + "## BayesFlow Architecture" ] }, { @@ -398,7 +406,7 @@ "id": "mexican-vegetable", "metadata": {}, "source": [ - "## Summary Network\n", + "### Summary Network\n", "\n", "For this tutorial, we will use a small LSTM summary network:" ] @@ -459,7 +467,7 @@ "id": "particular-serum", "metadata": {}, "source": [ - "## Inference Network\n", + "### Inference Network\n", "\n", "For the inference network, we will use a 8-layer conditional invertible neural network (cINN):" ] @@ -517,7 +525,7 @@ "id": "0d16c686", "metadata": {}, "source": [ - "# Preproccessing\n", + "## Preproccessing\n", "\n", "Before we feed the simulation data to our BayesFlow amortizer, we want to perform some preprocessing such as normalization, logscale conversion for stability, and removing nan/infinite samples." ] @@ -563,7 +571,7 @@ "id": "cae45084", "metadata": {}, "source": [ - "# Training\n", + "## Training\n", "\n", "To train our BayesFlow amortizer, we first have to define a `Trainer` instance, which will take care of simulation-based training. Note, that we also use a piecewise constant decay learning rate scheduler to reduce the learning rate after a pre-selected number of training steps." ] @@ -1108,7 +1116,7 @@ "id": "f3dc675e", "metadata": {}, "source": [ - "# Validating the Results\n", + "## Validating the Results\n", "\n", "After training our BayesFlow architecture, we want to validate the results. Our first step is to inspect the latent space $z$, which we enforce to be Gaussian using the default Kullback-Leibler (KL) loss during training. " ] @@ -1382,7 +1390,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.11" }, "toc": { "base_numbering": "1", diff --git a/docs/source/tutorial_notebooks/Model_Comparison_MPT.ipynb b/examples/Model_Comparison_MPT.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/Model_Comparison_MPT.ipynb rename to examples/Model_Comparison_MPT.ipynb index a074ac1cf..6789c6f13 100644 --- a/docs/source/tutorial_notebooks/Model_Comparison_MPT.ipynb +++ b/examples/Model_Comparison_MPT.ipynb @@ -1,31 +1,23 @@ { "cells": [ { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "toc": true - }, - "source": [ - "
<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", - "" - ] - }, - { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Amortized Model Comparison Workflow for Cognitive Modeling\n", + "# Model Comparison for Cognitive Models\n", + "Part 1: Non-Hierarchical Model Comparison.\n", + "\n", "by Lasse Elsemüller" ] }, { - "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "toc": true + }, "source": [ - "# Part 1: Non-Hierarchical Model Comparison" + "
<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", + "" ] }, { @@ -46,17 +38,15 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "
Introduction
\n", + "## Introduction\n", "\n", "This tutorial series contains workflows for comparing competing probabilistic models via posterior model probabilities (PMPs) or Bayes Factors (BFs) with BayesFlow. We start with non-hierarchical model comparison in this tutorial (part 1), while [part 2](./Hierarchical_Model_Comparison_MPT.ipynb) will look at the modifications that allow us to compare hierarchical models. To keep the content concise, the focus will be on the model comparison steps themselves. For a comprehensive overview of the different functionalities BayesFlow has to offer, see the [\"Principled Amortized Bayesian Workflow for Cognitive Modeling\"](./LCA_Model_Posterior_Estimation.ipynb) tutorial notebook." ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -64,7 +54,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -72,7 +61,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -86,7 +74,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -96,7 +83,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -104,7 +90,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -135,15 +120,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Priors" + "### Priors" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -177,7 +160,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -194,7 +176,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -228,7 +209,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -256,7 +236,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -264,15 +243,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating Simulators" + "### Creating Simulators" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -348,7 +325,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -398,7 +374,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -447,7 +422,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -464,15 +438,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Prior Predictive Checks" + "### Prior Predictive Checks" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -555,7 +527,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -563,7 +534,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -571,7 +541,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -590,7 +559,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -607,7 +575,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -624,15 +591,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Defining the Configurator" + "### Defining the Configurator" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -640,15 +605,13 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, 
"source": [ - "## Defining the Trainer" + "### Defining the Trainer" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -677,7 +640,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -714,7 +676,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -722,7 +683,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -782,7 +742,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -810,7 +769,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -818,7 +776,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -826,7 +783,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -851,7 +807,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -884,7 +839,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -913,7 +867,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -921,7 +874,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -929,7 +881,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -964,7 +915,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -992,7 +942,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1022,7 +971,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1051,7 +999,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1059,7 +1006,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1083,7 +1029,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.10" + "version": "3.10.11" }, "toc": { "base_numbering": 1, diff --git a/docs/source/tutorial_notebooks/Model_Misspecification.ipynb b/examples/Model_Misspecification.ipynb similarity index 99% rename from docs/source/tutorial_notebooks/Model_Misspecification.ipynb rename to examples/Model_Misspecification.ipynb index 90cedbb02..7530d1487 100644 --- a/docs/source/tutorial_notebooks/Model_Misspecification.ipynb +++ b/examples/Model_Misspecification.ipynb @@ -1,7 +1,14 @@ { "cells": [ { - "attachments": {}, + "cell_type": "markdown", + "id": "4066f8d3", + "metadata": {}, + "source": [ + "# Detecting Model Misspecification in Amortized Posterior Inference" + ] + }, + { "cell_type": "markdown", "id": "e2df8c31", "metadata": { @@ -35,16 +42,14 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "6e175999-940f-4220-8696-7ba877600f89", "metadata": {}, "source": [ - "# Introduction" + "## Introduction" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "c34cc2c7-65d4-43b5-b71b-0515443a1abb", "metadata": {}, @@ -58,25 +63,22 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "69c3be58", "metadata": {}, "source": [ - "" + "" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "dab21025-c5ad-406b-a7cb-d5c59d129601", "metadata": {}, "source": [ - "# Model specification" + "## Model specification" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "fb30c6ac", "metadata": {}, @@ -100,7 +102,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "1de375a6", "metadata": {}, @@ -147,16 +148,14 @@ ] }, 
{ - "attachments": {}, "cell_type": "markdown", "id": "c2b6c728", "metadata": {}, "source": [ - "# Training" + "## Training" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "cf7f806c", "metadata": {}, @@ -193,16 +192,14 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "3674126e", "metadata": {}, "source": [ - "## Training loop" + "### Training loop" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "65269680", "metadata": {}, @@ -223,12 +220,11 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "1cbbdebb", "metadata": {}, "source": [ - "## Diagnostics" + "### Diagnostics" ] }, { @@ -297,12 +293,11 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "00f3acc4", "metadata": {}, "source": [ - "## Inspecting the summary space\n", + "### Inspecting the summary space\n", "\n", "In fact, the summary space has essentially converged to a unit Gaussian for samples from the generative model which we used to train the networks." ] @@ -333,18 +328,16 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "bf961a09-c49f-4443-b1fb-584cdc80d7c8", "metadata": {}, "source": [ - "# Observed Data: Misspecification Detection\n", + "## Observed Data: Misspecification Detection\n", "\n", "After assessing the converged neural posterior approximator's performance for the reference model used for training, we will now perform inference on data from a different data generating process. In a real-life analysis, this would be the observed data $x_{\\text{obs}}$ from an experiment or study." ] }, { - "attachments": {}, "cell_type": "markdown", "id": "3a727c31", "metadata": {}, @@ -405,16 +398,14 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "c11508c3", "metadata": {}, "source": [ - "## Visualization in data space" + "### Visualization in data space" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "e16aca3b", "metadata": {}, @@ -461,12 +452,11 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "01bd544d", "metadata": {}, "source": [ - "## Detecting misspecification in summary space\n", + "### Detecting misspecification in summary space\n", "\n", "As proposed in our paper [2], we will detect the deviating observed data as deviations in the structured summary space. Therefore, we compute the learned summary statistics of the well-specified data $h_{\\psi}(x)$ and for the observed data $h_{\\psi}(x_{\\text{obs}})$ by a simple pass through the trainer's summary network $h_{\\psi}$.\n", "\n", @@ -516,7 +506,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "86bea4e7", "metadata": {}, @@ -549,12 +538,11 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "db160622", "metadata": {}, "source": [ - "# Hypothesis test for observed data\n", + "## Hypothesis test for observed data\n", "\n", "In real-life modeling scenarios, a researcher might desire to perform inference on observed data $x_{\\text{obs}}$. 
After training the neural posterior estimator with samples from a generative model $\\mathcal{M}$, the natural question arises: \"Is the model $\\mathcal{M}$ well-specified for the observed data $x_{\\text{obs}}$?\"\n", "\n", @@ -565,7 +553,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "b68edbb1", "metadata": {}, @@ -615,7 +602,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "df48417a-26a8-4ba2-8bbf-b1264cd99681", "metadata": {}, @@ -665,7 +651,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "0653338c-35c7-443c-b7e0-8ec3ed1d4575", "metadata": {}, @@ -717,18 +702,16 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "822fc97c-f4f4-49aa-8c2c-6d028b31ee9c", "metadata": {}, "source": [ - "# Sensitivity to Misspecification\n", + "## Sensitivity to Misspecification\n", "\n", "The submodule `bayesflow.sensitivity` contains functions to analyze the sensitivity of a converged `Trainer` (i.e., the neural posterior estimator) to model misspecification." ] }, { - "attachments": {}, "cell_type": "markdown", "id": "53cf80ac", "metadata": {}, @@ -763,7 +746,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "d9afb3b0", "metadata": {}, @@ -795,12 +777,11 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "ca8c198a", "metadata": {}, "source": [ - "## Computing Sensitivity\n", + "### Computing Sensitivity\n", "\n", "As described above, the `bf.sensitivity.misspecification_experiment` function requires the converged `Trainer`, the factory for misspecified models, and meta-information on the settings. In addition, the number of posterior samples per simulated data set as well as the total number of simulated data sets per setting configuration can be specified." ] @@ -831,16 +812,14 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "c919073b", "metadata": {}, "source": [ - "## Plotting the results" + "### Plotting the results" ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9317ad22", "metadata": {}, @@ -872,7 +851,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "ee01b222", "metadata": {}, @@ -936,7 +914,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.9" + "version": "3.10.11" }, "toc": { "base_numbering": 1, diff --git a/docs/source/tutorial_notebooks/img/1HT2HT.png b/examples/img/1HT2HT.png similarity index 100% rename from docs/source/tutorial_notebooks/img/1HT2HT.png rename to examples/img/1HT2HT.png diff --git a/docs/source/tutorial_notebooks/img/bayesflow_overview.png b/examples/img/bayesflow_overview.png similarity index 100% rename from docs/source/tutorial_notebooks/img/bayesflow_overview.png rename to examples/img/bayesflow_overview.png diff --git a/docs/source/tutorial_notebooks/img/generative_model.png b/examples/img/generative_model.png similarity index 100% rename from docs/source/tutorial_notebooks/img/generative_model.png rename to examples/img/generative_model.png diff --git a/docs/source/images/model_misspecification_amortized_sbi.png b/examples/img/model_misspecification_amortized_sbi.png similarity index 100% rename from docs/source/images/model_misspecification_amortized_sbi.png rename to examples/img/model_misspecification_amortized_sbi.png diff --git a/docs/source/tutorial_notebooks/img/trainer_connection.png b/examples/img/trainer_connection.png similarity index 100% rename from docs/source/tutorial_notebooks/img/trainer_connection.png rename to examples/img/trainer_connection.png diff --git 
a/docs/source/tutorial_notebooks/ECG_Model.ipynb b/examples/in_progress/ECG_Model.ipynb similarity index 100% rename from docs/source/tutorial_notebooks/ECG_Model.ipynb rename to examples/in_progress/ECG_Model.ipynb diff --git a/docs/source/tutorial_notebooks/PriorSensitivity_Covid19_Initial.ipynb b/examples/in_progress/PriorSensitivity_Covid19_Initial.ipynb similarity index 100% rename from docs/source/tutorial_notebooks/PriorSensitivity_Covid19_Initial.ipynb rename to examples/in_progress/PriorSensitivity_Covid19_Initial.ipynb diff --git a/setup.cfg b/setup.cfg index 188ab1768..903ef3943 100644 --- a/setup.cfg +++ b/setup.cfg @@ -55,9 +55,10 @@ testing = mypy >= 0.910 docs = sphinx >= 5.1.0 - nbsphinx >= 0.8.9 - sphinx-rtd-theme >= 1.0.0 - m2r2 >= 0.3 + sphinx-book-theme>=0.2.0 + numpydoc >= 1.2.1 + myst_nb >= 0.13.1 + sphinx_design >= 0.4.1 [flake8] max-line-length = 120
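For orientation, the `docs` extras above point to a Sphinx build based on `sphinx-book-theme`, `numpydoc`, and `myst_nb`. A hypothetical minimal `docsrc/source/conf.py` consistent with these dependencies could look like the following sketch; the repository's actual configuration may differ, so treat every option here as an assumption.

```python
# docsrc/source/conf.py -- hypothetical sketch matching the docs extras in
# setup.cfg; the real BayesFlow configuration may differ.
project = "BayesFlow"

extensions = [
    "sphinx.ext.autodoc",  # pull docstrings into the API reference
    "numpydoc",            # parse the numpy-style docstrings
    "myst_nb",             # render the tutorial notebooks from examples/
    "sphinx_design",       # grids, cards, and tabs for the landing page
]

html_theme = "sphinx_book_theme"  # replaces the previous sphinx-rtd-theme

# Do not re-execute the tutorial notebooks during the docs build; they train
# neural networks and would take far too long in CI.
nb_execution_mode = "off"
```

Keeping notebook execution off makes the documentation build fast and deterministic, since the notebooks already ship with stored outputs.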