diff --git a/docs/_static/notebook_figures/replicability.jpg b/docs/_static/notebook_figures/replicability.jpg
new file mode 100644
index 0000000..2de2706
Binary files /dev/null and b/docs/_static/notebook_figures/replicability.jpg differ
diff --git a/docs/refs.bib b/docs/refs.bib
index 664ca1a..5866e38 100644
--- a/docs/refs.bib
+++ b/docs/refs.bib
@@ -240,4 +240,46 @@ @article{jackson1980principal
pages={201--213},
year={1980},
publisher={Taylor \& Francis}
+}
+
+@article{reprref1,
+ author={Adali, Tülay and Kantar, Furkan and Akhonda, Mohammad Abu Baker Siddique and Strother, Stephen and Calhoun, Vince D. and Acar, Evrim},
+ journal={IEEE Signal Processing Magazine},
+ title={Reproducibility in Matrix and Tensor Decompositions: Focus on model match, interpretability, and uniqueness},
+ year={2022},
+ volume={39},
+ number={4},
+  pages={8--24},
+  keywords={Problem-solving;Reproducibility of results;Data models;Matrix decomposition},
+  doi={10.1109/MSP.2022.3163870}
+}
+
+@article{reprref2,
+ title={Characterizing human postprandial metabolic response using multiway data analysis},
+ author={Yan, Shi and Li, Lu and Horner, David and Ebrahimi, Parvaneh and Chawes, Bo and Dragsted, Lars O and Rasmussen, Morten A and Smilde, Age K and Acar, Evrim},
+ journal={Metabolomics},
+ volume={20},
+ number={3},
+ pages={50},
+ year={2024},
+ publisher={Springer}
+}
+
+@article{reprref3,
+  author = {Fog Froriep Halberg, Helene and Bevilacqua, Marta and Rinnan, Åsmund},
+  title = {Resampling as a Robust Measure of Model Complexity in PARAFAC Models},
+  journal = {Journal of Chemometrics},
+  volume = {38},
+  number = {12},
+  pages = {e3601},
+  doi = {10.1002/cem.3601},
+  year = {2024}
+}
+
+@article{reprref4,
+ author = {Erd{\H o}s, Bal{\'a}zs and Chatzis, Christos and Thorsen, Jonathan and Stokholm, Jakob and Smilde, Age K. and Rasmussen, Morten A. and Acar, Evrim},
+ title = {Extracting host-specific developmental signatures from longitudinal microbiome data},
+ elocation-id = {2025.11.22.689760},
+ year = {2025},
+ doi = {10.1101/2025.11.22.689760},
+ journal = {bioRxiv}
}
\ No newline at end of file
diff --git a/docs/sg_execution_times.rst b/docs/sg_execution_times.rst
new file mode 100644
index 0000000..a66f7e1
--- /dev/null
+++ b/docs/sg_execution_times.rst
@@ -0,0 +1,61 @@
+
+:orphan:
+
+.. _sphx_glr_sg_execution_times:
+
+
+Computation times
+=================
+**01:57.453** total execution time for 9 files **from all galleries**:
+
+.. container::
+
+ .. raw:: html
+
+
+
+
+
+
+
+ .. list-table::
+ :header-rows: 1
+ :class: table table-striped sg-datatable
+
+ * - Example
+ - Time
+ - Mem (MB)
+ * - :ref:`sphx_glr_auto_examples_plot_replicability_analysis.py` (``../examples/plot_replicability_analysis.py``)
+ - 01:57.453
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_bike_plotly.py` (``../examples/plot_bike_plotly.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_core_consistency.py` (``../examples/plot_core_consistency.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_labelled_decompositions.py` (``../examples/plot_labelled_decompositions.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_optimisation_diagnostic.py` (``../examples/plot_optimisation_diagnostic.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_outlier_detection.py` (``../examples/plot_outlier_detection.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_selecting_aminoacids_components.py` (``../examples/plot_selecting_aminoacids_components.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_split_half_analysis.py` (``../examples/plot_split_half_analysis.py``)
+ - 00:00.000
+ - 0.0
+ * - :ref:`sphx_glr_auto_examples_plot_working_with_xarray.py` (``../examples/plot_working_with_xarray.py``)
+ - 00:00.000
+ - 0.0
diff --git a/examples/plot_replicability_analysis.py b/examples/plot_replicability_analysis.py
new file mode 100644
index 0000000..8d03f2a
--- /dev/null
+++ b/examples/plot_replicability_analysis.py
@@ -0,0 +1,215 @@
+"""
+.. _replicability_analysis:
+
+Replicability analysis
+----------------------
+
+This example describes how the replicability of patterns can be used to guide the component selection process for PARAFAC models :cite:p:`reprref1, reprref2, reprref3`.
+
+This process evaluates the consistency of the uncovered patterns by fitting the model to different subsets of the data. The rationale is that if the appropriate number of components is used, the uncovered patterns should be consistent across subsets. This can be seen as an extension of :ref:`split-half analysis <sphx_glr_auto_examples_plot_split_half_analysis.py>`, where the data is instead split into a larger number of smaller folds, each of which is left out in turn.
+"""
+
+###############################################################################
+# Imports and utilities
+# ^^^^^^^^^^^^^^^^^^^^^
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorly as tl
+from tensorly.decomposition import parafac
+
+import sklearn
+from sklearn.model_selection import RepeatedKFold
+
+import tlviz
+
+rng = np.random.default_rng(1)
+
+###############################################################################
+# To fit PARAFAC models, we need to solve a non-convex optimization problem, possibly with local minima. It is
+# therefore useful to fit several models with the same number of components using many different random
+# initialisations.
+
+
+def fit_many_parafac(X, num_components, num_inits=5):
+ return [
+ parafac(
+ X,
+ num_components,
+ n_iter_max=1000,
+ tol=1e-8,
+ init="random",
+ linesearch=True,
+ random_state=i,
+ )
+ for i in range(num_inits)
+ ]
+
+
+###############################################################################
+# Creating simulated data
+# ^^^^^^^^^^^^^^^^^^^^^^^
+#
+# We start with some simulated data, since then we know exactly how many components the data contains.
+
+cp_tensor, dataset = tlviz.data.simulated_random_cp_tensor((30, 40, 25), 3, noise_level=0.3, labelled=True)
+
+###############################################################################
+# .. figure:: /_static/notebook_figures/replicability.jpg
+#
+#     Illustration of the replicability check, taken from :cite:p:`reprref3`.
+#
+
+###############################################################################
+# The replicability analysis boils down to the following steps:
+#
+# 1. Split the data along a user-chosen mode into :math:`N` folds, where :math:`N` is also user-chosen.
+# 2. Create :math:`N` subsets by removing each fold from the complete dataset.
+# 3. Fit models from multiple random initializations to each subset and keep the *best* run,
+#    i.e. the one with the lowest loss (giving a total of :math:`N` *best* runs).
+# 4. Compare the best runs across the different subsets in terms of the factor match score (FMS)
+#    to evaluate the replicability of the uncovered patterns (:math:`\binom{N}{2}` comparisons).
+# 5. Repeat the above process :math:`M` times (with :math:`M` user-chosen), yielding a total of
+#    :math:`M \binom{N}{2}` comparisons (see the small calculation after this list).
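+
+###############################################################################
+# As a small worked example of the last step: with the :math:`N = 5` folds and
+# :math:`M = 10` repeats used below, this amounts to :math:`10 \binom{5}{2} = 100`
+# pairwise comparisons for each number of components.
+
+from math import comb
+
+# Number of pairwise FMS comparisons per number of components, for N=5 folds and M=10 repeats
+print(10 * comb(5, 2))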
+
+
+###############################################################################
+# Splitting the data
+# ^^^^^^^^^^^^^^^^^^
+#
+
+splits = 5 # N
+repeats = 10 # M
+
+models = {}
+split_indices = {} # Keeps track of which indices are used in each subset
+
+for rank in [2, 3, 4, 5]:
+
+ print(f"{rank} components")
+
+ rskf = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=1)
+
+ models[rank] = {}
+ split_indices[rank] = {}
+
+ for split_no, (train_index, _) in enumerate(rskf.split(dataset)):
+ repeat_no = split_no // splits
+
+ # Sort rows for consistent ordering (not necessary)
+
+ sorted_train_index = sorted(train_index)
+ train = dataset[sorted_train_index]
+
+        train = train / tl.norm(train)  # Pre-process the subset in isolation, without leaking information from the omitted fold
+
+ current_models = fit_many_parafac(train.data, rank)
+ current_model = tlviz.multimodel_evaluation.get_model_with_lowest_error(current_models, train)
+
+ if repeat_no not in models[rank].keys():
+ models[rank][repeat_no] = []
+
+ models[rank][repeat_no].append(current_model)
+
+ if repeat_no not in split_indices[rank].keys():
+ split_indices[rank][repeat_no] = []
+
+ split_indices[rank][repeat_no].append(sorted_train_index)
+
+
+###############################################################################
+# Often, the mode one will be splitting within refers to different samples
+# Depending on the use-case, it might be deemed reasonable to retain the
+# distributions of some properties in each subset. For this goal,
+# `RepeatedStratifiedKFold `_
+# can be used.
+#
+# Each subset might require certain pre-processing. It is important to pre-process
+# each subset in isolation to avoid leaking information from the omitted part of the input.
+# For example, in this case we normalize each subset to unit norm independently.
+# Also, notice that ``for train_index, _ in rskf.split(dataset):`` is embarrassingly parallel.
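+#
+# Below is a minimal sketch of how stratified resampling could be set up. The simulated
+# data has no sample labels, so ``y_example`` is a purely hypothetical per-sample label
+# created only to illustrate the call signature.
+
+from sklearn.model_selection import RepeatedStratifiedKFold
+
+# Hypothetical per-sample labels (in practice, these would be known sample properties)
+y_example = rng.integers(0, 2, size=dataset.shape[0])
+
+rskf_stratified = RepeatedStratifiedKFold(n_splits=splits, n_repeats=repeats, random_state=1)
+for stratified_train_index, _ in rskf_stratified.split(dataset, y_example):
+    pass  # each subset index would be used exactly as `train_index` in the loop above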
+
+###############################################################################
+# Computing and plotting factor similarity
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# Here, we skip the mode we split along (``mode=0``) when computing the FMS.
+
+replicability_stability = {}
+for rank in [2, 3, 4, 5]:
+ replicability_stability[rank] = []
+ for repeat_no in models[rank].keys():
+ for i, cp_i in enumerate(models[rank][repeat_no]):
+ for j, cp_j in enumerate(models[rank][repeat_no]):
+ if i < j: # include every pair only once and omit i == j
+ fms = tlviz.factor_tools.factor_match_score(cp_i, cp_j, consider_weights=False, skip_mode=0)
+ replicability_stability[rank].append(fms)
+
+ranks = sorted(replicability_stability.keys())
+data = [np.ravel(replicability_stability[r]) for r in ranks]
+
+fig, ax = plt.subplots()
+ax.axhline(0.9, linestyle="--", color="gray")
+ax.boxplot(data, positions=ranks)
+ax.set_xlabel("Number of components")
+ax.set_ylabel("Replicability stability")
+plt.show()
+
+###############################################################################
+# Here, we can observe that over-estimating the number of components
+# results in patterns that are not replicable, as indicated by low FMS values.
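+#
+# The box plot can also be summarised numerically, for example by the median FMS
+# for each number of components (a minimal sketch using the values computed above):
+
+# Median replicability stability per number of components
+for rank in ranks:
+    print(f"{rank} components: median FMS = {np.median(replicability_stability[rank]):.3f}")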
+
+###############################################################################
+# Computing and plotting factor similarity (alt.)
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# There is an alternative way to estimate the replicability of the uncovered patterns
+# that also includes the mode we split along :cite:p:`reprref4`. When comparing two factorizations
+# in terms of FMS, we can include the previously skipped factor matrix by restricting it to
+# the indices that are present in both subsets.
+
+replicability_stability_alt = {}
+for rank in [2, 3, 4, 5]:
+ replicability_stability_alt[rank] = []
+ for repeat_no in models[rank].keys():
+ for i, cp_i in enumerate(models[rank][repeat_no]):
+ for j, cp_j in enumerate(models[rank][repeat_no]):
+ if i < j: # include every pair only once and omit i == j
+
+ weights_i, (A_i, B_i, C_i) = cp_i
+ weights_j, (A_j, B_j, C_j) = cp_j
+
+ indices_subset_i = sorted(split_indices[rank][repeat_no][i])
+ indices_subset_j = sorted(split_indices[rank][repeat_no][j])
+
+ common_indices = sorted(list(set(indices_subset_i).intersection(set(indices_subset_j))))
+
+ indices2use_i = []
+ indices2use_j = []
+
+                    # Map each common original index to its row position within each subset
+                    for common_idx in common_indices:
+                        indices2use_i.append(indices_subset_i.index(common_idx))
+                        indices2use_j.append(indices_subset_j.index(common_idx))
+
+                    # Restrict the factors of the split mode to the rows shared by both subsets
+                    A_i = A_i[indices2use_i, :]
+                    A_j = A_j[indices2use_j, :]
+
+ fms = tlviz.factor_tools.factor_match_score(
+ (weights_i, (A_i, B_i, C_i)), (weights_j, (A_j, B_j, C_j)), consider_weights=False
+ )
+ replicability_stability_alt[rank].append(fms)
+
+ranks = sorted(replicability_stability_alt.keys())
+data = [np.ravel(replicability_stability_alt[r]) for r in ranks]
+
+fig, ax = plt.subplots()
+ax.axhline(0.9, linestyle="--", color="gray")
+ax.boxplot(data, positions=ranks)
+ax.set_xlabel("Number of components")
+ax.set_ylabel("Replicability stability")
+plt.show()
+
+###############################################################################
+# ``common_indices`` contains the original indices (e.g. samples) that are present in both subsets.
+# However, the position of a given index can differ between subsets (e.g. sample no. 3 is not
+# guaranteed to sit in the third row of every subset, since the first and second samples might have
+# been omitted), so we map each common index in the original tensor back to its row position within
+# each subset, as illustrated below.
+#
+# In terms of the replicability of the patterns, this alternative approach gives similar results.
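+
+###############################################################################
+# A tiny, self-contained illustration of why the positions shift (the row indices here are
+# hypothetical and unrelated to the dataset above): if a subset omits original rows 0 and 2,
+# the remaining rows are ``[1, 3, 4]`` and original row 3 ends up at position 1 of that subset.
+
+subset_rows = [1, 3, 4]  # hypothetical subset of original row indices
+print(subset_rows.index(3))  # -> 1: the row position of original sample 3 within this subset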
diff --git a/setup.cfg b/setup.cfg
index ccf1eb0..217eefc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -37,6 +37,7 @@ docs =
tensorly-sphinx-theme
plotly>=4.12
torch
+ scikit-learn
test =
pytest