Commit decaae5

Merge pull request #24 from cchatzis/replicability
Added replicability check
2 parents 5b631e1 + 8c5591f commit decaae5

Showing 5 changed files with 252 additions and 0 deletions.

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -70,6 +70,7 @@ instance/
 
 # Sphinx documentation
 docs/_build/
+docs/sg_execution_times.rst
 
 # PyBuilder
 target/
Binary figure (191 KB), not shown

docs/refs.bib

Lines changed: 42 additions & 0 deletions
@@ -240,4 +240,46 @@ @article{jackson1980principal
   pages={201--213},
   year={1980},
   publisher={Taylor \& Francis}
+}
+
+@article{reprref1,
+  author={Adali, Tülay and Kantar, Furkan and Akhonda, Mohammad Abu Baker Siddique and Strother, Stephen and Calhoun, Vince D. and Acar, Evrim},
+  journal={IEEE Signal Processing Magazine},
+  title={Reproducibility in Matrix and Tensor Decompositions: Focus on model match, interpretability, and uniqueness},
+  year={2022},
+  volume={39},
+  number={4},
+  pages={8--24},
+  keywords={Problem-solving;Reproducibility of results;Data models;Matrix decomposition},
+  doi={10.1109/MSP.2022.3163870}
+}
+
+@article{reprref2,
+  title={Characterizing human postprandial metabolic response using multiway data analysis},
+  author={Yan, Shi and Li, Lu and Horner, David and Ebrahimi, Parvaneh and Chawes, Bo and Dragsted, Lars O and Rasmussen, Morten A and Smilde, Age K and Acar, Evrim},
+  journal={Metabolomics},
+  volume={20},
+  number={3},
+  pages={50},
+  year={2024},
+  publisher={Springer}
+}
+
+@article{reprref3,
+  author={Fog Froriep Halberg, Helene and Bevilacqua, Marta and Rinnan, Åsmund},
+  title={Resampling as a Robust Measure of Model Complexity in PARAFAC Models},
+  journal={Journal of Chemometrics},
+  volume={38},
+  number={12},
+  pages={e3601},
+  doi={10.1002/cem.3601},
+  year={2024}
+}
+
+@article{reprref4,
+  author={Erd{\H o}s, Bal{\'a}zs and Chatzis, Christos and Thorsen, Jonathan and Stokholm, Jakob and Smilde, Age K. and Rasmussen, Morten A. and Acar, Evrim},
+  title={Extracting host-specific developmental signatures from longitudinal microbiome data},
+  elocation-id={2025.11.22.689760},
+  year={2025},
+  doi={10.1101/2025.11.22.689760},
+  journal={bioRxiv}
 }
Lines changed: 208 additions & 0 deletions
@@ -0,0 +1,208 @@
"""
.. _replicability_analysis:

Replicability analysis
----------------------

This example describes how the replicability of patterns can be used to guide the component selection process for PARAFAC models :cite:p:`reprref1, reprref2, reprref3`.

This process evaluates the consistency of the uncovered patterns by fitting the model to different subsets of the data. The rationale is that, if the appropriate number of components is used, the uncovered patterns should be consistent across subsets. This can be seen as an extension of `split-half analysis <https://tensorly.org/viz/stable/auto_examples/plot_split_half_analysis.html>`_ where a larger number of smaller subsets of the input are left out.
"""

###############################################################################
# Imports and utilities
# ^^^^^^^^^^^^^^^^^^^^^

import matplotlib.pyplot as plt
import numpy as np
import tensorly as tl
from tensorly.decomposition import parafac

import sklearn
from sklearn.model_selection import RepeatedKFold

import tlviz

rng = np.random.default_rng(1)

###############################################################################
# To fit PARAFAC models, we need to solve a non-convex optimization problem, possibly with local minima. It is
# therefore useful to fit several models with the same number of components using many different random
# initialisations.


def fit_many_parafac(X, num_components, num_inits=5):
    return [
        parafac(
            X,
            num_components,
            n_iter_max=1000,
            tol=1e-8,
            init="random",
            linesearch=True,
            random_state=i,
        )
        for i in range(num_inits)
    ]


###############################################################################
# Creating simulated data
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# We start with simulated data, since we then know exactly how many components there are in the data.

cp_tensor, dataset = tlviz.data.simulated_random_cp_tensor((30, 40, 25), 3, noise_level=0.3, labelled=True)

###############################################################################
# .. figure:: /_static/notebook_figures/replicability.jpg
#
#     Illustration of the replicability check, taken from :cite:p:`reprref3`.
#

###############################################################################
# The replicability analysis boils down to the following steps:
#
# 1. Split the data, along a user-chosen mode, into a user-chosen number of folds :math:`N`.
# 2. Create :math:`N` subsets by leaving each fold out of the complete dataset.
# 3. Fit multiple initializations to each subset and choose the *best* run
#    according to the lowest loss (a total of :math:`N` *best* runs).
# 4. Compare, in terms of FMS, the best runs across the different subsets
#    to evaluate the replicability of the uncovered patterns (:math:`\binom{N}{2}` comparisons).
# 5. Repeat the above process :math:`M` times (user-chosen), to obtain a total of
#    :math:`M \binom{N}{2}` comparisons.


###############################################################################
# Splitting the data
# ^^^^^^^^^^^^^^^^^^
#

splits = 5  # N
repeats = 10  # M

models = {}
split_indices = {}  # Keeps track of which indices are used in each subset

for rank in [2, 3, 4, 5]:

    print(f"{rank} components")

    rskf = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=1)

    models[rank] = [[] for _ in range(repeats)]
    split_indices[rank] = [[] for _ in range(repeats)]

    for split_no, (train_index, _) in enumerate(rskf.split(dataset)):
        repeat_no = split_no // splits

        train = dataset[train_index]

        train = train / tl.norm(train)  # Pre-process the subset without leaking information from the omitted fold

        current_models = fit_many_parafac(train.data, rank)
        current_model = tlviz.multimodel_evaluation.get_model_with_lowest_error(current_models, train)

        models[rank][repeat_no].append(current_model)

        split_indices[rank][repeat_no].append(train_index)  # Keeping track of the indices of each fold


###############################################################################
# Often, the mode one splits within corresponds to different samples.
# Depending on the use case, it might be desirable to retain the distribution of
# some property in each subset. For this,
# `RepeatedStratifiedKFold <https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RepeatedStratifiedKFold.html#sklearn.model_selection.RepeatedStratifiedKFold>`_
# can be used.
#
# Each subset might require certain pre-processing. It is important to pre-process
# each subset in isolation to avoid leaking information from the omitted part of the input.
# For example, in this case we normalize each subset to unit norm independently.
# Also, notice that the loop over ``rskf.split(dataset)`` above is embarrassingly parallel,
# since each fold is fitted independently of the others.
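#
# The short sketch below illustrates both ideas (stratified splitting and parallel fitting).
# It is an illustration only and is not used by the rest of this example; ``group_labels``
# and ``fit_fold`` are hypothetical names introduced here.

from joblib import Parallel, delayed  # joblib is installed together with scikit-learn
from sklearn.model_selection import RepeatedStratifiedKFold

# Stratified splitting: keep the distribution of a per-sample label in every subset.
group_labels = rng.integers(0, 2, size=dataset.shape[0])  # hypothetical label per sample
stratified_splitter = RepeatedStratifiedKFold(n_splits=splits, n_repeats=repeats, random_state=1)
stratified_folds = [train_index for train_index, _ in stratified_splitter.split(dataset, group_labels)]


# Parallel fitting: the best run of each fold can be computed independently of the other folds.
def fit_fold(train_index, rank):
    train = dataset[train_index]
    train = train / tl.norm(train)  # pre-process the subset in isolation
    candidates = fit_many_parafac(train.data, rank)
    return tlviz.multimodel_evaluation.get_model_with_lowest_error(candidates, train)


# Fit the five folds of the first repeat in parallel, one job per fold.
best_model_per_fold = Parallel(n_jobs=splits)(
    delayed(fit_fold)(train_index, 3) for train_index in stratified_folds[:splits]
)
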
###############################################################################
# Computing and plotting factor similarity
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Here, we skip the mode we split within (``mode=0``).

replicability_stability = {}
for rank in models.keys():
    replicability_stability[rank] = []
    for repeat_no, current_models in enumerate(models[rank]):
        for i, cp_i in enumerate(current_models):
            for j, cp_j in enumerate(current_models):
                if i >= j:  # include every pair only once and omit i == j
                    continue
                fms = tlviz.factor_tools.factor_match_score(cp_i, cp_j, consider_weights=False, skip_mode=0)
                replicability_stability[rank].append(fms)

ranks = sorted(replicability_stability.keys())
data = [np.ravel(replicability_stability[r]) for r in ranks]

fig, ax = plt.subplots()
ax.axhline(0.9, linestyle="--", color="gray")
ax.boxplot(data, positions=ranks)
ax.set_xlabel("Number of components")
ax.set_ylabel("Replicability stability")
plt.show()

###############################################################################
# Here, we can observe that over-estimating the number of components
# results in patterns that are not replicable, as indicated by low FMS values.
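#
# As an optional numeric summary (not part of the analysis above), the fraction of pairwise
# comparisons exceeding the 0.9 threshold drawn in the plot can be printed per model size:

for rank in sorted(replicability_stability):
    fms_values = np.asarray(replicability_stability[rank])
    print(f"{rank} components: {np.mean(fms_values > 0.9):.0%} of comparisons have FMS > 0.9")
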
###############################################################################
# Computing and plotting factor similarity (alt.)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# There is an alternative way to estimate the replicability of the uncovered patterns
# that also includes the mode we are splitting within :cite:p:`reprref4`. When comparing two
# factorizations in terms of FMS, we can include the previously skipped factor by using only
# the indices present in both subsets.

replicability_stability_alt = {}
for rank in models.keys():
    replicability_stability_alt[rank] = []
    for repeat_no, current_models in enumerate(models[rank]):
        for i, cp_i in enumerate(current_models):
            for j, cp_j in enumerate(current_models):
                if i >= j:  # include every pair only once and omit i == j
                    continue

                weights_i, (A_i, B_i, C_i) = cp_i
                weights_j, (A_j, B_j, C_j) = cp_j

                indices_subset_i = list(split_indices[rank][repeat_no][i])
                indices_subset_j = list(split_indices[rank][repeat_no][j])

                common_indices = list(set(indices_subset_i).intersection(set(indices_subset_j)))

                indices2use_i = []
                indices2use_j = []

                for common_idx in common_indices:
                    indices2use_i.append(indices_subset_i.index(common_idx))
                    indices2use_j.append(indices_subset_j.index(common_idx))

                A_i = A_i[indices2use_i, :]
                A_j = A_j[indices2use_j, :]

                fms = tlviz.factor_tools.factor_match_score(
                    (weights_i, (A_i, B_i, C_i)), (weights_j, (A_j, B_j, C_j)), consider_weights=False
                )
                replicability_stability_alt[rank].append(fms)

ranks = sorted(replicability_stability_alt.keys())
data = [np.ravel(replicability_stability_alt[r]) for r in ranks]

fig, ax = plt.subplots()
ax.axhline(0.9, linestyle="--", color="gray")
ax.boxplot(data, positions=ranks)
ax.set_xlabel("Number of components")
ax.set_ylabel("Replicability stability")
plt.show()

###############################################################################
# ``common_indices`` contains the original-tensor indices (e.g. samples) that are present in
# both subsets. Since the position of a given index can change between subsets (e.g. sample
# no. 3 is not guaranteed to sit in the third row of every subset, because the first and second
# samples might have been omitted), we need to map each common index back to its row position
# within each subset before slicing the factor matrices; a short NumPy sketch of this
# bookkeeping is given at the end of the example.
#
# Similar results can also be observed here in terms of the replicability of the patterns.
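
###############################################################################
# The following minimal sketch (illustration only, using hypothetical fold indices) shows the
# same bookkeeping with NumPy: ``np.intersect1d`` with ``return_indices=True`` returns the
# common original indices together with their row positions in each subset, which is exactly
# what is needed to slice the factor matrices.

fold_i = np.array([0, 2, 3, 5])  # original sample indices kept in subset i
fold_j = np.array([1, 2, 5, 6])  # original sample indices kept in subset j

common, rows_in_i, rows_in_j = np.intersect1d(fold_i, fold_j, return_indices=True)
print(common)     # [2 5] -> samples present in both subsets
print(rows_in_i)  # [1 3] -> their row positions in the factor matrix of subset i
print(rows_in_j)  # [1 2] -> their row positions in the factor matrix of subset j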

setup.cfg

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ docs =
     tensorly-sphinx-theme
     plotly>=4.12
     torch
+    scikit-learn
 
 test =
     pytest
