Skip to content

Commit eb5068b

Browse files
authored
Merge pull request #34 from Aharoni-Lab/restructure-tests
Restructure Testing Framework
2 parents 42a9fe0 + ee1767c commit eb5068b

File tree

79 files changed

+807
-211896
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

79 files changed

+807
-211896
lines changed

.github/workflows/ci.yml

Lines changed: 59 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -10,17 +10,15 @@ jobs:
1010
format:
1111
runs-on: ubuntu-latest
1212
steps:
13-
- uses: actions/checkout@v3
14-
with:
15-
ref: ${{ github.head_ref }}
13+
- uses: actions/checkout@v4
1614

1715
- name: Set up Python
18-
uses: actions/setup-python@v4
16+
uses: actions/setup-python@v5
1917
with:
2018
python-version: "3.11"
2119

2220
- name: Cache PDM dependencies
23-
uses: actions/cache@v3
21+
uses: actions/cache@v4
2422
with:
2523
path: |
2624
~/.cache/pdm
@@ -36,31 +34,32 @@ jobs:
3634
3735
- name: Install dependencies
3836
run: |
39-
pdm install --no-lock -G test
37+
pdm install --check --no-lock -G test
4038
41-
- name: Run black
39+
- name: Check code formatting (Black)
40+
id: black
4241
run: |
43-
pdm run black .
42+
pdm run black --check .
4443
45-
- name: Commit changes
46-
uses: stefanzweifel/git-auto-commit-action@v4
47-
with:
48-
commit_message: "style: format code with black"
49-
branch: ${{ github.head_ref }}
44+
- name: Help - Formatting
45+
if: failure() && steps.black.outcome == 'failure'
46+
run: |
47+
echo "::error::❌ Code formatting check failed."
48+
echo "To fix this, run the following command locally:"
49+
echo " pdm run black ."
5050
5151
lint:
52-
needs: format
5352
runs-on: ubuntu-latest
5453
steps:
55-
- uses: actions/checkout@v3
54+
- uses: actions/checkout@v4
5655

5756
- name: Set up Python
58-
uses: actions/setup-python@v4
57+
uses: actions/setup-python@v5
5958
with:
6059
python-version: "3.11"
6160

6261
- name: Cache PDM dependencies
63-
uses: actions/cache@v3
62+
uses: actions/cache@v4
6463
with:
6564
path: |
6665
~/.cache/pdm
@@ -74,43 +73,58 @@ jobs:
7473
python -m pip install --upgrade pip
7574
pip install pdm
7675
77-
- name: Lock dependencies
78-
run: |
79-
pdm fix
80-
pdm lock --group :all
81-
8276
- name: Install dependencies
8377
run: |
84-
pdm install --no-lock -G test
78+
pdm install --check --no-lock -G test
8579
8680
- name: Run flake8
81+
id: flake8
8782
run: |
8883
pdm run flake8
8984
85+
- name: Help - Flake8
86+
if: failure() && steps.flake8.outcome == 'failure'
87+
run: |
88+
echo "::error::❌ Flake8 check failed."
89+
echo "To fix this, run the following command locally:"
90+
echo " pdm run flake8"
91+
92+
- name: Run mypy
93+
id: mypy
94+
continue-on-error: true # Allow failure until codebase is fully typed
95+
run: |
96+
pdm run mypy .
97+
98+
- name: Help - Mypy
99+
if: steps.mypy.outcome == 'failure'
100+
run: |
101+
echo "::warning::⚠️ Mypy check failed (non-blocking)."
102+
echo "To view these errors locally, run:"
103+
echo " pdm run mypy ."
104+
90105
test:
91-
needs: lint
92106
runs-on: ubuntu-latest
93-
timeout-minutes: 30 # Add timeout to prevent hanging
107+
timeout-minutes: 30
94108
strategy:
95109
fail-fast: false
96110
matrix:
97111
python-version: ["3.11"]
98112

99113
steps:
100-
- uses: actions/checkout@v3
114+
- uses: actions/checkout@v4
101115

102116
- name: Set up Python ${{ matrix.python-version }}
103-
uses: actions/setup-python@v4
117+
uses: actions/setup-python@v5
104118
with:
105119
python-version: ${{ matrix.python-version }}
106120

107121
- name: Cache PDM dependencies
108-
uses: actions/cache@v3
122+
uses: actions/cache@v4
109123
with:
110124
path: |
111125
~/.cache/pdm
112126
.pdm-build
113-
key: ${{ runner.os }}-pdm-${{ hashFiles('**/pdm.lock') }}-${{ matrix.python-version }}
127+
key: ${{ runner.os }}-pdm-${{ matrix.python-version }}-${{ hashFiles('**/pdm.lock') }}
114128
restore-keys: |
115129
${{ runner.os }}-pdm-${{ matrix.python-version }}-
116130
@@ -119,28 +133,30 @@ jobs:
119133
python -m pip install --upgrade pip
120134
pip install pdm
121135
122-
- name: Lock dependencies
123-
run: |
124-
pdm fix
125-
pdm lock --group :all
126-
127136
- name: Install dependencies
128137
run: |
129-
pdm install --no-lock -G test
138+
pdm install --check --no-lock -G test
130139
131-
- name: Run tests
132-
run: |
133-
pdm run pytest tests/ -v -m "not slow" -n auto --cov=indeca --cov-report=xml
134-
pdm run plot_test
140+
- name: Run Unit Tests
141+
run: pdm run pytest tests/unit -v -n auto --cov=indeca --cov-report=xml
142+
143+
- name: Run Integration & Regression Tests
144+
run: pdm run pytest tests/integration tests/regression -v -n auto -m "not slow" --cov=indeca --cov-report=xml --cov-append
145+
146+
- name: Run Other Tests (Robustness, Validation, Demo)
147+
run: pdm run pytest tests/robustness tests/validation tests/demo -v -n auto -m "not slow" --cov=indeca --cov-report=xml --cov-append
148+
149+
- name: Generate Plot Artifacts
150+
run: pdm run plot_test
135151

136152
- uses: actions/upload-artifact@v4
137-
continue-on-error: true
153+
if: always() # Upload artifacts even if tests fail
138154
with:
139-
name: test_output
155+
name: test_output_${{ matrix.python-version }}
140156
path: tests/output
141157

142158
- name: Upload coverage to Codecov
143-
uses: codecov/codecov-action@v3
159+
uses: codecov/codecov-action@v4
144160
with:
145161
file: ./coverage.xml
146-
fail_ci_if_error: false # Don't fail if coverage upload fails
162+
fail_ci_if_error: false

benchmarks/routine/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
from scipy.spatial.distance import cdist
1313
from sklearn.metrics.pairwise import cosine_similarity
1414

15-
from indeca.deconv import max_thres
15+
from indeca.core.deconv import max_thres
1616
from indeca.utils import norm
1717

1818

benchmarks/s00_benchmark_realds.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
from routine.utils import compute_ROC
1515

1616
from indeca import set_package_log_level
17-
from indeca.deconv import construct_R
17+
from indeca.core.deconv import construct_R
1818
from indeca.pipeline import pipeline_bin
1919
from tests.testing_utils.io import download_realds, load_gt_ds
2020

benchmarks/s00_simulate_data.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
import numpy as np
66

7-
from indeca.simulation import generate_data
7+
from indeca.core.simulation import generate_data
88

99
OUT_PATH = "./intermediate/simulated/"
1010
PARAM_TAU_D = 6

benchmarks/s01_benchmark_AR.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,14 @@
77
import xarray as xr
88
from tqdm.auto import tqdm
99

10-
from indeca.AR_kernel import (
10+
from indeca.core.AR_kernel import (
1111
convolve_g,
1212
convolve_h,
1313
estimate_coefs,
1414
solve_fit_h,
1515
solve_g,
1616
)
17-
from indeca.simulation import AR2exp, eval_exp, find_dhm, tau2AR
17+
from indeca.core.simulation import AR2exp, eval_exp, find_dhm, tau2AR
1818
from indeca.utils import scal_like
1919

2020
IN_PATH = "./intermediate/simulated/simulated.nc"

benchmarks/s01_benchmark_bin.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@
1616
)
1717
from tqdm.auto import tqdm
1818

19-
from indeca.AR_kernel import estimate_coefs
20-
from indeca.deconv import construct_G, construct_R, max_thres
19+
from indeca.core.AR_kernel import estimate_coefs
20+
from indeca.core.deconv import construct_G, construct_R, max_thres
2121
from tests.testing_utils.cnmf import solve_deconv
2222

2323
IN_PATH = {

benchmarks/s01_benchmark_pipeline.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
from routine.utils import compute_ROC
2424

2525
from indeca.pipeline import pipeline_bin
26-
from indeca.simulation import exp_pulse
26+
from indeca.core.simulation import exp_pulse
2727
from tests.testing_utils.cnmf import pipeline_cnmf
2828

2929
IN_PATH = {
@@ -325,8 +325,11 @@
325325
from plotly.subplots import make_subplots
326326
from scipy.linalg import convolution_matrix
327327

328-
from indeca.simulation import ar_pulse, exp_pulse, tau2AR
329-
from indeca.update_bin import max_thres, scal_lstsq, solve_deconv
328+
from indeca.core.simulation import ar_pulse, exp_pulse, tau2AR
329+
from indeca.core.deconv import max_thres
330+
331+
# from indeca.core.deconv import solve_deconv # TODO: missing function
332+
from indeca.utils.utils import scal_lstsq
330333

331334
uid = 0
332335
iiter = 0
@@ -422,9 +425,12 @@
422425
from plotly.subplots import make_subplots
423426
from scipy.linalg import convolution_matrix
424427

425-
from indeca.AR_kernel import solve_fit_h
426-
from indeca.simulation import ar_pulse, exp_pulse, tau2AR
427-
from indeca.update_bin import max_thres, scal_lstsq, solve_deconv, solve_deconv_l0
428+
from indeca.core.AR_kernel import solve_fit_h
429+
from indeca.core.simulation import ar_pulse, exp_pulse, tau2AR
430+
from indeca.core.deconv import max_thres
431+
432+
# from indeca.core.deconv import solve_deconv, solve_deconv_l0 # TODO: missing functions
433+
from indeca.utils.utils import scal_lstsq
428434

429435
uid = 0
430436
iiter = 0

pyproject.toml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,11 @@ addopts = "-n auto -v --cov=indeca --cov-report=term-missing"
7373
markers = [
7474
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
7575
"integration: marks tests as integration tests",
76+
"unit: marks tests as unit tests",
77+
"regression: marks tests as regression tests",
78+
"robustness: marks tests as robustness tests",
79+
"validation: marks tests as validation tests",
80+
"demo: marks tests as demo tests",
7681
]
7782

7883
[build-system]

tests/demo/__init__.py

Whitespace-only changes.
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
import numpy as np
2+
import pandas as pd
3+
import pytest
4+
5+
from indeca.core.deconv import DeconvBin
6+
from tests.conftest import fixt_deconv
7+
from tests.testing_utils.metrics import compute_f1_metrics, df_assign_metadata
8+
from tests.testing_utils.plotting import plot_met_ROC_thres
9+
10+
pytestmark = pytest.mark.demo
11+
12+
13+
@pytest.mark.slow
14+
class TestDemoDeconv:
15+
@pytest.mark.parametrize("taus", [(6, 1), (10, 3)])
16+
@pytest.mark.parametrize("rand_seed", np.arange(15))
17+
@pytest.mark.parametrize("upsamp", [1, 2])
18+
@pytest.mark.parametrize("ns_lev", [0, 0.2, 0.5])
19+
@pytest.mark.parametrize("y_scaling", [False])
20+
def test_demo_solve_penal(
21+
self, taus, rand_seed, upsamp, ns_lev, y_scaling, test_fig_path_svg, results_bag
22+
):
23+
# act
24+
deconv, y, c, c_org, s, s_org, scale = fixt_deconv(
25+
taus=taus,
26+
rand_seed=rand_seed,
27+
upsamp=upsamp,
28+
ns_lev=ns_lev,
29+
y_scaling=y_scaling,
30+
)
31+
_, _, _, _, intm_free = deconv.solve_thres(
32+
scaling=False, amp_constraint=False, return_intm=True
33+
)
34+
_, _, _, _, intm_nopn = deconv.solve_thres(scaling=True, return_intm=True)
35+
_, _, _, _, opt_penal, intm_pn = deconv.solve_penal(
36+
scaling=True, return_intm=True
37+
)
38+
# save results
39+
intms = {"CNMF": intm_free, "No Penalty": intm_nopn, "Penalty": intm_pn}
40+
metdf = []
41+
for grp, cur_intm in intms.items():
42+
if grp == "Penalty":
43+
cur_svals = []
44+
oidx = intm_pn[7]
45+
for sv in intm_pn[2]:
46+
s_pad = np.zeros(deconv.T)
47+
s_pad[deconv.nzidx_s] = sv
48+
cur_svals.append(s_pad)
49+
else:
50+
cur_svals = cur_intm[2]
51+
cur_met = compute_f1_metrics(
52+
s_org,
53+
cur_svals,
54+
{
55+
"group": grp,
56+
"thres": cur_intm[1],
57+
"scals": cur_intm[5],
58+
"objs": cur_intm[6],
59+
"penal": opt_penal if grp == "Penalty" else 0,
60+
"opt_idx": cur_intm[7],
61+
},
62+
tdist_thres=2,
63+
)
64+
metdf.append(cur_met)
65+
metdf = pd.concat(metdf, ignore_index=True)
66+
metdf = df_assign_metadata(
67+
metdf,
68+
{"tau_d": taus[0], "tau_r": taus[1]},
69+
)
70+
results_bag.data = metdf
71+
# plotting
72+
fig = plot_met_ROC_thres(metdf, grad_color=False)
73+
fig.savefig(test_fig_path_svg)
74+
# assertion
75+
if ns_lev == 0 and upsamp == 1:
76+
assert (cur_svals[oidx][:-1] == s[:-1]).all()

0 commit comments

Comments (0)