
Commit 975afa6

CLI first commit (drevalpy --help and drevalpy-report --help)
1 parent 7540bb0

File tree

5 files changed (+225, -123 lines)

create_report.py

Lines changed: 0 additions & 115 deletions
This file was deleted.

drevalpy/cli.py

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
"""Main script to run the drug response evaluation pipeline."""

from drevalpy.utils import get_parser, main


def cli_main():
    """Command line interface entry point for the drug response evaluation pipeline."""
    args = get_parser().parse_args()
    main(args)


if __name__ == "__main__":
    cli_main()
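
Note: together with the [tool.poetry.scripts] entry added in pyproject.toml below, this module backs the new drevalpy console command; because of the __main__ guard it should also be runnable directly as python -m drevalpy.cli.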
drevalpy/visualization/create_report.py

Lines changed: 208 additions & 0 deletions
@@ -0,0 +1,208 @@
"""Generate evaluation reports after running a drug response experiment."""

import argparse
import os
import pathlib
from collections.abc import Iterable
from typing import Union

import pandas as pd

from drevalpy.visualization.utils import (
    create_html,
    create_index_html,
    create_output_directories,
    draw_algorithm_plots,
    draw_test_mode_plots,
    parse_results,
    prep_results,
    write_results,
)


def generate_reports_for_test_mode(
    test_mode: str,
    evaluation_results: pd.DataFrame,
    evaluation_results_per_drug: pd.DataFrame,
    evaluation_results_per_cell_line: pd.DataFrame,
    true_vs_pred: pd.DataFrame,
    run_id: str,
    path_data: Union[str, pathlib.Path],
    result_path: Union[str, pathlib.Path],
) -> None:
    """
    Generate reports (plots and HTML) for a single test mode.

    :param test_mode: The test mode to generate reports for.
    :param evaluation_results: Aggregated evaluation results.
    :param evaluation_results_per_drug: Evaluation results per drug.
    :param evaluation_results_per_cell_line: Evaluation results per cell line.
    :param true_vs_pred: True vs predicted values.
    :param run_id: Unique run identifier.
    :param path_data: Path to the dataset directory.
    :param result_path: Path to the results directory.
    """
    print(f"Generating report for {test_mode} ...")
    unique_algos: list[str] = draw_test_mode_plots(
        test_mode=test_mode,
        ev_res=evaluation_results,
        ev_res_per_drug=evaluation_results_per_drug,
        ev_res_per_cell_line=evaluation_results_per_cell_line,
        custom_id=run_id,
        path_data=path_data,
        result_path=result_path,
    )
    unique_algos_set = set(unique_algos) - {
        "NaiveMeanEffectsPredictor",
        "NaivePredictor",
        "NaiveCellLineMeansPredictor",
        "NaiveTissueMeansPredictor",
        "NaiveDrugMeanPredictor",
    }
    for algorithm in unique_algos_set:
        draw_algorithm_plots(
            model=algorithm,
            ev_res=evaluation_results,
            ev_res_per_drug=evaluation_results_per_drug,
            ev_res_per_cell_line=evaluation_results_per_cell_line,
            t_vs_p=true_vs_pred,
            test_mode=test_mode,
            custom_id=run_id,
            result_path=result_path,
        )

    all_files: list[str] = []
    for _, _, files in os.walk(f"{result_path}/{run_id}"):
        for file in files:
            if file.endswith("json") or (
                file.endswith(".html") and file not in ["index.html", "LPO.html", "LCO.html", "LDO.html"]
            ):
                all_files.append(file)

    create_html(
        run_id=run_id,
        test_mode=test_mode,
        files=all_files,
        prefix_results=f"{result_path}/{run_id}",
    )


def generate_reports_for_all_test_modes(
    test_modes: Iterable[str],
    evaluation_results: pd.DataFrame,
    evaluation_results_per_drug: pd.DataFrame,
    evaluation_results_per_cell_line: pd.DataFrame,
    true_vs_pred: pd.DataFrame,
    run_id: str,
    path_data: Union[str, pathlib.Path],
    result_path: Union[str, pathlib.Path],
) -> None:
    """
    Generate reports for all test modes.

    :param test_modes: Iterable of test modes to process.
    :param evaluation_results: Aggregated evaluation results.
    :param evaluation_results_per_drug: Evaluation results per drug.
    :param evaluation_results_per_cell_line: Evaluation results per cell line.
    :param true_vs_pred: True vs predicted values.
    :param run_id: Unique run identifier.
    :param path_data: Path to the dataset directory.
    :param result_path: Path to the results directory.
    """
    for test_mode in test_modes:
        generate_reports_for_test_mode(
            test_mode=test_mode,
            evaluation_results=evaluation_results,
            evaluation_results_per_drug=evaluation_results_per_drug,
            evaluation_results_per_cell_line=evaluation_results_per_cell_line,
            true_vs_pred=true_vs_pred,
            run_id=run_id,
            path_data=path_data,
            result_path=result_path,
        )


def render_report(
    run_id: str,
    dataset: str,
    path_data: Union[str, pathlib.Path] = "data",
    result_path: Union[str, pathlib.Path] = "results",
) -> None:
    """
    Render a full evaluation report pipeline.

    :param run_id: Unique run identifier for locating results.
    :param dataset: Dataset name to filter results.
    :param path_data: Path to the dataset directory. Defaults to "data".
    :param result_path: Path to the results directory. Defaults to "results".

    :raises AssertionError: If the folder with the run_id does not exist under result_path.
    """
    path_data = pathlib.Path(path_data).resolve()
    result_path = pathlib.Path(result_path).resolve()

    if not os.path.exists(f"{result_path}/{run_id}"):
        raise AssertionError(f"Folder {result_path}/{run_id} does not exist. The pipeline has to be run first.")

    (
        evaluation_results,
        evaluation_results_per_drug,
        evaluation_results_per_cell_line,
        true_vs_pred,
    ) = parse_results(path_to_results=f"{result_path}/{run_id}", dataset=dataset)

    (
        evaluation_results,
        evaluation_results_per_drug,
        evaluation_results_per_cell_line,
        true_vs_pred,
    ) = prep_results(
        evaluation_results, evaluation_results_per_drug, evaluation_results_per_cell_line, true_vs_pred, path_data
    )

    write_results(
        path_out=f"{result_path}/{run_id}/",
        eval_results=evaluation_results,
        eval_results_per_drug=evaluation_results_per_drug,
        eval_results_per_cl=evaluation_results_per_cell_line,
        t_vs_p=true_vs_pred,
    )

    create_output_directories(result_path, run_id)
    test_modes = evaluation_results["test_mode"].unique()

    generate_reports_for_all_test_modes(
        test_modes=test_modes,
        evaluation_results=evaluation_results,
        evaluation_results_per_drug=evaluation_results_per_drug,
        evaluation_results_per_cell_line=evaluation_results_per_cell_line,
        true_vs_pred=true_vs_pred,
        run_id=run_id,
        path_data=path_data,
        result_path=result_path,
    )

    create_index_html(
        custom_id=run_id,
        test_modes=test_modes,
        prefix_results=f"{result_path}/{run_id}",
    )


def main() -> None:
    """
    Command line interface entry point for rendering evaluation reports.

    Parses command line arguments and calls render_report.
    """
    parser = argparse.ArgumentParser(description="Generate reports from evaluation results")
    parser.add_argument("--run_id", required=True, help="Run ID for the current execution")
    parser.add_argument("--dataset", required=True, help="Dataset name for which to render the result file")
    parser.add_argument("--path_data", default="data", help="Path to the data")
    parser.add_argument("--result_path", default="results", help="Path to the results")
    args = parser.parse_args()
    render_report(args.run_id, args.dataset, args.path_data, args.result_path)


if __name__ == "__main__":
    main()
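
For orientation, a minimal sketch of calling the new module programmatically; the run ID "my_run" and dataset "GDSC1" are hypothetical placeholders, not values from this commit:

    # Equivalent to: drevalpy-report --run_id my_run --dataset GDSC1
    # ("my_run" and "GDSC1" are placeholders for a finished pipeline run.)
    from drevalpy.visualization.create_report import render_report

    render_report(run_id="my_run", dataset="GDSC1", path_data="data", result_path="results")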

pyproject.toml

Lines changed: 4 additions & 0 deletions
@@ -6,6 +6,10 @@ authors = ["DrEvalPy development team"]
 license = "GPL-3.0"
 readme = "README.md"

+[tool.poetry.scripts]
+drevalpy = "drevalpy.cli:cli_main"
+drevalpy-report = "drevalpy.visualization.create_report:main"
+
 [tool.poetry.dependencies]
 python = ">=3.11,<3.14"
 numpy = ">=1.20"
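
Once the package is installed (e.g. via poetry install or pip install .), these script entries expose the two commands named in the commit message. A sketch of the expected invocations; "my_run" and "my_dataset" are placeholder values for the required --run_id and --dataset arguments (--path_data and --result_path default to "data" and "results"):

    drevalpy --help
    drevalpy-report --help
    drevalpy-report --run_id my_run --dataset my_dataset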

run_suite.py

Lines changed: 0 additions & 8 deletions
This file was deleted.
