Commit 2d29247

pages: Add overview table
1 parent aca4a6e commit 2d29247

3 files changed: 123 additions, 3 deletions


.github/workflows/gh-pages.yml

Lines changed: 7 additions & 0 deletions

@@ -29,8 +29,15 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: 3.13
+      - name: Install dependencies
+        run: |
+          cd src/python
+          pip install --upgrade pip
+          pip install -e .[site]
       - name: Build site
         run: src/python/build_site.py
+      - name: Create overview
+        run: bmp-create-overview --html-file=_site/index.html
       - name: Setup Pages
         uses: actions/configure-pages@v5
       - name: Upload artifact
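
The two new steps install the package with its "site" extra and then export the overview table into the published site. For a quick local check of the same export, the HTML-export function added in overview.py below can also be called directly from Python. A minimal sketch, assuming the package was installed with `pip install -e .[site]` from src/python and that _site/ is the intended output directory, as in the workflow:

    from pathlib import Path

    from benchmark_models_petab.overview import create_html_table

    # Mirrors the workflow's "Create overview" step: writes the Bokeh-based
    # overview table to _site/index.html (the function creates missing
    # parent directories itself).
    create_html_table(dest=Path("_site/index.html"))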

src/python/benchmark_models_petab/overview.py

Lines changed: 115 additions & 3 deletions

@@ -15,6 +15,7 @@
 import sympy as sp
 from sympy.core.relational import Relational

+REPO_URL = "https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/"

 readme_md = Path(__file__).resolve().parent.parent / "README.md"

@@ -324,19 +325,130 @@ def show_overview_table(
     print(df)


+def create_html_table(dest: Path) -> None:
+    """Create HTML table with stats for all benchmark PEtab problems.
+
+    :param dest: Path to the output HTML file.
+    """
+    from bokeh.io import output_file, save
+    from bokeh.layouts import column
+    from bokeh.models import (
+        ColumnDataSource,
+        DataTable,
+        TableColumn,
+        Div,
+        StringFormatter,
+        NumberFormatter,
+        HTMLTemplateFormatter,
+        InlineStyleSheet,
+    )
+
+    dest.parent.mkdir(parents=True, exist_ok=True)
+
+    # get the overview dataframe and prettify it
+    df = get_overview_df()
+    df["possible_discontinuities"] = df["possible_discontinuities"].apply(
+        lambda x: "✓" if x else ""
+    )
+    df.fillna({"objective_prior_distributions": ""}, inplace=True)
+
+    def get_formatter(col: str):
+        """Get the appropriate formatter for the column."""
+        if col not in df.columns:
+            # index column
+            return HTMLTemplateFormatter(
+                template=f"""
+                <a href="{REPO_URL}tree/master/Benchmark-Models/<%= value %>"><%= value %></a>
+                """
+            )
+        if pd.api.types.is_integer_dtype(df[col].dtype):
+            return NumberFormatter(text_align="right")
+        if col in ["reference_uris", "sbml4humans_urls"]:
+            return HTMLTemplateFormatter(
+                template="""
+                <%
+                if (Array.isArray(value)) {
+                    urls = value;
+                } else {
+                    const sanitizedValue = value.replace(/'/g, '"');
+                    const urls = JSON.parse(sanitizedValue);
+                    console.log('Parsed JSON:', urls);
+                }
+                for (let i = 0; i < urls.length; i++) {
+                %>
+                <a href="<%= urls[i] %>" target="_blank"><%= urls[i] %></a>
+                <% } %>
+                """
+            )
+
+        return StringFormatter()
+
+    columns = [
+        TableColumn(
+            field=col,
+            title=markdown_columns.get(col, col),
+            formatter=get_formatter(col),
+            width=len(col),
+        )
+        for col in df.reset_index().columns
+    ]
+
+    source = ColumnDataSource(df)
+
+    css = InlineStyleSheet(
+        css="""
+        .slick-header-column {
+            background-color: #f4f4f4;
+            font-weight: bold;
+        }
+        """
+    )
+
+    data_table = DataTable(
+        source=source,
+        columns=columns,
+        sortable=True,
+        sizing_mode="stretch_both",
+    )
+    data_table.stylesheets.append(css)
+
+    heading = Div(text="<h1>Benchmark Problems</h1>")
+    preamble = Div(
+        text=f"""
+        <p>
+        This table provides an overview of the benchmark problems
+        available in the <a href="{REPO_URL}">Benchmark-Models-PEtab</a>
+        repository.
+        </p>
+        """
+    )
+    layout = column(heading, preamble, data_table, sizing_mode="stretch_both")
+    output_file(dest, title="Benchmark Problems")
+    save(layout)
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Show overview table for benchmark PEtab problems"
     )
-    parser.add_argument(
+    group1 = parser.add_mutually_exclusive_group()
+    group1.add_argument(
         "--markdown", action="store_true", help="Output in markdown format"
     )
-    parser.add_argument(
+    group1.add_argument(
         "--update",
         action="store_true",
         help="Update the README.md file with the overview table",
     )

+    group2 = parser.add_mutually_exclusive_group()
+    group2.add_argument(
+        "--html-file", help="Output the overview table to an HTML file"
+    )
+
     args = parser.parse_args()

-    show_overview_table(markdown=args.markdown, update_readme=args.update)
+    if args.html_file:
+        create_html_table(dest=Path(args.html_file))
+    else:
+        show_overview_table(markdown=args.markdown, update_readme=args.update)
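
The new DataTable columns lean on Bokeh's HTMLTemplateFormatter, which renders each cell through an underscore.js-style template. As a self-contained illustration of that pattern (not the repository's code: the column name petab_problem_id, the example row, and the output file name are made up for this sketch), the index-column formatter boils down to:

    from bokeh.io import output_file, save
    from bokeh.models import (
        ColumnDataSource,
        DataTable,
        HTMLTemplateFormatter,
        TableColumn,
    )

    REPO_URL = "https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/"

    # Toy data standing in for the overview dataframe's index column.
    source = ColumnDataSource(data={"petab_problem_id": ["Boehm_JProteomeRes2014"]})

    # Same idea as the index-column formatter above: render the cell value as a
    # link into the Benchmark-Models directory of the repository.
    link_formatter = HTMLTemplateFormatter(
        template=f'<a href="{REPO_URL}tree/master/Benchmark-Models/<%= value %>"><%= value %></a>'
    )

    table = DataTable(
        source=source,
        columns=[
            TableColumn(
                field="petab_problem_id",
                title="Problem",
                formatter=link_formatter,
            )
        ],
    )

    output_file("formatter_demo.html", title="Formatter demo")
    save(table)

Opening formatter_demo.html shows a one-row table whose cell links into the repository's Benchmark-Models directory, which is what the index-column formatter in create_html_table does for every problem.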

src/python/pyproject.toml

Lines changed: 1 addition & 0 deletions

@@ -28,6 +28,7 @@ dependencies = [

 [project.optional-dependencies]
 dev = ["pre-commit", "pytest", "ruff"]
+site = ["bokeh>=3.7.3"]

 [project.scripts]
 bmp-petablint = "benchmark_models_petab.check_petablint:main"
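
Since the site build now relies on the optional "site" extra, a quick way to confirm it is present in a given environment is to query the installed Bokeh version. A minimal sketch using only the standard library (the bokeh>=3.7.3 bound comes from the line added above):

    from importlib.metadata import PackageNotFoundError, version

    # The "site" extra requires bokeh>=3.7.3; report what is installed, if anything.
    try:
        print("bokeh", version("bokeh"))
    except PackageNotFoundError:
        print("bokeh is not installed; run `pip install -e .[site]` in src/python")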
