Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions extras/benchmark/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# SPDX-License-Identifier: MPL-2.0

[project]
name = ""
name = "traccc-bench-tools"
version = "0.0.1"
requires-python = "~=3.9"
dependencies = [
Expand All @@ -15,9 +15,6 @@ dependencies = [
[dependency-groups]
dev = ["black>=25.1.0,<26"]

[tool.uv]
package = false

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
5 changes: 5 additions & 0 deletions extras/cut_optimiser/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
*.csv
*.csv.bak
*.json
.venv/
!example.json
18 changes: 18 additions & 0 deletions extras/cut_optimiser/example.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"input": {
"event_dir": "/data/Acts/odd-simulations-20240506/geant4_ttbar_mu200",
"digitization_file": "geometries/odd/odd-digi-geometric-config.json",
"detector_file": "geometries/odd/odd-detray_geometry_detray.json",
"grid_file": "geometries/odd/odd-detray_surface_grids_detray.json",
"material_file": "geometries/odd/odd-detray_material_detray.json"
},
"config": {
"truth-finding-min-track-candidates": 7,
"track-candidates-range": "7:100"
},
"parameters": {
"max-num-branches-per-surface": [1, 2, 3],
"max-num-skipping-per-cand": [2, 3],
"chi2-max": [2.5, 5.0, 7.5, 10.0, 15.0, 20.0]
}
}
97 changes: 97 additions & 0 deletions extras/cut_optimiser/find_pareto_set.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import argparse
import csv
import logging
import pathlib


log = logging.getLogger("find_pareto_set")


def _dominates(a, b):
    """Return True if result *a* Pareto-dominates result *b*.

    *a* dominates *b* when it is at least as good in every metric (lower
    reciprocal throughput, higher efficiency, lower fake and duplicate
    rates) and strictly better in at least one of them.  Requiring one
    strict improvement keeps tied optima in the Pareto set instead of
    letting two metrically identical setups eliminate each other.
    """
    at_least_as_good = (
        a["rec_throughput"] <= b["rec_throughput"]
        and a["efficiency"] >= b["efficiency"]
        and a["fake_rate"] <= b["fake_rate"]
        and a["duplicate_rate"] <= b["duplicate_rate"]
    )
    strictly_better = (
        a["rec_throughput"] < b["rec_throughput"]
        or a["efficiency"] > b["efficiency"]
        or a["fake_rate"] < b["fake_rate"]
        or a["duplicate_rate"] < b["duplicate_rate"]
    )
    return at_least_as_good and strictly_better


def main():
    """Find and log the Pareto-optimal setups in a CSV results database.

    Reads the CSV file named on the command line, keeps the rows whose
    ``success`` column is non-zero, and logs the subset of results that
    are not Pareto-dominated on (efficiency, fake rate, duplicate rate,
    reciprocal throughput).  All CSV columns are assumed to hold numeric
    values.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "db",
        type=pathlib.Path,
        help="the CSV database file",
    )

    parser.add_argument(
        "-v",
        "--verbose",
        help="enable verbose output",
        action="store_true",
    )

    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    results = []
    total_results = 0

    # newline="" is the csv-module-recommended way to open CSV files.
    with open(args.db, "r", newline="") as f:
        for row in csv.DictReader(f):
            total_results += 1
            # Failed runs (success == "0") carry no usable metrics.
            if row["success"] != "0":
                results.append({k: float(v) for k, v in row.items()})

    log.info(
        "Database contained %d results of which %d are valid",
        total_results,
        len(results),
    )

    pareto_set = []

    for m in results:
        for n in results:
            if _dominates(n, m):
                # NOTE: it is m (the dominated result) that is removed.
                log.debug(
                    "Removing %s from the Pareto set because %s is superior",
                    str(m),
                    str(n),
                )
                break
        else:
            pareto_set.append(m)

    log.info("Pareto set contains %d elements:", len(pareto_set))

    # Metric columns are printed explicitly; everything else is treated
    # as a setup (cut) parameter and listed in the trailing braces.
    metric_keys = [
        "efficiency",
        "fake_rate",
        "duplicate_rate",
        "rec_throughput",
        "success",
    ]

    for entry in sorted(pareto_set, key=lambda x: x["rec_throughput"], reverse=True):
        log.info(
            " Eff. %.2f, fake rate %.2f, duplicate rate %.2f with reciprocal throughput %.1fms is achieved by setup {%s}",
            100.0 * entry["efficiency"],
            entry["fake_rate"],
            entry["duplicate_rate"],
            entry["rec_throughput"] * 1000.0,
            ", ".join(
                "%s: %s" % (k, str(v))
                for k, v in entry.items()
                if k not in metric_keys
            ),
        )


# Standard script entry guard: run only when executed directly.
if __name__ == "__main__":
    main()
Loading
Loading