Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 39 additions & 0 deletions benchmarks/data/group_size_scaling.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
N_PARTICLES=13824
msm_6: 72600.0 +/- 500.0 particles/sec
msm_8: 58846 +/- 0.0 particles/sec
msm_12: 40047 +/- 0.0 particles/sec
Cs (order=2): 52402.4 +/- 1114.4 particles/sec
Ch (order=2): 52591.7 +/- 397.6 particles/sec
Ci (order=2): 52601.6 +/- 382.0 particles/sec
C2 (order=2): 52655.2 +/- 230.0 particles/sec
C3 (order=3): 52935.4 +/- 471.9 particles/sec
C4 (order=4): 52179.4 +/- 1947.9 particles/sec
C5 (order=5): 51936.4 +/- 529.7 particles/sec
C6 (order=6): 52050.1 +/- 444.7 particles/sec
C2h (order=4): 52870.7 +/- 160.4 particles/sec
C3h (order=6): 52792.1 +/- 282.4 particles/sec
C4h (order=8): 52282.2 +/- 437.6 particles/sec
C2v (order=4): 53220.6 +/- 454.7 particles/sec
C3v (order=6): 52633.7 +/- 912.1 particles/sec
C4v (order=8): 52363.0 +/- 500.1 particles/sec
C5v (order=10): 51678.3 +/- 267.2 particles/sec
C6v (order=12): 51448.8 +/- 483.5 particles/sec
S4 (order=4): 52861.1 +/- 628.6 particles/sec
S6 (order=6): 53297.4 +/- 213.5 particles/sec
D2 (order=4): 53136.5 +/- 258.3 particles/sec
D3 (order=6): 52671.2 +/- 525.9 particles/sec
D4 (order=8): 52104.9 +/- 327.4 particles/sec
D2h (order=8): 53101.3 +/- 182.4 particles/sec
D2d (order=8): 52123.0 +/- 445.4 particles/sec
C5v (order=10): 51678.3 +/- 267.2 particles/sec
D5 (order=10): 51652.8 +/- 306.0 particles/sec
C6v (order=12): 51448.8 +/- 483.5 particles/sec
D6 (order=12): 51443.9 +/- 272.7 particles/sec
D3h (order=12): 51583.2 +/- 741.5 particles/sec
D3d (order=12): 50946.7 +/- 658.7 particles/sec
T (order=12): 50904.8 +/- 164.9 particles/sec
D4h (order=16): 50637.7 +/- 429.9 particles/sec
Td (order=24): 48876.9 +/- 303.3 particles/sec
Th (order=24): 48292.9 +/- 309.9 particles/sec
O (order=24): 48295.1 +/- 143.4 particles/sec
Oh (order=48): 44409.5 +/- 120.3 particles/sec
22 changes: 22 additions & 0 deletions benchmarks/data/m1.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
N_PARTICLES=13824
N_SAMPLES=5
threads=8
msm: 80.9339ms +/- 3.7259ms (0.0059ms/particle)
pgop: 6292.0852ms +/- 142.7809ms (0.4552ms/particle)
pgop_60: 1425.7687ms +/- 8.3377ms (0.1031ms/particle)
pgop_none: 81.6757ms +/- 2.0341ms (0.0059ms/particle)
threads=4
msm: 80.2477ms +/- 3.1067ms (0.0058ms/particle)
pgop: 9864.4966ms +/- 1870.5612ms (0.7136ms/particle)
pgop_60: 2283.1693ms +/- 142.5297ms (0.1652ms/particle)
pgop_none: 89.0181ms +/- 3.8211ms (0.0064ms/particle)
threads=2
msm: 82.8373ms +/- 20.5660ms (0.0060ms/particle)
pgop: 17635.5839ms +/- 155.5413ms (1.2757ms/particle)
pgop_60: 4105.8049ms +/- 7.4065ms (0.2970ms/particle)
pgop_none: 97.0240ms +/- 0.6562ms (0.0070ms/particle)
threads=1
msm: 101.1376ms +/- 6.9573ms (0.0073ms/particle)
pgop: 34573.7700ms +/- 124.5629ms (2.5010ms/particle)
pgop_60: 8181.1460ms +/- 62.3668ms (0.5918ms/particle)
pgop_none: 123.4048ms +/- 0.9036ms (0.0089ms/particle)
42 changes: 42 additions & 0 deletions benchmarks/data/purdue_anvil.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
N_PARTICLES=13824
N_SAMPLES=4
threads=128
msm: 51.9090ms +/- 0.5022ms (0.0038ms/particle)
pgop: 529.6289ms +/- 12.6028ms (0.0383ms/particle)
pgop_60: 295.6043ms +/- 6.6048ms (0.0214ms/particle)
pgop_none: 245.4471ms +/- 3.2900ms (0.0178ms/particle)
threads=64
msm: 64.8922ms +/- 1.0058ms (0.0047ms/particle)
pgop: 822.3226ms +/- 0.7015ms (0.0595ms/particle)
pgop_60: 360.9771ms +/- 0.3573ms (0.0261ms/particle)
pgop_none: 229.0684ms +/- 1.8090ms (0.0166ms/particle)
threads=32
msm: 92.3175ms +/- 0.9687ms (0.0067ms/particle)
pgop: 1321.8952ms +/- 0.7228ms (0.0956ms/particle)
pgop_60: 479.0818ms +/- 0.1181ms (0.0347ms/particle)
pgop_none: 224.2708ms +/- 1.4200ms (0.0162ms/particle)
threads=16
msm: 130.0362ms +/- 4.5255ms (0.0094ms/particle)
pgop: 2434.1192ms +/- 17.3761ms (0.1761ms/particle)
pgop_60: 736.6101ms +/- 3.3744ms (0.0533ms/particle)
pgop_none: 226.1373ms +/- 1.4849ms (0.0164ms/particle)
threads=8
msm: 198.9804ms +/- 17.5086ms (0.0144ms/particle)
pgop: 4607.9953ms +/- 3.2402ms (0.3333ms/particle)
pgop_60: 1242.5406ms +/- 1.2996ms (0.0899ms/particle)
pgop_none: 229.2853ms +/- 0.6645ms (0.0166ms/particle)
threads=4
msm: 150.4918ms +/- 13.5604ms (0.0109ms/particle)
pgop: 9156.2837ms +/- 63.8262ms (0.6623ms/particle)
pgop_60: 2310.9283ms +/- 1.4845ms (0.1672ms/particle)
pgop_none: 236.5472ms +/- 1.5490ms (0.0171ms/particle)
threads=2
msm: 124.6271ms +/- 0.3919ms (0.0090ms/particle)
pgop: 17631.9658ms +/- 4.5110ms (1.2755ms/particle)
pgop_60: 4265.1867ms +/- 2.0683ms (0.3085ms/particle)
pgop_none: 252.2952ms +/- 2.5180ms (0.0183ms/particle)
threads=1
msm: 190.3596ms +/- 0.7450ms (0.0138ms/particle)
pgop: 35862.4779ms +/- 1112.7420ms (2.5942ms/particle)
pgop_60: 8371.2325ms +/- 12.2431ms (0.6056ms/particle)
pgop_none: 291.2379ms +/- 0.8136ms (0.0211ms/particle)
168 changes: 168 additions & 0 deletions benchmarks/group_size_scaling.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,168 @@
# Copyright (c) 2021-2026 The Regents of the University of Michigan
# Part of spatula, released under the BSD 3-Clause License.

r"""Generate benchmark data comparing PGOP runtime across different symmetry groups.

Run:
```bash
python benchmarks/group_size_scaling.py \
--data-file path/to/file.gsd --output benchmarks/data/group_size_scaling.txt
```
"""

# ruff: noqa: D103, B023, N806
import argparse
import timeit
import warnings
from pathlib import Path

import freud
import gsd.hoomd
import numpy as np

import spatula
from spatula.representations import CartesianRepMatrix

# Silence all warnings so benchmark output stays clean.
# NOTE(review): this is a blanket suppression and will also hide real issues.
warnings.filterwarnings("ignore")

# Disable orientation optimization so timings measure only the raw PGOP
# evaluation, not the optimizer.
OPTIMIZER = spatula.optimize.NoOptimization()

# All point groups to benchmark, organized by family
# Orders are computed dynamically from CartesianRepMatrix
POINT_GROUPS = [
    # C family (low order)
    "Cs",
    "Ch",
    "Ci",
    "C2",
    "C3",
    "C4",
    "C5",
    "C6",
    "C2h",
    "C3h",
    "C4h",
    "C2v",
    "C3v",
    "C4v",
    "C5v",
    "C6v",
    # S family
    "S4",
    "S6",
    # D family
    "D2",
    "D3",
    "D4",
    "D5",
    "D6",
    "D2h",
    "D3h",
    "D4h",
    "D2d",
    "D3d",
    # Polyhedral groups
    "T",
    "Td",
    "Th",
    "O",
    "Oh",
]


def get_symmetry_order(symmetry: str) -> int:
    """Return the order of a point group.

    The order is the number of Cartesian representation matrices that
    spatula associates with the group symbol.
    """
    representation = CartesianRepMatrix(symmetry)
    return len(representation.matrices)


def make_voronoi(box, points):
    """Compute and return the Voronoi tessellation of *points* in *box*."""
    tessellation = freud.locality.Voronoi()
    tessellation.compute((box, points))
    return tessellation


def compute_pgop(symmetry, system, voronoi: freud.locality.Voronoi):
    """Run a full-mode PGOP computation for a single symmetry group."""
    order_parameter = spatula.PGOP([symmetry], optimizer=OPTIMIZER, mode="full")
    result = order_parameter.compute(
        system, sigmas=0.0175, neighbors=voronoi.nlist
    )
    return result


def compute_msm(l: int, system, voronoi: freud.locality.Voronoi):
    """Evaluate the weighted Steinhardt order parameter for the given l."""
    steinhardt = freud.order.Steinhardt(l=l, weighted=True)
    return steinhardt.compute(system, neighbors=voronoi.nlist)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data-file",
        type=Path,
        required=True,
        help="Input GSD file",
    )
    parser.add_argument(
        "--output",
        type=Path,
        required=True,
        help="Output file for benchmark data",
    )
    parser.add_argument(
        "--threads",
        type=int,
        default=1,
        help="Number of threads (default: 1)",
    )
    args = parser.parse_args()

    SAMPLES = 1  # calls per timed measurement
    REPEATS = 10  # timed measurements per point group
    MSM_L_VALUES = [6, 8, 12]  # spherical-harmonic orders for the MSM baseline

    # Use the last frame of the trajectory as the benchmark configuration.
    with gsd.hoomd.open(args.data_file) as f:
        frame = f[-1]
        box, points = (
            freud.Box(*frame.configuration.box),
            frame.particles.position,
        )
        N_PARTICLES = len(points)

    # Build the Voronoi neighbor list once, outside the timed regions, so it
    # is shared by every benchmarked method and never contributes to timings.
    voronoi = make_voronoi(box, points)
    freud.set_num_threads(args.threads)
    spatula.set_num_threads(args.threads) if False else None  # placeholder removed
    spatula.util.set_num_threads(args.threads)

    # Each entry is (label, group order, per-repeat particles/sec array).
    results = []

    # Benchmark MSM for each L value. MSM is much faster than PGOP, so use
    # 50x the repeats to gather comparable statistics.
    for l in MSM_L_VALUES:
        msm_times = timeit.repeat(
            lambda l=l: compute_msm(l, (box, points), voronoi),  # bind l now
            number=SAMPLES,
            repeat=REPEATS * 50,
        )
        # throughput in particles/sec (equivalent to N * 1000 / time_ms)
        msm_particles_per_sec_arr = N_PARTICLES / np.array(msm_times)
        results.append((f"msm_{l}", 0, msm_particles_per_sec_arr))  # order=0: not a point group

    for symmetry in POINT_GROUPS:
        order = get_symmetry_order(symmetry)
        times = timeit.repeat(
            # Bind the loop variable as a default argument to avoid relying
            # on late binding (the reason for the B023 noqa at file top).
            lambda symmetry=symmetry: compute_pgop(symmetry, (box, points), voronoi),
            number=SAMPLES,
            repeat=REPEATS,
        )
        particles_per_sec_arr = N_PARTICLES / np.array(times)
        results.append((symmetry, order, particles_per_sec_arr))

    # Write results, creating parent directories as needed.
    args.output.parent.mkdir(parents=True, exist_ok=True)
    with args.output.open("w") as f:
        f.write(f"N_PARTICLES={N_PARTICLES}\n")
        f.write(f"THREADS={args.threads}\n")
        for method, order, arr in results:
            f.write(
                f" {method} (order={order}): "
                f"{arr.mean():.1f} +/- {arr.std():.1f} particles/sec\n"
            )

    print(f"Benchmark data written to {args.output}")
Loading