#!/usr/bin/env python3
"""
Benchmark: Persist latency per event and per action cycle.

Extracts real event payloads from SWE-Bench evaluation conversation traces
and replays them through the SDK's LocalFileStore lock-and-write path to
measure per-event and per-cycle persist latency.

Usage:
    python bench_persist_latency.py --eval-dir <path-to-eval-run>
"""

import argparse
import gc
import json
import logging
import os
import shutil
import statistics
import tempfile
import time

from benchmark_utils import extract_conversation, read_event_files

from openhands.sdk.io import LocalFileStore


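# On-disk layout this benchmark replays: one JSON file per event under
# events/, serialized behind a single lock file, as in the SDK's event log.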
EVENTS_DIR_NAME = "events"
LOCK_FILE = "events/.eventlog.lock"


def measure_persist_latencies(event_files: list[dict]) -> list[dict]:
    """Replay the persist path EventLog.append() uses:
    lock -> write JSON file -> release lock

    Uses LocalFileStore directly with real event payloads.
    """
    tmpdir = tempfile.mkdtemp(prefix="bench_persist_")
    try:
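        # cache_limit_size is set past the event count so that (assuming the
        # store caches written entries) eviction never kicks in mid-run.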
        fs = LocalFileStore(tmpdir, cache_limit_size=len(event_files) + 100)

        results = []
        for i, ef in enumerate(event_files):
            target_path = f"{EVENTS_DIR_NAME}/{ef['filename']}"

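            # Keep a GC pause out of the timed window; the try/finally
            # restores collection even if the lock or write raises.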
            gc.disable()
            try:
                t0 = time.perf_counter()
                with fs.lock(LOCK_FILE, timeout=30.0):
                    fs.write(target_path, ef["json_str"])
                t1 = time.perf_counter()
            finally:
                gc.enable()

            results.append(
                {
                    "kind": ef["kind"],
                    "size_bytes": ef["size_bytes"],
                    "persist_ms": (t1 - t0) * 1000,
                    "event_idx": i,
                }
            )
        return results
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)


def main():
    logging.getLogger("openhands").setLevel(logging.ERROR)

    parser = argparse.ArgumentParser(
        description="Benchmark persist latency per event/action cycle"
    )
    parser.add_argument(
        "--eval-dir",
        required=True,
        help="Path to evaluation run directory",
    )
    parser.add_argument(
        "--output",
        default="bench_persist_latency_results.json",
        help="Output JSON file path",
    )
    parser.add_argument(
        "--sample-step",
        type=int,
        default=15,
        help="Sample every Nth conversation (default: 15)",
    )
    args = parser.parse_args()

    # Load instance metadata
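    # output.jsonl is expected to hold one JSON object per evaluated
    # instance; keying by instance_id lets us match tarballs below.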
    instances = {}
    with open(os.path.join(args.eval_dir, "output.jsonl")) as f:
        for line in f:
            d = json.loads(line)
            instances[d["instance_id"]] = d

    conv_dir = os.path.join(args.eval_dir, "conversations")
    tarballs = sorted(os.listdir(conv_dir))
    sample_tarballs = tarballs[:: args.sample_step]
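    # Stride sampling keeps total replay time manageable; sorting above
    # makes the sample deterministic across runs.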
    print(f"Sampling {len(sample_tarballs)} of {len(tarballs)} conversations\n")

    all_persist: list[dict] = []
    conv_summaries: list[dict] = []

    for tarname in sample_tarballs:
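        # Tarballs are assumed to be named <instance_id>.tar.gz; anything
        # without matching metadata in output.jsonl is skipped.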
        instance_id = tarname.replace(".tar.gz", "")
        instance_data = instances.get(instance_id)
        if not instance_data:
            continue

        tarpath = os.path.join(conv_dir, tarname)
        tmpdir = tempfile.mkdtemp(prefix="bench_persist_")
        try:
            events_dir = extract_conversation(tarpath, tmpdir)
            if not events_dir:
                continue
            event_files = read_event_files(events_dir)
            if not event_files:
                continue

            persist_results = measure_persist_latencies(event_files)
            all_persist.extend(persist_results)

            # Per-cycle persist time (action + observation pairs)
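            # Pairing the i-th action with the i-th observation assumes the
            # trace alternates them; min() drops any unmatched tail.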
            action_p = [r for r in persist_results if r["kind"] == "ActionEvent"]
            obs_p = [r for r in persist_results if r["kind"] == "ObservationEvent"]
            n_cycles = min(len(action_p), len(obs_p))
            cycle_persist = [
                action_p[i]["persist_ms"] + obs_p[i]["persist_ms"]
                for i in range(n_cycles)
            ]

            total_persist_ms = sum(r["persist_ms"] for r in persist_results)

            conv_summaries.append(
                {
                    "instance_id": instance_id,
                    "n_events": len(event_files),
                    "n_cycles": n_cycles,
                    "total_persist_ms": total_persist_ms,
                    "mean_cycle_persist_ms": (
                        statistics.mean(cycle_persist) if cycle_persist else 0
                    ),
                }
            )
            n_ev = len(event_files)
            print(
                f" {instance_id[:50]:50s} events={n_ev:>4}"
                f" persist={total_persist_ms:>7.1f}ms"
            )

        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)

    # --- Analysis ---
    print(f"\n{'=' * 70}")
    print("RESULTS: Persist Latency per Event / Action Cycle")
    print(f"{'=' * 70}")

    by_kind: dict[str, list[dict]] = {}
    for r in all_persist:
        by_kind.setdefault(r["kind"], []).append(r)

    print("\n--- Per-Event Persist Latency ---")
    header = (
        f" {'Event Type':<35} {'N':>5} {'Median':>10}"
        f" {'Mean':>10} {'P95':>10} {'MedSize':>10}"
    )
    print(header)
    print(f" {'-' * 80}")
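    # Median and P95 below are plain order statistics on the sorted lists
    # (lats[n // 2] and lats[int(n * 0.95)]) rather than interpolated
    # percentiles; close enough at these sample sizes.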
    for kind in [
        "SystemPromptEvent",
        "MessageEvent",
        "ActionEvent",
        "ObservationEvent",
        "ConversationStateUpdateEvent",
        "AgentErrorEvent",
    ]:
        if kind not in by_kind:
            continue
        entries = by_kind[kind]
        lats = sorted([e["persist_ms"] for e in entries])
        sizes = sorted([e["size_bytes"] for e in entries])
        n = len(lats)
        print(
            f" {kind:<35} {n:>5}"
            f" {lats[n // 2]:>9.3f}ms"
            f" {statistics.mean(lats):>9.3f}ms"
            f" {lats[int(n * 0.95)]:>9.3f}ms"
            f" {sizes[n // 2]:>8,}B"
        )

    all_lats = sorted([r["persist_ms"] for r in all_persist])
    all_sizes = sorted([r["size_bytes"] for r in all_persist])
    n = len(all_lats)
    print(f" {'-' * 80}")
    print(
        f" {'ALL EVENTS':<35} {n:>5}"
        f" {all_lats[n // 2]:>9.3f}ms"
        f" {statistics.mean(all_lats):>9.3f}ms"
        f" {all_lats[int(n * 0.95)]:>9.3f}ms"
        f" {all_sizes[n // 2]:>8,}B"
    )

    # Per action cycle
    print("\n--- Per Action Cycle (Action + Observation) ---")
    cycle_persists = [
        s["mean_cycle_persist_ms"] for s in conv_summaries if s["n_cycles"] > 0
    ]
    if cycle_persists:
        med = statistics.median(cycle_persists)
        mean = statistics.mean(cycle_persists)
        print(f" Median per-cycle persist time: {med:.2f}ms")
        print(f" Mean per-cycle persist time: {mean:.2f}ms")
    else:
        print(" No complete action/observation cycles found")

    # Save
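    # Both granularities go into one file: raw per-event samples for
    # distribution plots, per-conversation summaries for quick comparison.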
    with open(args.output, "w") as f:
        json.dump(
            {"per_event": all_persist, "conversations": conv_summaries},
            f,
            indent=2,
        )
    print(f"\nRaw data saved to {args.output}")


if __name__ == "__main__":
    main()