Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
134 changes: 134 additions & 0 deletions scripts/coyote_protector_xtc_gui_ready_test/bash_launcher.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
#!/usr/bin/env python3
"""
bash_launcher.py
Launches routine_detection_v3.sh with specified arguments.
Can be run from CDS to orchestrate the complete XTC workflow.

USAGE:
python bash_launcher.py [OPTIONS]
python bash_launcher.py --help

EXAMPLES:
# Run with defaults
python bash_launcher.py

# Run with custom parameters
python bash_launcher.py --run_number=61 --exp_number=mfx101346325 --max_events=80000

# Run with specific user
python bash_launcher.py --user=pmonteil --run_number=100
"""

import argparse
import subprocess
import sys
from pathlib import Path


def eprint(msg):
    """Print *msg* to stderr with a trailing newline (old-Python friendly)."""
    sys.stderr.write("{}\n".format(msg))


def _build_parser():
    """Create the argument parser for the launcher CLI."""
    parser = argparse.ArgumentParser(
        description="Launch coyote XTC detection routine via bash",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )
    parser.add_argument(
        "--user",
        default="pmonteil",
        help="Username for SSH to SDF (default: pmonteil)"
    )
    parser.add_argument(
        "--run_number",
        type=int,
        default=61,
        help="LCLS run number to process (default: 61)"
    )
    parser.add_argument(
        "--exp_number",
        default="mfx101346325",
        help="Experiment identifier (default: mfx101346325)"
    )
    parser.add_argument(
        "--save_normalized",
        type=int,
        choices=[0, 1],
        default=1,
        help="Save normalized images (1=yes, 0=no, default: 1)"
    )
    parser.add_argument(
        "--max_events",
        type=int,
        default=10000,
        help="Maximum events to process (default: 10000)"
    )
    parser.add_argument(
        "--use_normalized",
        type=int,
        choices=[0, 1],
        default=1,
        help="Use normalized images for inference (1=yes, 0=no, default: 1)"
    )
    parser.add_argument(
        "--dry_run",
        action="store_true",
        help="Print command without executing"
    )
    return parser


def _build_command(script_path, args):
    """Assemble the bash invocation of routine_detection_v3.sh.

    NOTE(review): the script takes --user as a flag but the remaining
    settings as KEY=VALUE positional tokens; this mixed convention is
    preserved exactly as the bash script presumably expects it.
    """
    return [
        "bash",
        str(script_path),
        "--user={}".format(args.user),
        "RUN_NUMBER={}".format(args.run_number),
        "EXP_NUMBER={}".format(args.exp_number),
        "SAVE_NORMALIZED={}".format(args.save_normalized),
        "MAX_EVENTS={}".format(args.max_events),
        "USE_NORMALIZED={}".format(args.use_normalized),
    ]


def _print_banner(args, cmd):
    """Echo the effective settings and the full command to stdout."""
    print("[INFO] ============================================")
    print("[INFO] COYOTE XTC DETECTION LAUNCHER")
    print("[INFO] ============================================")
    print("[INFO] User: {}".format(args.user))
    print("[INFO] Run Number: {}".format(args.run_number))
    print("[INFO] Experiment: {}".format(args.exp_number))
    print("[INFO] Save Normalized: {}".format(args.save_normalized))
    print("[INFO] Max Events: {}".format(args.max_events))
    print("[INFO] Use Normalized: {}".format(args.use_normalized))
    print("[INFO] ============================================")
    print()
    print("[INFO] Command: {}".format(" ".join(cmd)))
    print()


def main(argv=None):
    """Locate routine_detection_v3.sh and run it with the parsed settings.

    Parameters
    ----------
    argv : list of str, optional
        Argument list for the parser; defaults to sys.argv[1:], so the
        original no-argument call is unchanged.  Passing an explicit list
        allows programmatic invocation.

    Returns
    -------
    int
        The bash script's exit code, 0 for a dry run, 130 on Ctrl-C,
        or 1 if the subprocess could not be started.

    Exits the process with status 1 when the script file is missing.
    """
    args = _build_parser().parse_args(argv)

    # The bash script is expected to sit next to this launcher.
    script_path = Path(__file__).parent / "routine_detection_v3.sh"

    if not script_path.exists():
        eprint("[ERROR] Script not found: {}".format(script_path))
        sys.exit(1)

    cmd = _build_command(script_path, args)
    _print_banner(args, cmd)

    if args.dry_run:
        print("[INFO] DRY RUN MODE - Command not executed")
        return 0

    try:
        # check=False: propagate the script's exit code rather than raising.
        result = subprocess.run(cmd, check=False)
        return result.returncode
    except KeyboardInterrupt:
        eprint("\n[INFO] Interrupted by user")
        return 130  # conventional exit status for SIGINT (128 + 2)
    except Exception as e:
        eprint("[ERROR] Failed to execute command: {}".format(e))
        return 1


# Propagate main()'s integer return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
105 changes: 59 additions & 46 deletions scripts/coyote_protector_xtc_gui_ready_test/inference_coyote_xtc.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,16 @@

## same as test_xtc/inference_coyote.py but with argument parsing
#!/usr/bin/env python3
"""
YOLOv8 Inference Script → CSV (sizes)
YOLOv8 Inference Script → CSV (sizes) + above-threshold CSV
------------------------------------------------------------
- Runs YOLO inference on a folder of images
- Computes longest side per detection in px and μm
- Flags detections larger than a given μm threshold
- Writes:
1) results_csv/measurements_complete.csv (ALL detections)
2) results_csv/measurements_above_threshold.csv (ONLY detections above threshold)

USAGE:
python inference_coyote_xtc.py <chip_pic_dir>

EDIT THESE BEFORE RUNNING:
- weights_path: Path to your trained YOLO weights (.pt)
- px_to_um: Pixel-to-micron conversion factor for your setup
"""

import os
Expand All @@ -37,27 +35,23 @@
# Paths / parameters
# -------------------------
weights_path = (
"/sdf/home/p/pmonteil/coyote_protector_test_PL_labeling_tries_v3_run61_mfx101346325_200_random/scripts/runs/detect/train_150epochs_v11_merged/weights/best.pt"
"/sdf/home/p/pmonteil/coyote_protector_test_PL_labeling_tries_v3_run61_mfx101346325_200_random/scripts/"
"runs/detect/train_150epochs_v11_merged/weights/best.pt"
)

'''
weights_path = (
"/sdf/home/p/pmonteil/prjlumine22/results/pmonteil/"
"coyote_beamtime_19jan/weight_yolov11n_150epochs.pt"
)
'''

mag_factor = 5.56 # optical magnification
px_size = 3.45 # pixel size (μm)
dowsamp_factor = 2 # if images were downsampled before inference
px_to_um = px_size*dowsamp_factor / mag_factor
alert_um = 50.0 #threshold, in μm, above which to flag detections
dowsamp_factor = 2 # if images were downsampled before inference
px_to_um = px_size * dowsamp_factor / mag_factor
alert_um = 50.0 # threshold, in μm, above which to flag detections

# Output CSV directory
#out_dir = Path("runs/size_measurements")
out_dir = Path("results_csv")
out_dir.mkdir(parents=True, exist_ok=True)
csv_path = out_dir / "measurements.csv"

csv_all_tmp = out_dir / "measurements.csv"
csv_all_final = out_dir / "measurements_complete.csv"
csv_above = out_dir / "measurements_above_threshold.csv"

# -------------------------
# Load model
Expand All @@ -74,24 +68,32 @@
)

# -------------------------
# Write CSV
# CSV header
# -------------------------
with open(csv_path, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow([
"image",
"det_idx",
"class_id",
"class_name",
"confidence",
"x_center_px",
"y_center_px",
"width_px",
"height_px",
"longest_px",
"longest_um",
"alert"
])
header = [
"image",
"det_idx",
"class_id",
"class_name",
"confidence",
"x_center_px",
"y_center_px",
"width_px",
"height_px",
"longest_px",
"longest_um",
"alert"
]

# -------------------------
# Write BOTH CSVs
# -------------------------
with open(csv_all_tmp, "w", newline="") as f_all, open(csv_above, "w", newline="") as f_above:
w_all = csv.writer(f_all)
w_above = csv.writer(f_above)

w_all.writerow(header)
w_above.writerow(header)

for r in results:
img_name = os.path.basename(r.path)
Expand All @@ -111,14 +113,16 @@
cls_name = model.names.get(cls_id, str(cls_id))
conf = float(confs[i]) if confs.size > i else float("nan")

alert_flag = "STOP" if longest_um > alert_um else ""
if alert_flag:
is_above = longest_um > alert_um
alert_flag = "STOP" if is_above else ""

if is_above:
print(
f"[STOP] {img_name} — det {i+1}: "
f"{longest_um:.2f} μm > {alert_um} μm → Stop the beam"
)

writer.writerow([
row = [
img_name,
i + 1,
cls_id,
Expand All @@ -131,12 +135,21 @@
f"{longest_px:.2f}",
f"{longest_um:.2f}",
alert_flag
])
]

# Write to "all detections"
w_all.writerow(row)

# Write to "above threshold only"
if is_above:
w_above.writerow(row)

print(f"\nCSV saved to: {csv_path}")
print(f"\nCSV (all) saved to: {csv_all_tmp}")
print(f"CSV (above threshold only) saved to: {csv_above}")

# Rename CSV after completion
final_csv_path = out_dir / "measurements_complete.csv"
csv_path.rename(final_csv_path)
# Rename "all" CSV after completion
if csv_all_final.exists():
csv_all_final.unlink()
csv_all_tmp.rename(csv_all_final)

print(f"CSV renamed to: {final_csv_path}")
print(f"CSV (all) renamed to: {csv_all_final}")
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
results_csv_dir = Path(f"run_{run_number}/results_csv")
results_csv_dir = Path("results_csv")
event_data_csv = results_csv_dir / "event_data.csv"
measurements_csv = results_csv_dir / "measurements_complete.csv"
measurements_csv = results_csv_dir / "measurements_above_threshold.csv"

# Check if files exist
if not event_data_csv.exists():
Expand Down
Loading