diff --git a/scripts/coyote_protector_xtc_gui_ready_test/bash_launcher.py b/scripts/coyote_protector_xtc_gui_ready_test/bash_launcher.py new file mode 100644 index 0000000..e4aca2a --- /dev/null +++ b/scripts/coyote_protector_xtc_gui_ready_test/bash_launcher.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +bash_launcher.py +Launches routine_detection_v3.sh with specified arguments. +Can be run from CDS to orchestrate the complete XTC workflow. + +USAGE: + python bash_launcher.py [OPTIONS] + python bash_launcher.py --help + +EXAMPLES: + # Run with defaults + python bash_launcher.py + + # Run with custom parameters + python bash_launcher.py --run_number=61 --exp_number=mfx101346325 --max_events=80000 + + # Run with specific user + python bash_launcher.py --user=pmonteil --run_number=100 +""" + +import argparse +import subprocess +import sys +from pathlib import Path + + +def eprint(msg): + """Write message to stderr (compatible with older Python).""" + sys.stderr.write(str(msg) + "\n") + + +def main(): + parser = argparse.ArgumentParser( + description="Launch coyote XTC detection routine via bash", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=__doc__ + ) + + parser.add_argument( + "--user", + default="pmonteil", + help="Username for SSH to SDF (default: pmonteil)" + ) + parser.add_argument( + "--run_number", + type=int, + default=61, + help="LCLS run number to process (default: 61)" + ) + parser.add_argument( + "--exp_number", + default="mfx101346325", + help="Experiment identifier (default: mfx101346325)" + ) + parser.add_argument( + "--save_normalized", + type=int, + choices=[0, 1], + default=1, + help="Save normalized images (1=yes, 0=no, default: 1)" + ) + parser.add_argument( + "--max_events", + type=int, + default=10000, + help="Maximum events to process (default: 10000)" + ) + parser.add_argument( + "--use_normalized", + type=int, + choices=[0, 1], + default=1, + help="Use normalized images for inference (1=yes, 0=no, default: 1)" + ) + 
parser.add_argument( + "--dry_run", + action="store_true", + help="Print command without executing" + ) + + args = parser.parse_args() + + # Get the script path (should be in same directory) + script_path = Path(__file__).parent / "routine_detection_v3.sh" + + if not script_path.exists(): + eprint("[ERROR] Script not found: {}".format(script_path)) + sys.exit(1) + + # Build command + cmd = [ + "bash", + str(script_path), + "--user={}".format(args.user), + "RUN_NUMBER={}".format(args.run_number), + "EXP_NUMBER={}".format(args.exp_number), + "SAVE_NORMALIZED={}".format(args.save_normalized), + "MAX_EVENTS={}".format(args.max_events), + "USE_NORMALIZED={}".format(args.use_normalized), + ] + + print("[INFO] ============================================") + print("[INFO] COYOTE XTC DETECTION LAUNCHER") + print("[INFO] ============================================") + print("[INFO] User: {}".format(args.user)) + print("[INFO] Run Number: {}".format(args.run_number)) + print("[INFO] Experiment: {}".format(args.exp_number)) + print("[INFO] Save Normalized: {}".format(args.save_normalized)) + print("[INFO] Max Events: {}".format(args.max_events)) + print("[INFO] Use Normalized: {}".format(args.use_normalized)) + print("[INFO] ============================================") + print() + + print("[INFO] Command: {}".format(" ".join(cmd))) + print() + + if args.dry_run: + print("[INFO] DRY RUN MODE - Command not executed") + return 0 + + try: + result = subprocess.run(cmd, check=False) + return result.returncode + except KeyboardInterrupt: + eprint("\n[INFO] Interrupted by user") + return 130 + except Exception as e: + eprint("[ERROR] Failed to execute command: {}".format(e)) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/coyote_protector_xtc_gui_ready_test/inference_coyote_xtc.py b/scripts/coyote_protector_xtc_gui_ready_test/inference_coyote_xtc.py index 1972353..610eda9 100644 --- 
a/scripts/coyote_protector_xtc_gui_ready_test/inference_coyote_xtc.py +++ b/scripts/coyote_protector_xtc_gui_ready_test/inference_coyote_xtc.py @@ -1,18 +1,16 @@ - -## same as test_xtc/inference_coyote.py but with argument parsing +#!/usr/bin/env python3 """ -YOLOv8 Inference Script → CSV (sizes) +YOLOv8 Inference Script → CSV (sizes) + above-threshold CSV ------------------------------------------------------------ - Runs YOLO inference on a folder of images - Computes longest side per detection in px and μm - Flags detections larger than a given μm threshold +- Writes: + 1) results_csv/measurements_complete.csv (ALL detections) + 2) results_csv/measurements_above_threshold.csv (ONLY detections above threshold) USAGE: python inference_coyote_xtc.py - -EDIT THESE BEFORE RUNNING: - - weights_path: Path to your trained YOLO weights (.pt) - - px_to_um: Pixel-to-micron conversion factor for your setup """ import os @@ -37,27 +35,23 @@ # Paths / parameters # ------------------------- weights_path = ( - "/sdf/home/p/pmonteil/coyote_protector_test_PL_labeling_tries_v3_run61_mfx101346325_200_random/scripts/runs/detect/train_150epochs_v11_merged/weights/best.pt" + "/sdf/home/p/pmonteil/coyote_protector_test_PL_labeling_tries_v3_run61_mfx101346325_200_random/scripts/" + "runs/detect/train_150epochs_v11_merged/weights/best.pt" ) -''' -weights_path = ( - "/sdf/home/p/pmonteil/prjlumine22/results/pmonteil/" - "coyote_beamtime_19jan/weight_yolov11n_150epochs.pt" -) -''' - mag_factor = 5.56 # optical magnification px_size = 3.45 # pixel size (μm) -dowsamp_factor = 2 # if images were downsampled before inference -px_to_um = px_size*dowsamp_factor / mag_factor -alert_um = 50.0 #threshold, in μm, above which to flag detections +dowsamp_factor = 2 # if images were downsampled before inference +px_to_um = px_size * dowsamp_factor / mag_factor +alert_um = 50.0 # threshold, in μm, above which to flag detections # Output CSV directory -#out_dir = Path("runs/size_measurements") out_dir = 
Path("results_csv") out_dir.mkdir(parents=True, exist_ok=True) -csv_path = out_dir / "measurements.csv" + +csv_all_tmp = out_dir / "measurements.csv" +csv_all_final = out_dir / "measurements_complete.csv" +csv_above = out_dir / "measurements_above_threshold.csv" # ------------------------- # Load model @@ -74,24 +68,32 @@ ) # ------------------------- -# Write CSV +# CSV header # ------------------------- -with open(csv_path, "w", newline="") as f: - writer = csv.writer(f) - writer.writerow([ - "image", - "det_idx", - "class_id", - "class_name", - "confidence", - "x_center_px", - "y_center_px", - "width_px", - "height_px", - "longest_px", - "longest_um", - "alert" - ]) +header = [ + "image", + "det_idx", + "class_id", + "class_name", + "confidence", + "x_center_px", + "y_center_px", + "width_px", + "height_px", + "longest_px", + "longest_um", + "alert" +] + +# ------------------------- +# Write BOTH CSVs +# ------------------------- +with open(csv_all_tmp, "w", newline="") as f_all, open(csv_above, "w", newline="") as f_above: + w_all = csv.writer(f_all) + w_above = csv.writer(f_above) + + w_all.writerow(header) + w_above.writerow(header) for r in results: img_name = os.path.basename(r.path) @@ -111,14 +113,16 @@ cls_name = model.names.get(cls_id, str(cls_id)) conf = float(confs[i]) if confs.size > i else float("nan") - alert_flag = "STOP" if longest_um > alert_um else "" - if alert_flag: + is_above = longest_um > alert_um + alert_flag = "STOP" if is_above else "" + + if is_above: print( f"[STOP] {img_name} — det {i+1}: " f"{longest_um:.2f} μm > {alert_um} μm → Stop the beam" ) - writer.writerow([ + row = [ img_name, i + 1, cls_id, @@ -131,12 +135,21 @@ f"{longest_px:.2f}", f"{longest_um:.2f}", alert_flag - ]) + ] + + # Write to "all detections" + w_all.writerow(row) + + # Write to "above threshold only" + if is_above: + w_above.writerow(row) -print(f"\nCSV saved to: {csv_path}") +print(f"\nCSV (all) saved to: {csv_all_tmp}") +print(f"CSV (above threshold only) 
saved to: {csv_above}") -# Rename CSV after completion -final_csv_path = out_dir / "measurements_complete.csv" -csv_path.rename(final_csv_path) +# Rename "all" CSV after completion +if csv_all_final.exists(): + csv_all_final.unlink() +csv_all_tmp.rename(csv_all_final) -print(f"CSV renamed to: {final_csv_path}") +print(f"CSV (all) renamed to: {csv_all_final}") diff --git a/scripts/coyote_protector_xtc_gui_ready_test/merge_crystals_data.py b/scripts/coyote_protector_xtc_gui_ready_test/merge_crystals_data.py index 0bf6cea..342450c 100644 --- a/scripts/coyote_protector_xtc_gui_ready_test/merge_crystals_data.py +++ b/scripts/coyote_protector_xtc_gui_ready_test/merge_crystals_data.py @@ -31,7 +31,7 @@ results_csv_dir = Path(f"run_{run_number}/results_csv") results_csv_dir = Path("results_csv") event_data_csv = results_csv_dir / "event_data.csv" -measurements_csv = results_csv_dir / "measurements_complete.csv" +measurements_csv = results_csv_dir / "measurements_above_threshold.csv" # Check if files exist if not event_data_csv.exists(): diff --git a/scripts/coyote_protector_xtc_gui_ready_test/routine_detection_v3.sh b/scripts/coyote_protector_xtc_gui_ready_test/routine_detection_v3.sh new file mode 100644 index 0000000..9a0b29c --- /dev/null +++ b/scripts/coyote_protector_xtc_gui_ready_test/routine_detection_v3.sh @@ -0,0 +1,133 @@ +#!/bin/bash +# +# routine_detection_v3.sh +# Orchestrates the XTC export and inference workflow +# Runs on CDS and launches export_infer_xtc.sh on SDF via SSH +# + +# ------- DEFAULT INPUTS --------- +USER="pmonteil" +RUN_NUMBER=61 +EXP_NUMBER="mfx101346325" +SAVE_NORMALIZED=1 +MAX_EVENTS=10000 +USE_NORMALIZED=1 +# -------------------------------- + +DEST_HOST="psana.sdf.slac.stanford.edu" +SDF_BASE="/sdf/home/p/pmonteil/coyote_protector_xtc_gui_ready_test" +RESULTS_BACK_BASE="/cds/home/p/pmonteil" + +# ------- PARSE key=value arguments --------- +for arg in "$@"; do + case $arg in + --user=*) + USER="${arg#*=}" + ;; + RUN_NUMBER=*) + 
RUN_NUMBER="${arg#*=}" + ;; + EXP_NUMBER=*) + EXP_NUMBER="${arg#*=}" + ;; + SAVE_NORMALIZED=*) + SAVE_NORMALIZED="${arg#*=}" + ;; + MAX_EVENTS=*) + MAX_EVENTS="${arg#*=}" + ;; + USE_NORMALIZED=*) + USE_NORMALIZED="${arg#*=}" + ;; + *) + echo "[WARN] Unknown argument: $arg" + ;; + esac +done + +echo "[INFO] ==============================================" +echo "[INFO] COYOTE PROTECTOR XTC DETECTION ROUTINE v3" +echo "[INFO] ==============================================" +echo "[INFO] User: ${USER}" +echo "[INFO] SDF Host: ${DEST_HOST}" +echo "[INFO] SDF Base: ${SDF_BASE}" +echo "[INFO] Run Number: ${RUN_NUMBER}" +echo "[INFO] Experiment: ${EXP_NUMBER}" +echo "[INFO] Save Normalized: ${SAVE_NORMALIZED}" +echo "[INFO] Max Events: ${MAX_EVENTS}" +echo "[INFO] Use Normalized for Inference: ${USE_NORMALIZED}" +echo "[INFO] Results back to: ${RESULTS_BACK_BASE}" +echo + +# ==================================== +# STEP 1: Launch export_infer_xtc.sh on SDF via SSH +# ==================================== +echo "[STEP 1/3] Launching export_infer_xtc.sh on SDF..." + +JOB_ID="$(ssh "${USER}@${DEST_HOST}" "bash -lc ' + set -euo pipefail + cd \"${SDF_BASE}\" + jid=\$(sbatch --parsable run_export_infer_xtc.sh RUN_NUMBER=${RUN_NUMBER} EXP_NUMBER=${EXP_NUMBER} SAVE_NORMALIZED=${SAVE_NORMALIZED} MAX_EVENTS=${MAX_EVENTS} USE_NORMALIZED=${USE_NORMALIZED}) + echo \$jid +'")" + +echo "[INFO] Submitted job on SDF: ${JOB_ID}" +echo + +# ==================================== +# STEP 2: Wait for results on SDF +# ==================================== +echo "[STEP 2/3] Waiting for merged results on SDF..." + +REMOTE_CSV="${SDF_BASE}/run_${RUN_NUMBER}/results_csv/merged_crystals.csv" + +max_attempts=360 # 3 hours with 30-second intervals +attempt=0 + +while [ $attempt -lt $max_attempts ]; do + if ssh "${USER}@${DEST_HOST}" "bash -lc 'test -f \"${REMOTE_CSV}\"'"; then + echo "[INFO] Merged CSV detected on SDF." 
+ break + fi + attempt=$((attempt + 1)) + if [ $((attempt % 4)) -eq 0 ]; then + echo "[INFO] Waiting... (attempt $attempt/$max_attempts)" + fi + sleep 30 +done + +if [ $attempt -eq $max_attempts ]; then + echo "[ERROR] Timeout waiting for results after 3 hours" + exit 1 +fi + +echo + +# ==================================== +# STEP 3: Copy all results back to CDS +# ==================================== +echo "[STEP 3/3] Copying results back to CDS..." + +RESULTS_DIR="run_${RUN_NUMBER}_results" +LOCAL_DEST_DIR="${RESULTS_BACK_BASE}/${RESULTS_DIR}" +mkdir -p "${LOCAL_DEST_DIR}" + +echo "[INFO] Copying from: ${DEST_HOST}:${SDF_BASE}/run_${RUN_NUMBER}/results_csv/" +echo "[INFO] Copying to: ${LOCAL_DEST_DIR}/" + +rsync -avr \ + "${USER}@${DEST_HOST}:${SDF_BASE}/run_${RUN_NUMBER}/results_csv/" \ + "${LOCAL_DEST_DIR}/" + +echo +echo "[DONE] ==============================================" +echo "[INFO] Complete workflow finished successfully." +echo "[INFO] Results available at:" +echo "[INFO] ${LOCAL_DEST_DIR}/" +echo "[INFO] ==============================================" +echo "[INFO] Key files:" +echo "[INFO] - merged_crystals.csv (all data merged)" +echo "[INFO] - measurements_complete.csv (all detections)" +echo "[INFO] - measurements_above_threshold.csv (above threshold only)" +echo "[INFO] - event_data.csv (trajectory data)" +echo "[DONE]" diff --git a/scripts/coyote_protector_xtc_gui_ready_test/run_export_infer_xtc.sh b/scripts/coyote_protector_xtc_gui_ready_test/run_export_infer_xtc.sh new file mode 100644 index 0000000..b0db199 --- /dev/null +++ b/scripts/coyote_protector_xtc_gui_ready_test/run_export_infer_xtc.sh @@ -0,0 +1,114 @@ +#!/bin/bash +#SBATCH --partition=turing +#SBATCH --account=lcls:prjlumine22 +#SBATCH --job-name=export_infer_xtc +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=4 +#SBATCH --gpus=1 +#SBATCH --time=04:00:00 +#SBATCH --output=logs_export/export_infer_turing_%j.out +#SBATCH
--error=logs_export/export_infer_turing_%j.err + +set -euo pipefail + +export OMP_PROC_BIND=close +export OMP_PLACES=cores +export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}" +export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK}" +export CUDA_DEVICE_ORDER=PCI_BUS_ID + +echo "[SLURM] Host: $(hostname)" +echo "[SLURM] CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES:-"(not set)"}" +echo "[SLURM] Starting job..." + +PSCONDA_SH="/sdf/group/lcls/ds/ana/sw/conda2/manage/bin/psconda.sh" +YOLO_PYTHON="/sdf/data/lcls/ds/prj/prjlumine22/results/coyote_protector/miniconda3_coyote/envs/env_coyote/bin/python" + +# ------- DEFAULT INPUTS --------- +RUN_NUMBER=61 +EXP_NUMBER="mfx101346325" +SAVE_NORMALIZED=1 +MAX_EVENTS=10000 +USE_NORMALIZED=1 +# -------------------------------- + +# ------- PARSE key=value arguments --------- +for arg in "$@"; do + case $arg in + RUN_NUMBER=*) + RUN_NUMBER="${arg#*=}" + ;; + EXP_NUMBER=*) + EXP_NUMBER="${arg#*=}" + ;; + SAVE_NORMALIZED=*) + SAVE_NORMALIZED="${arg#*=}" + ;; + MAX_EVENTS=*) + MAX_EVENTS="${arg#*=}" + ;; + USE_NORMALIZED=*) + USE_NORMALIZED="${arg#*=}" + ;; + *) + echo "[WARN] Unknown argument: $arg" + ;; + esac +done + +echo "[ARGS] run=${RUN_NUMBER} exp=${EXP_NUMBER} save_norm=${SAVE_NORMALIZED} max_events=${MAX_EVENTS} use_normalized=${USE_NORMALIZED}" +echo + +# ==================================== +# NEW STEP 0: Create run folder and cd into it +# ==================================== +RUN_DIR="run_${RUN_NUMBER}" +mkdir -p "${RUN_DIR}" +cd "${RUN_DIR}" + +echo "[STEP 0/3] Created and entered: $(pwd)" +echo + +# ==================================== +# STEP 1: Export XTC Data +# ==================================== +echo "[STEP 1/3] Running export_xtc_normalized_args.py" +set +u +source "${PSCONDA_SH}" +set -u + +# run from parent directory (scripts live one level up) +python ../export_xtc_normalized_args.py "${RUN_NUMBER}" "${EXP_NUMBER}" "${SAVE_NORMALIZED}" "${MAX_EVENTS}" + +echo "[STEP 1/3] Export completed." 
+echo + +# ==================================== +# STEP 2: Run YOLO Inference +# ==================================== +echo "[STEP 2/3] Running inference_coyote_xtc.py" + +# Determine which image directory to use (now relative to run_${RUN_NUMBER}/) +if [ "${USE_NORMALIZED}" = "1" ]; then + IMAGE_DIR="run_${RUN_NUMBER}_png_norm" +else + IMAGE_DIR="run_${RUN_NUMBER}_png" +fi + +"${YOLO_PYTHON}" ../inference_coyote_xtc.py "${IMAGE_DIR}" + +echo "[STEP 2/3] Inference completed." +echo + +# ==================================== +# STEP 3: Merge Results +# ==================================== +echo "[STEP 3/3] Running merge_crystals_data.py" +"${YOLO_PYTHON}" ../merge_crystals_data.py "${RUN_NUMBER}" + +echo "[STEP 3/3] Merging completed." +echo + +echo "[SLURM] Complete workflow finished successfully." +echo "Results saved to: ${RUN_DIR}/results_csv/" diff --git a/scripts/coyote_protector_xtc_gui_ready_test/run_export_xtc.sh b/scripts/coyote_protector_xtc_gui_ready_test/run_export_xtc.sh new file mode 100644 index 0000000..ed26091 --- /dev/null +++ b/scripts/coyote_protector_xtc_gui_ready_test/run_export_xtc.sh @@ -0,0 +1,63 @@ +#!/bin/bash +#SBATCH --partition=turing +#SBATCH --account=lcls:prjlumine22 +#SBATCH --job-name=export_xtc +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=4 +#SBATCH --time=02:00:00 +#SBATCH --output=logs_export/export_turing_%j.out +#SBATCH --error=logs_export/export_turing_%j.err + +set -euo pipefail + +export OMP_PROC_BIND=close +export OMP_PLACES=cores +export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}" +export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK}" + +echo "[SLURM] Host: $(hostname)" +echo "[SLURM] Starting job..." 
+ +PSCONDA_SH="/sdf/group/lcls/ds/ana/sw/conda2/manage/bin/psconda.sh" + +# ------- DEFAULT INPUTS --------- +RUN_NUMBER=61 +EXP_NUMBER="mfx101346325" +SAVE_NORMALIZED=1 +MAX_EVENTS=10000 +# -------------------------------- + +# ------- PARSE key=value arguments --------- +for arg in "$@"; do + case $arg in + RUN_NUMBER=*) + RUN_NUMBER="${arg#*=}" + ;; + EXP_NUMBER=*) + EXP_NUMBER="${arg#*=}" + ;; + SAVE_NORMALIZED=*) + SAVE_NORMALIZED="${arg#*=}" + ;; + MAX_EVENTS=*) + MAX_EVENTS="${arg#*=}" + ;; + *) + echo "[WARN] Unknown argument: $arg" + ;; + esac +done + +echo "[ARGS] run=${RUN_NUMBER} exp=${EXP_NUMBER} save_norm=${SAVE_NORMALIZED} max_events=${MAX_EVENTS}" +echo + +echo "[STEP 1/1] Running export_xtc_normalized_args.py" +set +u +source "${PSCONDA_SH}" +set -u + +python ./export_xtc_normalized_args.py "${RUN_NUMBER}" "${EXP_NUMBER}" "${SAVE_NORMALIZED}" "${MAX_EVENTS}" + +echo +echo "[SLURM] Export completed successfully." diff --git a/scripts/coyote_protector_xtc_gui_ready_test/run_inference_xtc.sh b/scripts/coyote_protector_xtc_gui_ready_test/run_inference_xtc.sh new file mode 100644 index 0000000..16e5c8e --- /dev/null +++ b/scripts/coyote_protector_xtc_gui_ready_test/run_inference_xtc.sh @@ -0,0 +1,65 @@ +#!/bin/bash +#SBATCH --partition=turing +#SBATCH --account=lcls:prjlumine22 +#SBATCH --job-name=inference_xtc +#SBATCH --nodes=1 +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=4 +#SBATCH --gpus=1 +#SBATCH --time=02:00:00 +#SBATCH --output=logs_export/inference_turing_%j.out +#SBATCH --error=logs_export/inference_turing_%j.err + +set -euo pipefail + +export OMP_PROC_BIND=close +export OMP_PLACES=cores +export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}" +export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK}" +export CUDA_DEVICE_ORDER=PCI_BUS_ID + +echo "[SLURM] Host: $(hostname)" +echo "[SLURM] CUDA_VISIBLE_DEVICES: ${CUDA_VISIBLE_DEVICES:-"(not set)"}" +echo "[SLURM] Starting job..." 
+ +YOLO_PYTHON="/sdf/data/lcls/ds/prj/prjlumine22/results/coyote_protector/miniconda3_coyote/envs/env_coyote/bin/python" + +# ------- DEFAULT INPUTS --------- +RUN_NUMBER=61 +USE_NORMALIZED=1 +# -------------------------------- + +# ------- PARSE key=value arguments --------- +for arg in "$@"; do + case $arg in + RUN_NUMBER=*) + RUN_NUMBER="${arg#*=}" + ;; + USE_NORMALIZED=*) + USE_NORMALIZED="${arg#*=}" + ;; + *) + echo "[WARN] Unknown argument: $arg" + ;; + esac +done + +echo "[ARGS] run=${RUN_NUMBER} use_normalized=${USE_NORMALIZED}" +echo + +# Determine which image directory to use +if [ "${USE_NORMALIZED}" = "1" ]; then + IMAGE_DIR="run_${RUN_NUMBER}/run_${RUN_NUMBER}_png_norm" +else + IMAGE_DIR="run_${RUN_NUMBER}/run_${RUN_NUMBER}_png" +fi + +echo "[STEP 1/2] Running inference_coyote_xtc.py" +"${YOLO_PYTHON}" ./inference_coyote_xtc.py "${IMAGE_DIR}" + +echo +echo "[STEP 2/2] Running merge_crystals_data.py" +"${YOLO_PYTHON}" ./merge_crystals_data.py "${RUN_NUMBER}" + +echo +echo "[SLURM] Inference and merging completed successfully."