139 changes: 139 additions & 0 deletions .github/scripts/monitor_slurm_job.sh
@@ -0,0 +1,139 @@
#!/bin/bash
# Monitor a SLURM job and stream its output in real-time
# Usage: monitor_slurm_job.sh <job_id> <output_file>
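# Example (illustrative job id, using the bench workflow's file naming):
#   monitor_slurm_job.sh 1234567 bench-gpu-acc.out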

set -e

if [ $# -ne 2 ]; then
echo "Usage: $0 <job_id> <output_file>"
exit 1
fi

job_id="$1"
output_file="$2"

echo "Submitted batch job $job_id"
echo "Monitoring output file: $output_file"

# Wait for file to appear with retry logic for transient squeue failures
echo "Waiting for job to start..."
squeue_retries=0
max_squeue_retries=5
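# Up to 5 consecutive squeue failures, with 2^n-second backoff between
# attempts (~30 s in total), before concluding the job never started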
while [ ! -f "$output_file" ]; do
# Check if job is still queued/running
if squeue -j "$job_id" &>/dev/null; then
squeue_retries=0 # Reset on success
sleep 5
else
squeue_retries=$((squeue_retries + 1))
if [ $squeue_retries -ge $max_squeue_retries ]; then
# Job not in queue and output file doesn't exist
if [ ! -f "$output_file" ]; then
echo "ERROR: Job $job_id not in queue and output file not created"
exit 1
fi
break
fi
# Exponential backoff
sleep_time=$((2 ** squeue_retries))
echo "Warning: squeue check failed, retrying in ${sleep_time}s..."
sleep $sleep_time
fi
done

echo "=== Streaming output for job $job_id ==="
# Stream output while job runs
tail -f "$output_file" &
tail_pid=$!

# Wait for job to complete with retry logic for transient squeue failures
squeue_failures=0
while true; do
if squeue -j "$job_id" &>/dev/null; then
squeue_failures=0
else
squeue_failures=$((squeue_failures + 1))
# Check if job actually completed using sacct (if available)
if [ $squeue_failures -ge 3 ]; then
if command -v sacct >/dev/null 2>&1; then
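# sacct prints one row per job step; keep only the first (allocation) row
# and strip suffixes such as "CANCELLED by <uid>" from the state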
state=$(sacct -j "$job_id" --format=State --noheader 2>/dev/null | head -n1 | awk '{print $1}')
# Consider job done only if it reached a terminal state
case "$state" in
COMPLETED|FAILED|CANCELLED|TIMEOUT|OUT_OF_MEMORY|NODE_FAIL|PREEMPTED|BOOT_FAIL|DEADLINE)
break
;;
*)
# treat as transient failure, reset failures and continue polling
squeue_failures=0
;;
esac
else
# No sacct available: require several more consecutive squeue failures
# before assuming the job finished, so a single transient outage does not
# end monitoring early and the loop cannot poll forever
if [ $squeue_failures -ge 6 ]; then
break
fi
fi
fi
fi
sleep 5
done

# Wait for output file to finish growing (stabilize) before stopping tail
if [ -f "$output_file" ]; then
last_size=-1
same_count=0
while true; do
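# GNU coreutils stat syntax (-c%s), as found on typical Linux clusters;
# BSD/macOS stat would need -f%z instead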
size=$(stat -c%s "$output_file" 2>/dev/null || echo -1)
if [ "$size" -eq "$last_size" ] && [ "$size" -ge 0 ]; then
same_count=$((same_count + 1))
else
same_count=0
last_size=$size
fi
# two consecutive stable checks (~10 s) imply the file has likely been flushed
if [ $same_count -ge 2 ]; then
break
fi
sleep 5
done
fi

# Stop tailing
kill $tail_pid 2>/dev/null || true

echo ""
echo "=== Final output ==="
cat "$output_file"

# Check exit status with sacct fallback
exit_code=""

# Try scontrol first (works for recent jobs)
scontrol_output=$(scontrol show job "$job_id" 2>/dev/null || echo "")
if [ -n "$scontrol_output" ]; then
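# scontrol reports ExitCode as <return_code>:<signal>, so "0:0" means success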
exit_code=$(echo "$scontrol_output" | grep -oE 'ExitCode=[0-9]+:[0-9]+' | cut -d= -f2 || echo "")
fi

# If scontrol failed or returned invalid job, try sacct (for completed/aged-out jobs)
if [ -z "$exit_code" ]; then
echo "Warning: scontrol failed to get exit code, trying sacct..."
sacct_output=$(sacct -j "$job_id" --format=ExitCode --noheader --parsable2 2>/dev/null | head -n1 || echo "")
if [ -n "$sacct_output" ]; then
exit_code="$sacct_output"
fi
fi

# If we still can't determine exit code, fail explicitly
if [ -z "$exit_code" ]; then
echo "ERROR: Unable to determine exit status for job $job_id"
echo "Both scontrol and sacct failed to return valid exit code"
exit 1
fi

# Check if job succeeded
if [ "$exit_code" != "0:0" ]; then
echo "ERROR: Job $job_id failed with exit code $exit_code"
exit 1
fi

echo "Job $job_id completed successfully"
exit 0

59 changes: 54 additions & 5 deletions .github/workflows/bench.yml
@@ -23,7 +23,7 @@ jobs:
filters: ".github/file-filter.yml"

self:
name: "${{ matrix.name }} (${{ matrix.device }})"
name: "${{ matrix.name }} (${{ matrix.device }}${{ matrix.interface != 'none' && format('-{0}', matrix.interface) || '' }})"
if: ${{ github.repository=='MFlowCode/MFC' && needs.file-changes.outputs.checkall=='true' && ((github.event_name=='pull_request_review' && github.event.review.state=='approved') || (github.event_name=='pull_request' && (github.event.pull_request.user.login=='sbryngelson' || github.event.pull_request.user.login=='wilfonba'))) }}
needs: file-changes
strategy:
@@ -73,7 +73,7 @@
runs-on:
group: ${{ matrix.group }}
labels: ${{ matrix.labels }}
timeout-minutes: 1400
timeout-minutes: 480
env:
ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16
ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
@@ -99,9 +99,58 @@

- name: Bench (Master v. PR)
run: |
(cd pr && bash .github/workflows/${{ matrix.cluster }}/submit-bench.sh .github/workflows/${{ matrix.cluster }}/bench.sh ${{ matrix.device }} ${{ matrix.interface }}) &
(cd master && bash .github/workflows/${{ matrix.cluster }}/submit-bench.sh .github/workflows/${{ matrix.cluster }}/bench.sh ${{ matrix.device }} ${{ matrix.interface }}) &
wait %1 && wait %2
set -e

# Function to submit and monitor using extracted script
submit_and_monitor() {
local dir=$1
local device=$2
local interface=$3
local cluster=$4

cd "$dir"

# Submit job
submit_output=$(bash .github/workflows/$cluster/submit-bench.sh \
.github/workflows/$cluster/bench.sh $device $interface 2>&1)

job_id=$(echo "$submit_output" | sed -n 's/.*Submitted batch job \([0-9][0-9]*\).*/\1/p')
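# sbatch prints "Submitted batch job <id>"; the sed above extracts the numeric id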
job_slug="bench-$device-$interface"
output_file="${job_slug}.out"
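# NOTE: assumes submit-bench.sh writes the SLURM output to bench-<device>-<interface>.out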

if [ -z "$job_id" ]; then
echo "ERROR: Failed to submit job"
echo "$submit_output"
return 1
fi

# Use the monitoring script
bash .github/scripts/monitor_slurm_job.sh "$job_id" "$output_file"
}

# Run both jobs with monitoring
(submit_and_monitor pr ${{ matrix.device }} ${{ matrix.interface }} ${{ matrix.cluster }}) &
pr_pid=$!

(submit_and_monitor master ${{ matrix.device }} ${{ matrix.interface }} ${{ matrix.cluster }}) &
master_pid=$!

# Wait and capture exit codes reliably
pr_exit=0
master_exit=0

wait "$pr_pid" || pr_exit=$?
wait "$master_pid" || master_exit=$?

# Explicitly check and quote to avoid test errors
if [ "${pr_exit}" -ne 0 ] || [ "${master_exit}" -ne 0 ]; then
echo "One or both benchmark jobs failed: pr_exit=${pr_exit}, master_exit=${master_exit}"
exit 1
fi

- name: Generate & Post Comment
run: |
35 changes: 26 additions & 9 deletions .github/workflows/test.yml
@@ -93,23 +93,40 @@ jobs:
OPT2: ${{ matrix.debug == 'debug' && '-% 20' || '' }}

self:
name: Self Hosted
name: "${{ matrix.cluster_name }} (${{ matrix.device }}${{ matrix.interface != 'none' && format('-{0}', matrix.interface) || '' }})"
if: github.repository == 'MFlowCode/MFC' && needs.file-changes.outputs.checkall == 'true'
needs: file-changes
continue-on-error: false
timeout-minutes: 1400
timeout-minutes: 480
strategy:
matrix:
device: ['gpu']
interface: ['acc', 'omp']
lbl: ['gt', 'frontier']
include:
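# interface selects the GPU offload backend: 'acc' (OpenACC) or 'omp'
# (OpenMP target offload); 'none' marks the CPU-only configuration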
- device: 'cpu'
# Phoenix (GT)
- lbl: 'gt'
cluster_name: 'Georgia Tech | Phoenix'
device: 'gpu'
interface: 'acc'
- lbl: 'gt'
cluster_name: 'Georgia Tech | Phoenix'
device: 'gpu'
interface: 'omp'
- lbl: 'gt'
cluster_name: 'Georgia Tech | Phoenix'
device: 'cpu'
interface: 'none'
lbl: 'gt'
- device: 'cpu'
# Frontier (ORNL)
- lbl: 'frontier'
cluster_name: 'Oak Ridge | Frontier'
device: 'gpu'
interface: 'acc'
- lbl: 'frontier'
cluster_name: 'Oak Ridge | Frontier'
device: 'gpu'
interface: 'omp'
- lbl: 'frontier'
cluster_name: 'Oak Ridge | Frontier'
device: 'cpu'
interface: 'none'
lbl: 'frontier'
runs-on:
group: phoenix
labels: ${{ matrix.lbl }}