conda env create -f env.yaml
conda activate avt

# SWAG-SP* on Cholec80 (30-min horizon)
python train_net.py dataset_name=cholec80 model_name=supra eval_horizons=[30]
# SWAG-SP* on AutoLaparo21
python train_net.py dataset_name=autolaparo21 model_name=supra eval_horizons=[30]

tensorboard --logdir OUTPUTS/expts/

| Model | Command | Description |
|---|---|---|
| SWAG-SP* | `model_name=supra conditional_probs_embeddings=true` | Single-pass with prior knowledge (best) |
| SWAG-SP | `model_name=supra conditional_probs_embeddings=false` | Single-pass without priors |
| SWAG-AR | `model_name=lstm decoder_type=autoregressive` | Autoregressive decoder |
| R2C | `model_name=supra decoder_anticipation=regression` | Regression-to-Classification |
dataset_name: cholec80 # or autolaparo21
split_idx: 1 # 1-4 for k-fold CV
train_start: 1 # Starting video index
train_end: 10 # Ending video index
test_start: 15 # Test split start
test_end: 21 # Test split end

model_name: supra # Model architecture
conditional_probs_embeddings: true # Enable prior knowledge
pooling_dim: 32 # Feature pooling dimension

num_epochs: 40 # Training epochs
eval_horizons: [30] # Anticipation horizons (minutes)
batch_size: 4 # Batch size
learning_rate: 0.0001 # Learning rate

test_only: false # Set true for evaluation only
finetune_ckpt: best # Checkpoint to load (best/latest)
main_metric: acc_curr_future # Primary metric for model selection

python train_net.py dataset_name=cholec80

python train_net.py dataset_name=cholec80 num_epochs=50 batch_size=8

python train_net.py eval_horizons=[15,30,60]

python train_net.py model.hidden_dim=512 opt.lr=0.0001

| Directory | Purpose |
|---|---|
| conf/ | Configuration files (Hydra) |
| src/models/ | Model architectures |
| src/datasets/ | Dataset loaders |
| src/func/ | Training/evaluation code |
| src/loss_fn/ | Loss functions |
| src/common/ | Utilities and helpers |
| scripts/ | Execution scripts |
| experiments/configs/ | Experiment config files (formerly expts/) |
| experiments/ | Results tracking |
| docs/ | Documentation and assets |
| baselines/ | Baseline implementations (R2A2, Informer) |
| OUTPUTS/ | Training outputs (auto-generated) |
Format: {model}_{dataset}_{description}.txt
Examples:
skit_al21_base_g12_nemb_rt18_r2c.txt
lstm_c80_ct144_at60_ls442_eosw_loc24.txt
Format: {config_file_name}/local/
- F1: Frame-wise F1 score
- SegF1: Segment-wise F1 score (IoU ≥ 0.5)
- Accuracy: Frame-wise accuracy
- wMAE: Weighted Mean Absolute Error
- inMAE: In-phase Mean Absolute Error
- outMAE: Out-of-phase Mean Absolute Error
# Create config file: experiments/configs/my_sweep.txt
python scripts/launch.py -c experiments/configs/my_sweep.txt -l

# TensorBoard
tensorboard --logdir OUTPUTS/expts/{experiment_name}/local/logs/
# Check outputs
ls OUTPUTS/expts/{experiment_name}/local/

python train_net.py \
  dataset_name=cholec80 \
  finetune_ckpt=latest  # or path to specific checkpoint

- Reduce `num_epochs` for quick tests
- Use smaller `eval_horizons` (e.g., `[15]` instead of `[30]`)
- Increase `batch_size` if GPU memory allows
- Use `save_video_labels_to_npy=true` to cache labels
# Run single epoch to test pipeline
python train_net.py num_epochs=1 test_only=false
# Use debug flag with launcher
python scripts/launch.py -c experiments/configs/test.txt -l -g

- Outputs are stored in `OUTPUTS/` (gitignored)
- Delete old experiment outputs: `rm -rf OUTPUTS/expts/old_experiment/`
- Keep only best checkpoints, remove intermediate ones
- Check configuration: `python train_net.py --cfg job`
- View available config groups: `python train_net.py --help`
- See Hydra docs: https://hydra.cc/
- Refer to paper for methodology details
- Check `CLEANUP_RECOMMENDATIONS.md` for codebase maintenance
@article{boels2025swag,
title={SWAG: long-term surgical workflow prediction with generative-based anticipation},
author={Boels, Maxence and Liu, Yang and Dasgupta, Prokar and Granados, Alejandro and Ourselin, Sebastien},
journal={International Journal of Computer Assisted Radiology and Surgery},
year={2025},
publisher={Springer}
}