#!/bin/sh
# SLURM launcher: trains a VQ-VAE, then a class-conditioned and an
# unconditioned GPT prior on its codebook. Submit with: sbatch launch.sh
#SBATCH -J vqvae
#SBATCH --gres=gpu:1
#SBATCH --partition=besteffort
#SBATCH --nodes=1
#SBATCH -t 7-0
# stdout/stderr file names below use %j = SLURM job id ("out<jobid>.out"/".err").
#SBATCH --output=out%j.out # nom du fichier de sortie
#SBATCH --error=out%j.err # nom du fichier d'erreur (ici commun avec la sortie)
# Trace every command into the job log.
set -x
# Record the GPU/driver allocated to this job at startup.
nvidia-smi
# ----------------------------------------------------------------------
# Global run configuration, shared by every training stage further down.
# ----------------------------------------------------------------------
# Pick exactly one dataset; the alternatives are kept for convenience:
#   DATASET="fashion_mnist"
#   DATASET="mnist"
DATASET=cifar10

# Data-loading parameters forwarded to every python entry point.
BATCH_SIZE=64
NUM_WORKERS=4

# Checkpoint frequency, in epochs, for all training stages.
SAVE_EVERY=25
# --- Dataset-Specific Hyperparameters ---
# One branch per supported dataset; every branch defines the same variable
# set, consumed by the training commands further down.
# NOTE(review): GPT_EMDEDDING_DIM is misspelled ("EMDEDDING") but is kept
# as-is because the python invocations below read it under this exact name.
case "$DATASET" in
  mnist)
    # VQ-VAE settings for MNIST
    EMBEDDING_DIM=64
    NUM_EMBEDDINGS=256
    COMMITMENT_COST=1.0
    HIDDEN_DIMS=128
    VQVAE_MAX_EPOCHS=250
    VQVAE_LR="1e-3"
    # Prior settings for MNIST
    PRIOR_MAX_EPOCHS=500
    PRIOR_LR="1e-4"
    # Note: Prior's internal embedding dim is VQ-VAE's EMBEDDING_DIM
    GPT_EMDEDDING_DIM=64
    N_HEADS=8
    N_LAYERS=6
    ;;
  fashion_mnist)
    # VQ-VAE settings for Fashion-MNIST
    EMBEDDING_DIM=128
    NUM_EMBEDDINGS=256
    COMMITMENT_COST=1.0
    HIDDEN_DIMS=128
    VQVAE_MAX_EPOCHS=500
    VQVAE_LR="1e-4"
    # Prior settings for Fashion-MNIST
    PRIOR_MAX_EPOCHS=800
    PRIOR_LR="1e-4"
    # Note: Prior's internal embedding dim is VQ-VAE's EMBEDDING_DIM
    GPT_EMDEDDING_DIM=128
    N_HEADS=8
    N_LAYERS=8
    ;;
  cifar10)
    # VQ-VAE settings for CIFAR-10
    EMBEDDING_DIM=256
    NUM_EMBEDDINGS=512
    COMMITMENT_COST=1.0
    HIDDEN_DIMS=256
    VQVAE_MAX_EPOCHS=1000
    VQVAE_LR="1e-4"
    # Prior settings for CIFAR-10
    PRIOR_MAX_EPOCHS=1200
    PRIOR_LR="1e-4"
    # Note: Prior's internal embedding dim is VQ-VAE's EMBEDDING_DIM
    GPT_EMDEDDING_DIM=512
    N_HEADS=16
    N_LAYERS=8
    ;;
  *)
    # Fail fast on an unknown dataset; diagnostics belong on stderr
    # (the original printed this to stdout).
    echo "ERROR: Unknown DATASET: ${DATASET}" >&2
    exit 1
    ;;
esac
# Output roots for each training stage (relative to the submit directory).
# Marked read-only: nothing later in the script reassigns them.
readonly VQVAE_SAVE_BASE_DIR="vqvae_outputs4"
readonly PRIOR_SAVE_BASE_DIR="prior_outputs4"
readonly UNCOND_PRIOR_SAVE_BASE_DIR="uncond_prior_outputs4"
# --- Echo all configured variables ---
# Dump the full configuration into the job log so each run is reproducible
# from its output file alone.
echo "--- Configuration Variables ---"
echo "DATASET: ${DATASET}"
echo "BATCH_SIZE: ${BATCH_SIZE}"
echo "NUM_WORKERS: ${NUM_WORKERS}"
echo "SAVE_EVERY (epochs): ${SAVE_EVERY}"
echo ""
echo "--- VQ-VAE Specific (used for VQ-VAE training AND Prior's VQ-VAE instantiation) ---"
echo "EMBEDDING_DIM (VQ-VAE & Prior internal): ${EMBEDDING_DIM}"
echo "NUM_EMBEDDINGS (VQ-VAE & Prior vocab): ${NUM_EMBEDDINGS}"
echo "COMMITMENT_COST (VQ-VAE): ${COMMITMENT_COST}"
echo "HIDDEN_DIMS (VQ-VAE): ${HIDDEN_DIMS}"
echo "VQVAE_MAX_EPOCHS: ${VQVAE_MAX_EPOCHS}"
echo "VQVAE_LR: ${VQVAE_LR}"
echo ""
echo "--- Prior Specific (GPT structure) ---"
echo "PRIOR_MAX_EPOCHS: ${PRIOR_MAX_EPOCHS}"
echo "PRIOR_LR: ${PRIOR_LR}"
echo "N_HEADS (Prior): ${N_HEADS}"
echo "N_LAYERS (Prior): ${N_LAYERS}"
echo "GPT_EMDEDDING_DIM (Prior): ${GPT_EMDEDDING_DIM}"
echo ""
echo "--- Paths ---"
# BUGFIX: the original echoed EXPERIMENT_BASE_DIR and VQVAE_MODEL_LOAD_DIR,
# neither of which is ever assigned in this script (both always printed
# empty). Report the variables that are actually used instead.
echo "VQVAE_SAVE_BASE_DIR (for train_vqvae.py --save-plots-dir): ${VQVAE_SAVE_BASE_DIR}"
echo "VQ-VAE load dir (passed as --trained-vqvae-path, expected to contain best_vqvae.pt): ${VQVAE_SAVE_BASE_DIR}"
echo "PRIOR_SAVE_BASE_DIR (for train_conditioned_prior.py --save-plots-dir): ${PRIOR_SAVE_BASE_DIR}"
echo "UNCOND_PRIOR_SAVE_BASE_DIR (for train_prior.py --save-plots-dir): ${UNCOND_PRIOR_SAVE_BASE_DIR}"
echo "--------------------------------------"
echo ""
# --- Train VQ-VAE ---
# Stage 1: train the VQ-VAE. Its checkpoints land in VQVAE_SAVE_BASE_DIR and
# are required by both prior-training stages below, so a failure here must
# abort the whole job (the original script carried on regardless).
echo "Starting VQ-VAE training..."
python train_vqvae.py \
    --dataset "${DATASET}" \
    --batch-size "${BATCH_SIZE}" \
    --num-workers "${NUM_WORKERS}" \
    --embedding-dim "${EMBEDDING_DIM}" \
    --num-embeddings "${NUM_EMBEDDINGS}" \
    --commitment-cost "${COMMITMENT_COST}" \
    --hidden-dims "${HIDDEN_DIMS}" \
    --save-plots-dir "${VQVAE_SAVE_BASE_DIR}" \
    --max-epochs "${VQVAE_MAX_EPOCHS}" \
    --lr "${VQVAE_LR}" \
    --save-every "${SAVE_EVERY}" || {
        # The priors cannot train without a VQ-VAE checkpoint.
        echo "ERROR: VQ-VAE training failed" >&2
        exit 1
    }
echo "VQ-VAE training finished."
# --- Train Conditioned Prior ---
# Stage 2: class-conditioned GPT prior over the trained VQ-VAE's codes.
# Loads the VQ-VAE checkpoint from VQVAE_SAVE_BASE_DIR.
echo "Starting Cond Prior training..."
python train_conditioned_prior.py \
    --dataset "${DATASET}" \
    --batch-size "${BATCH_SIZE}" \
    --num-workers "${NUM_WORKERS}" \
    --embedding-dim "${EMBEDDING_DIM}" \
    --num-embeddings "${NUM_EMBEDDINGS}" \
    --commitment-cost "${COMMITMENT_COST}" \
    --hidden-dims "${HIDDEN_DIMS}" \
    --save-plots-dir "${PRIOR_SAVE_BASE_DIR}" \
    --max-epochs "${PRIOR_MAX_EPOCHS}" \
    --lr "${PRIOR_LR}" \
    --save-every "${SAVE_EVERY}" \
    --n-heads "${N_HEADS}" \
    --n-layers "${N_LAYERS}" \
    --gpt-embedding-dim "${GPT_EMDEDDING_DIM}" \
    --trained-vqvae-path "${VQVAE_SAVE_BASE_DIR}" || {
        # Report but keep going: the unconditioned prior stage below only
        # needs the VQ-VAE checkpoint, not this stage's output.
        echo "ERROR: conditioned prior training failed" >&2
    }
echo "Cond Prior training finished."
# --- Train Unconditioned Prior ---
# Stage 3: unconditioned GPT prior over the trained VQ-VAE's codes.
echo "Starting Prior training..."
python train_prior.py \
    --dataset "${DATASET}" \
    --batch-size "${BATCH_SIZE}" \
    --num-workers "${NUM_WORKERS}" \
    --embedding-dim "${EMBEDDING_DIM}" \
    --num-embeddings "${NUM_EMBEDDINGS}" \
    --commitment-cost "${COMMITMENT_COST}" \
    --hidden-dims "${HIDDEN_DIMS}" \
    --save-plots-dir "${UNCOND_PRIOR_SAVE_BASE_DIR}" \
    --max-epochs "${PRIOR_MAX_EPOCHS}" \
    --lr "${PRIOR_LR}" \
    --save-every "${SAVE_EVERY}" \
    --n-heads "${N_HEADS}" \
    --n-layers "${N_LAYERS}" \
    --gpt-embedding-dim "${GPT_EMDEDDING_DIM}" \
    --trained-vqvae-path "${VQVAE_SAVE_BASE_DIR}" || {
        # Last training stage: propagate the failure as the job's exit status
        # (the original script always exited 0 even when training failed).
        echo "ERROR: unconditioned prior training failed" >&2
        exit 1
    }
echo "Prior training finished."
# --- (Disabled) Sample generation ---
# The two invocations below are kept for reference; uncomment once both
# priors are trained. NOTE(review): these pass "${DATASET}/<dir>" as the
# model paths, while the training stages above save directly into the base
# dirs without a dataset prefix — confirm the intended layout (e.g. whether
# the training scripts nest their outputs by dataset) before re-enabling.
# Also note the stray trailing "\" on the final line.
# echo "Generating samples from unconditioned prior model"
# python generate_examples.py \
# --dataset "${DATASET}" \
# --batch-size ${BATCH_SIZE} \
# --num-workers ${NUM_WORKERS} \
# --embedding-dim ${EMBEDDING_DIM} \
# --num-embeddings ${NUM_EMBEDDINGS} \
# --hidden-dims ${HIDDEN_DIMS} \
# --n-heads ${N_HEADS} \
# --n-layers ${N_LAYERS} \
# --gpt-embedding-dim ${GPT_EMDEDDING_DIM} \
# --trained-vqvae-path "${DATASET}/${VQVAE_SAVE_BASE_DIR}" \
# --trained-prior-path "${DATASET}/${UNCOND_PRIOR_SAVE_BASE_DIR}"
# echo "Generating samples from conditioned prior model"
# python generate_examples.py \
# --conditioned \
# --dataset "${DATASET}" \
# --batch-size ${BATCH_SIZE} \
# --num-workers ${NUM_WORKERS} \
# --embedding-dim ${EMBEDDING_DIM} \
# --num-embeddings ${NUM_EMBEDDINGS} \
# --hidden-dims ${HIDDEN_DIMS} \
# --n-heads ${N_HEADS} \
# --n-layers ${N_LAYERS} \
# --gpt-embedding-dim ${GPT_EMDEDDING_DIM} \
# --trained-vqvae-path "${DATASET}/${VQVAE_SAVE_BASE_DIR}" \
# --trained-prior-path "${DATASET}/${PRIOR_SAVE_BASE_DIR}" \