1 parent 190c66b commit 8b3e4b2
ch05/05_bonus_hparam_tuning/hparam_search.py

@@ -64,8 +64,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter):
 
 
 def train_model(model, train_loader, val_loader, optimizer, device,
-                n_epochs, eval_freq, eval_iter,
-                encoded_start_context, tokenizer, warmup_iters=10,
+                n_epochs, eval_iter, warmup_iters=10,
                 initial_lr=3e-05, min_lr=1e-6):
     global_step = 0
 
@@ -192,9 +191,7 @@ def train_model(model, train_loader, val_loader, optimizer, device,
     train_loss, val_loss = train_model(
         model, train_loader, val_loader, optimizer, device,
         n_epochs=HPARAM_CONFIG["n_epochs"],
-        eval_freq=5, eval_iter=1,
-        encoded_start_context=encoded_tensor,
-        tokenizer=tokenizer,
+        eval_iter=1,
         warmup_iters=HPARAM_CONFIG["warmup_iters"],
         initial_lr=HPARAM_CONFIG["initial_lr"],
         min_lr=HPARAM_CONFIG["min_lr"]
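The parameters kept in the simplified signature (warmup_iters, initial_lr, min_lr) suggest a learning-rate schedule with a linear warmup followed by decay toward a floor. The sketch below shows one common way such a schedule is computed; it is an assumption for context only, since the body of train_model is not shown in this diff, and get_lr is a hypothetical helper, not part of hparam_search.py. The defaults mirror the train_model signature above.

import math

def get_lr(step, total_steps, warmup_iters=10,
           initial_lr=3e-05, min_lr=1e-6):
    # Hypothetical helper -- not from hparam_search.py; shown only to
    # illustrate how these hyperparameters typically interact.
    if step < warmup_iters:
        # Linear warmup: ramp the learning rate up to initial_lr.
        return initial_lr * (step + 1) / warmup_iters
    # Cosine decay from initial_lr down to min_lr over the remaining steps.
    progress = (step - warmup_iters) / max(1, total_steps - warmup_iters)
    return min_lr + 0.5 * (initial_lr - min_lr) * (1 + math.cos(math.pi * progress))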