Skip to content

Commit 0c2da3c

Browse files
committed
Automated tutorials push
1 parent fed6e56 commit 0c2da3c

File tree

196 files changed

+13556
-12892
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

196 files changed

+13556
-12892
lines changed

_downloads/13cdb386a4b0dc48c626f32e6cf8681d/amp_recipe.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@
261261
"# The same ``GradScaler`` instance should be used for the entire convergence run.\n",
262262
"# If you perform multiple convergence runs in the same script, each run should use\n",
263263
"# a dedicated fresh ``GradScaler`` instance. ``GradScaler`` instances are lightweight.\n",
264-
"scaler = torch.cuda.amp.GradScaler()\n",
264+
"scaler = torch.amp.GradScaler(\"cuda\")\n",
265265
"\n",
266266
"for epoch in range(0): # 0 epochs, this section is for illustration only\n",
267267
" for input, target in zip(data, targets):\n",
@@ -308,7 +308,7 @@
308308
"\n",
309309
"net = make_model(in_size, out_size, num_layers)\n",
310310
"opt = torch.optim.SGD(net.parameters(), lr=0.001)\n",
311-
"scaler = torch.cuda.amp.GradScaler(enabled=use_amp)\n",
311+
"scaler = torch.amp.GradScaler(\"cuda\" ,enabled=use_amp)\n",
312312
"\n",
313313
"start_timer()\n",
314314
"for epoch in range(epochs):\n",

_downloads/cadb3a57e7a6d7c149b5ae377caf36a8/amp_recipe.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ def make_model(in_size, out_size, num_layers):
150150
# The same ``GradScaler`` instance should be used for the entire convergence run.
151151
# If you perform multiple convergence runs in the same script, each run should use
152152
# a dedicated fresh ``GradScaler`` instance. ``GradScaler`` instances are lightweight.
153-
scaler = torch.cuda.amp.GradScaler()
153+
scaler = torch.amp.GradScaler("cuda")
154154

155155
for epoch in range(0): # 0 epochs, this section is for illustration only
156156
for input, target in zip(data, targets):
@@ -182,7 +182,7 @@ def make_model(in_size, out_size, num_layers):
182182

183183
net = make_model(in_size, out_size, num_layers)
184184
opt = torch.optim.SGD(net.parameters(), lr=0.001)
185-
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
185+
scaler = torch.amp.GradScaler("cuda" ,enabled=use_amp)
186186

187187
start_timer()
188188
for epoch in range(epochs):
-2 Bytes
Loading
168 Bytes
Loading

_images/sphx_glr_coding_ddpg_001.png

2.52 KB
Loading
-2.84 KB
Loading
74 Bytes
Loading
-6 Bytes
Loading
167 Bytes
Loading
694 Bytes
Loading

0 commit comments

Comments (0)