
Commit 360f818: Automated tutorials push

1 parent: b4a197e

196 files changed, +9548 additions, -8840 deletions


_downloads/19879e6777280194639314bd79851483/knowledge_distillation_tutorial.py

Lines changed: 4 additions & 2 deletions
@@ -37,8 +37,10 @@
 import torchvision.transforms as transforms
 import torchvision.datasets as datasets
 
-# Check if GPU is available, and if not, use the CPU
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+# Check if the current `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# is available, and if not, use the CPU
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
+print(f"Using {device} device")
 
 ######################################################################
 # Loading CIFAR-10
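
For reference, the device-selection pattern this hunk adopts, as a minimal standalone sketch. It assumes a PyTorch build that ships the torch.accelerator namespace (2.6 or later); older builds still need the torch.cuda check.

```python
import torch

# Prefer the current accelerator (CUDA, MPS, XPU, ...) when one exists,
# otherwise fall back to the CPU. current_accelerator() returns a
# torch.device; .type extracts the plain string such as "cuda" or "mps".
device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
print(f"Using {device} device")

# The resulting string works anywhere a device argument is accepted:
x = torch.ones(2, 2, device=device)
```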

_downloads/44a84f8c1764dbf61662d306ff9ed43a/chatbot_tutorial.ipynb

Lines changed: 7 additions & 5 deletions
@@ -136,8 +136,10 @@
 "import json\n",
 "\n",
 "\n",
-"USE_CUDA = torch.cuda.is_available()\n",
-"device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")"
+"# If the current `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__ is available,\n",
+"# we will use it. Otherwise, we use the CPU.\n",
+"device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else \"cpu\"\n",
+"print(f\"Using {device} device\")"
 ]
 },
 {
@@ -1531,16 +1533,16 @@
 "    encoder_optimizer.load_state_dict(encoder_optimizer_sd)\n",
 "    decoder_optimizer.load_state_dict(decoder_optimizer_sd)\n",
 "\n",
-"# If you have CUDA, configure CUDA to call\n",
+"# If you have an accelerator, configure it to call\n",
 "for state in encoder_optimizer.state.values():\n",
 "    for k, v in state.items():\n",
 "        if isinstance(v, torch.Tensor):\n",
-"            state[k] = v.cuda()\n",
+"            state[k] = v.to(device)\n",
 "\n",
 "for state in decoder_optimizer.state.values():\n",
 "    for k, v in state.items():\n",
 "        if isinstance(v, torch.Tensor):\n",
-"            state[k] = v.cuda()\n",
+"            state[k] = v.to(device)\n",
 "\n",
 "# Run training iterations\n",
 "print(\"Starting Training!\")\n",

_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb

Lines changed: 33 additions & 34 deletions
@@ -857,23 +857,23 @@
 "does this *without* changing `a` - you can see that when we print `a`\n",
 "again at the end, it retains its `requires_grad=True` property.\n",
 "\n",
-"Moving to GPU\n",
-"=============\n",
+"Moving to\n",
+"[Accelerator](https://pytorch.org/docs/stable/torch.html#accelerators)\n",
+"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\--\n",
 "\n",
-"One of the major advantages of PyTorch is its robust acceleration on\n",
-"CUDA-compatible Nvidia GPUs. (\"CUDA\" stands for *Compute Unified Device\n",
-"Architecture*, which is Nvidia's platform for parallel computing.) So\n",
-"far, everything we've done has been on CPU. How do we move to the faster\n",
-"hardware?\n",
+"One of the major advantages of PyTorch is its robust acceleration on an\n",
+"[accelerator](https://pytorch.org/docs/stable/torch.html#accelerators)\n",
+"such as CUDA, MPS, MTIA, or XPU. So far, everything we've done has been\n",
+"on CPU. How do we move to the faster hardware?\n",
 "\n",
-"First, we should check whether a GPU is available, with the\n",
+"First, we should check whether an accelerator is available, with the\n",
 "`is_available()` method.\n",
 "\n",
 "<div style=\"background-color: #54c7ec; color: #fff; font-weight: 700; padding-left: 10px; padding-top: 5px; padding-bottom: 5px\"><strong>NOTE:</strong></div>\n",
 "\n",
 "<div style=\"background-color: #f3f4f7; padding-left: 10px; padding-top: 10px; padding-bottom: 10px; padding-right: 10px\">\n",
 "\n",
-"<p>If you do not have a CUDA-compatible GPU and CUDA drivers installed, the executable cells in this section will not execute any GPU-related code.</p>\n",
+"<p>If you do not have an accelerator, the executable cells in this section will not execute any accelerator-related code.</p>\n",
 "\n",
 "</div>\n",
 "\n"
@@ -887,8 +887,8 @@
 },
 "outputs": [],
 "source": [
-"if torch.cuda.is_available():\n",
-"    print('We have a GPU!')\n",
+"if torch.accelerator.is_available():\n",
+"    print('We have an accelerator!')\n",
 "else:\n",
 "    print('Sorry, CPU only.')"
 ]
@@ -897,13 +897,14 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Once we've determined that one or more GPUs is available, we need to put\n",
-"our data someplace where the GPU can see it. Your CPU does computation\n",
-"on data in your computer's RAM. Your GPU has dedicated memory attached\n",
-"to it. Whenever you want to perform a computation on a device, you must\n",
-"move *all* the data needed for that computation to memory accessible by\n",
-"that device. (Colloquially, \"moving the data to memory accessible by the\n",
-"GPU\" is shorted to, \"moving the data to the GPU\".)\n",
+"Once we've determined that one or more accelerators is available, we\n",
+"need to put our data someplace where the accelerator can see it. Your\n",
+"CPU does computation on data in your computer's RAM. Your accelerator\n",
+"has dedicated memory attached to it. Whenever you want to perform a\n",
+"computation on a device, you must move *all* the data needed for that\n",
+"computation to memory accessible by that device. (Colloquially, \"moving\n",
+"the data to memory accessible by the GPU\" is shorted to, \"moving the\n",
+"data to the GPU\".)\n",
 "\n",
 "There are multiple ways to get your data onto your target device. You\n",
 "may do it at creation time:\n"
@@ -917,8 +918,8 @@
 },
 "outputs": [],
 "source": [
-"if torch.cuda.is_available():\n",
-"    gpu_rand = torch.rand(2, 2, device='cuda')\n",
+"if torch.accelerator.is_available():\n",
+"    gpu_rand = torch.rand(2, 2, device=torch.accelerator.current_accelerator())\n",
 "    print(gpu_rand)\n",
 "else:\n",
 "    print('Sorry, CPU only.')"
@@ -929,19 +930,20 @@
 "metadata": {},
 "source": [
 "By default, new tensors are created on the CPU, so we have to specify\n",
-"when we want to create our tensor on the GPU with the optional `device`\n",
-"argument. You can see when we print the new tensor, PyTorch informs us\n",
-"which device it's on (if it's not on CPU).\n",
+"when we want to create our tensor on the accelerator with the optional\n",
+"`device` argument. You can see when we print the new tensor, PyTorch\n",
+"informs us which device it's on (if it's not on CPU).\n",
 "\n",
-"You can query the number of GPUs with `torch.cuda.device_count()`. If\n",
-"you have more than one GPU, you can specify them by index:\n",
+"You can query the number of accelerators with\n",
+"`torch.accelerator.device_count()`. If you have more than one\n",
+"accelerator, you can specify them by index; taking CUDA as an example:\n",
 "`device='cuda:0'`, `device='cuda:1'`, etc.\n",
 "\n",
 "As a coding practice, specifying our devices everywhere with string\n",
 "constants is pretty fragile. In an ideal world, your code would perform\n",
-"robustly whether you're on CPU or GPU hardware. You can do this by\n",
-"creating a device handle that can be passed to your tensors instead of a\n",
-"string:\n"
+"robustly whether you're on CPU or accelerator hardware. You can do this\n",
+"by creating a device handle that can be passed to your tensors instead\n",
+"of a string:\n"
 ]
 },
 {
@@ -952,10 +954,7 @@
 },
 "outputs": [],
 "source": [
-"if torch.cuda.is_available():\n",
-"    my_device = torch.device('cuda')\n",
-"else:\n",
-"    my_device = torch.device('cpu')\n",
+"my_device = torch.accelerator.current_accelerator() if torch.accelerator.is_available() else torch.device('cpu')\n",
 "print('Device: {}'.format(my_device))\n",
 "\n",
 "x = torch.rand(2, 2, device=my_device)\n",
@@ -991,11 +990,11 @@
 "It is important to know that in order to do computation involving two or\n",
 "more tensors, *all of the tensors must be on the same device*. The\n",
 "following code will throw a runtime error, regardless of whether you\n",
-"have a GPU device available:\n",
+"have an accelerator device available; taking CUDA as an example:\n",
 "\n",
 "``` {.python}\n",
 "x = torch.rand(2, 2)\n",
-"y = torch.rand(2, 2, device='gpu')\n",
+"y = torch.rand(2, 2, device='cuda')\n",
 "z = x + y  # exception will be thrown\n",
 "```\n"
 ]
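
A condensed sketch of the three torch.accelerator calls this notebook now relies on: the availability check, the device query, and tensor creation on the accelerator (assuming PyTorch 2.6 or later):

```python
import torch

if torch.accelerator.is_available():
    acc = torch.accelerator.current_accelerator()   # a torch.device object
    print(f"Accelerator: {acc}, count: {torch.accelerator.device_count()}")
    rand = torch.rand(2, 2, device=acc)             # created directly on it
    print(rand)
else:
    print('Sorry, CPU only.')
```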

_downloads/a19d8941b0ebb13c102e41c7e24bc5fb/knowledge_distillation_tutorial.ipynb

Lines changed: 4 additions & 2 deletions
@@ -73,8 +73,10 @@
 "import torchvision.transforms as transforms\n",
 "import torchvision.datasets as datasets\n",
 "\n",
-"# Check if GPU is available, and if not, use the CPU\n",
-"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
+"# Check if the current `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__\n",
+"# is available, and if not, use the CPU\n",
+"device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else \"cpu\"\n",
+"print(f\"Using {device} device\")"
 ]
 },
 {
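
One detail behind the new one-liner, sketched below: current_accelerator() returns a torch.device object, while the f-string wants a plain string, hence the .type access. Either form is valid as a device argument.

```python
import torch

if torch.accelerator.is_available():
    dev = torch.accelerator.current_accelerator()  # e.g. device(type='cuda')
    print(dev.type)                                # e.g. 'cuda'
    # Either form is accepted wherever a device argument is expected:
    a = torch.zeros(2, device=dev)
    b = torch.zeros(2, device=dev.type)
```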

_downloads/be017e7b39198fdf668c138fd8d57abe/tensors_deeper_tutorial.py

Lines changed: 22 additions & 26 deletions
@@ -632,34 +632,33 @@
 # does this *without* changing ``a`` - you can see that when we print
 # ``a`` again at the end, it retains its ``requires_grad=True`` property.
 #
-# Moving to GPU
+# Moving to `Accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
 # -------------
 #
-# One of the major advantages of PyTorch is its robust acceleration on
-# CUDA-compatible Nvidia GPUs. (“CUDA” stands for *Compute Unified Device
-# Architecture*, which is Nvidia’s platform for parallel computing.) So
-# far, everything we’ve done has been on CPU. How do we move to the faster
+# One of the major advantages of PyTorch is its robust acceleration on an
+# `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU.
+# So far, everything we’ve done has been on CPU. How do we move to the faster
 # hardware?
 #
-# First, we should check whether a GPU is available, with the
+# First, we should check whether an accelerator is available, with the
 # ``is_available()`` method.
 #
 # .. note::
-#    If you do not have a CUDA-compatible GPU and CUDA drivers
-#    installed, the executable cells in this section will not execute any
-#    GPU-related code.
+#    If you do not have an accelerator, the executable cells in this section will not execute any
+#    accelerator-related code.
 #
 
-if torch.cuda.is_available():
-    print('We have a GPU!')
+if torch.accelerator.is_available():
+    print('We have an accelerator!')
 else:
     print('Sorry, CPU only.')
 
 
 ##########################################################################
-# Once we’ve determined that one or more GPUs is available, we need to put
-# our data someplace where the GPU can see it. Your CPU does computation
-# on data in your computer’s RAM. Your GPU has dedicated memory attached
+# Once we’ve determined that one or more accelerators is available, we need to put
+# our data someplace where the accelerator can see it. Your CPU does computation
+# on data in your computer’s RAM. Your accelerator has dedicated memory attached
 # to it. Whenever you want to perform a computation on a device, you must
 # move *all* the data needed for that computation to memory accessible by
 # that device. (Colloquially, “moving the data to memory accessible by the
@@ -669,34 +668,31 @@
 # may do it at creation time:
 #
 
-if torch.cuda.is_available():
-    gpu_rand = torch.rand(2, 2, device='cuda')
+if torch.accelerator.is_available():
+    gpu_rand = torch.rand(2, 2, device=torch.accelerator.current_accelerator())
     print(gpu_rand)
 else:
     print('Sorry, CPU only.')
 
 
 ##########################################################################
 # By default, new tensors are created on the CPU, so we have to specify
-# when we want to create our tensor on the GPU with the optional
+# when we want to create our tensor on the accelerator with the optional
 # ``device`` argument. You can see when we print the new tensor, PyTorch
 # informs us which device it’s on (if it’s not on CPU).
 #
-# You can query the number of GPUs with ``torch.cuda.device_count()``. If
-# you have more than one GPU, you can specify them by index:
+# You can query the number of accelerators with ``torch.accelerator.device_count()``. If
+# you have more than one accelerator, you can specify them by index; taking CUDA as an example:
 # ``device='cuda:0'``, ``device='cuda:1'``, etc.
 #
 # As a coding practice, specifying our devices everywhere with string
 # constants is pretty fragile. In an ideal world, your code would perform
-# robustly whether you’re on CPU or GPU hardware. You can do this by
+# robustly whether you’re on CPU or accelerator hardware. You can do this by
 # creating a device handle that can be passed to your tensors instead of a
 # string:
 #
 
-if torch.cuda.is_available():
-    my_device = torch.device('cuda')
-else:
-    my_device = torch.device('cpu')
+my_device = torch.accelerator.current_accelerator() if torch.accelerator.is_available() else torch.device('cpu')
 print('Device: {}'.format(my_device))
 
 x = torch.rand(2, 2, device=my_device)
@@ -718,12 +714,12 @@
 # It is important to know that in order to do computation involving two or
 # more tensors, *all of the tensors must be on the same device*. The
 # following code will throw a runtime error, regardless of whether you
-# have a GPU device available:
+# have an accelerator device available; taking CUDA as an example:
 #
 # .. code-block:: python
 #
 #     x = torch.rand(2, 2)
-#     y = torch.rand(2, 2, device='gpu')
+#     y = torch.rand(2, 2, device='cuda')
 #     z = x + y  # exception will be thrown
 #
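
The same-device rule from the last hunk, as a runnable sketch; the try/except wrapper is added here purely for illustration and is not part of the tutorial. On a CPU-only machine both tensors land on the CPU and no error is raised.

```python
import torch

my_device = torch.accelerator.current_accelerator() if torch.accelerator.is_available() else torch.device('cpu')

x = torch.rand(2, 2)                    # stays on the CPU
y = torch.rand(2, 2, device=my_device)  # on the accelerator, if there is one
try:
    z = x + y                           # mixed devices raise a RuntimeError
except RuntimeError as err:
    print(f"Caught: {err}")

z = x.to(my_device) + y                 # fix: move x to the same device first
```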

_downloads/d90127b0deeb355be3a350521d770206/chatbot_tutorial.py

Lines changed: 7 additions & 5 deletions
@@ -108,8 +108,10 @@
 import json
 
 
-USE_CUDA = torch.cuda.is_available()
-device = torch.device("cuda" if USE_CUDA else "cpu")
+# If the current `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__ is available,
+# we will use it. Otherwise, we use the CPU.
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
+print(f"Using {device} device")
 
 
 ######################################################################
@@ -1318,16 +1320,16 @@ def evaluateInput(encoder, decoder, searcher, voc):
     encoder_optimizer.load_state_dict(encoder_optimizer_sd)
     decoder_optimizer.load_state_dict(decoder_optimizer_sd)
 
-# If you have CUDA, configure CUDA to call
+# If you have an accelerator, configure it to call
 for state in encoder_optimizer.state.values():
     for k, v in state.items():
         if isinstance(v, torch.Tensor):
-            state[k] = v.cuda()
+            state[k] = v.to(device)
 
 for state in decoder_optimizer.state.values():
     for k, v in state.items():
         if isinstance(v, torch.Tensor):
-            state[k] = v.cuda()
+            state[k] = v.to(device)
 
 # Run training iterations
 print("Starting Training!")
