
Commit b4a197e

Automated tutorials push
1 parent d00f58b commit b4a197e

File tree: 189 files changed (+8604, -8517 lines)


_downloads/0e6615c5a7bc71e01ff3c51217ea00da/tensorqs_tutorial.ipynb

Lines changed: 13 additions & 12 deletions
@@ -206,19 +206,20 @@
 "Operations on Tensors\n",
 "=====================\n",
 "\n",
-"Over 100 tensor operations, including arithmetic, linear algebra, matrix\n",
-"manipulation (transposing, indexing, slicing), sampling and more are\n",
-"comprehensively described\n",
+"Over 1200 tensor operations, including arithmetic, linear algebra,\n",
+"matrix manipulation (transposing, indexing, slicing), sampling and more\n",
+"are comprehensively described\n",
 "[here](https://pytorch.org/docs/stable/torch.html).\n",
 "\n",
-"Each of these operations can be run on the GPU (at typically higher\n",
-"speeds than on a CPU). If you're using Colab, allocate a GPU by going to\n",
-"Runtime \> Change runtime type \> GPU.\n",
+"Each of these operations can be run on the CPU and\n",
+"[Accelerator](https://pytorch.org/docs/stable/torch.html#accelerators)\n",
+"such as CUDA, MPS, MTIA, or XPU. If you're using Colab, allocate an\n",
+"accelerator by going to Runtime \> Change runtime type \> GPU.\n",
 "\n",
 "By default, tensors are created on the CPU. We need to explicitly move\n",
-"tensors to the GPU using `.to` method (after checking for GPU\n",
-"availability). Keep in mind that copying large tensors across devices\n",
-"can be expensive in terms of time and memory!\n"
+"tensors to the accelerator using `.to` method (after checking for\n",
+"accelerator availability). Keep in mind that copying large tensors\n",
+"across devices can be expensive in terms of time and memory!\n"
 ]
 },
 {
@@ -229,9 +230,9 @@
 },
 "outputs": [],
 "source": [
-"# We move our tensor to the GPU if available\n",
-"if torch.cuda.is_available():\n",
-"    tensor = tensor.to(\"cuda\")"
+"# We move our tensor to the current accelerator if available\n",
+"if torch.accelerator.is_available():\n",
+"    tensor = tensor.to(torch.accelerator.current_accelerator())"
 ]
 },
 {

_downloads/3fb82dc8278b08d5e5dee31ec1c16170/tensorqs_tutorial.py

Lines changed: 8 additions & 8 deletions
@@ -99,20 +99,20 @@
 # Operations on Tensors
 # ~~~~~~~~~~~~~~~~~~~~~~~
 #
-# Over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing,
+# Over 1200 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing,
 # indexing, slicing), sampling and more are
 # comprehensively described `here <https://pytorch.org/docs/stable/torch.html>`__.
 #
-# Each of these operations can be run on the GPU (at typically higher speeds than on a
-# CPU). If you’re using Colab, allocate a GPU by going to Runtime > Change runtime type > GPU.
+# Each of these operations can be run on the CPU and `Accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If you’re using Colab, allocate an accelerator by going to Runtime > Change runtime type > GPU.
 #
-# By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using
-# ``.to`` method (after checking for GPU availability). Keep in mind that copying large tensors
+# By default, tensors are created on the CPU. We need to explicitly move tensors to the accelerator using
+# ``.to`` method (after checking for accelerator availability). Keep in mind that copying large tensors
 # across devices can be expensive in terms of time and memory!

-# We move our tensor to the GPU if available
-if torch.cuda.is_available():
-    tensor = tensor.to("cuda")
+# We move our tensor to the current accelerator if available
+if torch.accelerator.is_available():
+    tensor = tensor.to(torch.accelerator.current_accelerator())


######################################################################
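Both tensorqs_tutorial diffs make the same substitution: the CUDA-only availability check becomes the device-agnostic torch.accelerator API. A minimal standalone sketch of the new pattern, assuming a PyTorch build recent enough to ship torch.accelerator (2.6 or later):

import torch

tensor = torch.ones(4, 4)

# torch.accelerator.is_available() reports whether any supported
# accelerator (CUDA, MPS, MTIA, XPU, ...) is present;
# current_accelerator() returns the matching torch.device.
if torch.accelerator.is_available():
    tensor = tensor.to(torch.accelerator.current_accelerator())

print(tensor.device)  # e.g. cuda:0 or mps:0; stays cpu otherwise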

_downloads/51f1e1167acc0fda8f9d8fd8597ee626/quickstart_tutorial.py

Lines changed: 4 additions & 10 deletions
@@ -84,16 +84,10 @@
 # To define a neural network in PyTorch, we create a class that inherits
 # from `nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_. We define the layers of the network
 # in the ``__init__`` function and specify how data will pass through the network in the ``forward`` function. To accelerate
-# operations in the neural network, we move it to the GPU or MPS if available.
-
-# Get cpu, gpu or mps device for training.
-device = (
-    "cuda"
-    if torch.cuda.is_available()
-    else "mps"
-    if torch.backends.mps.is_available()
-    else "cpu"
-)
+# operations in the neural network, we move it to the `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.
+
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
 print(f"Using {device} device")

 # Define model

_downloads/af0caf6d7af0dda755f4c9d7af9ccc2c/quickstart_tutorial.ipynb

Lines changed: 5 additions & 9 deletions
@@ -159,7 +159,10 @@
 "We define the layers of the network in the `__init__` function and\n",
 "specify how data will pass through the network in the `forward`\n",
 "function. To accelerate operations in the neural network, we move it to\n",
-"the GPU or MPS if available.\n"
+"the\n",
+"[accelerator](https://pytorch.org/docs/stable/torch.html#accelerators)\n",
+"such as CUDA, MPS, MTIA, or XPU. If the current accelerator is\n",
+"available, we will use it. Otherwise, we use the CPU.\n"
 ]
 },
 {
@@ -170,14 +173,7 @@
 },
 "outputs": [],
 "source": [
-"# Get cpu, gpu or mps device for training.\n",
-"device = (\n",
-"    \"cuda\"\n",
-"    if torch.cuda.is_available()\n",
-"    else \"mps\"\n",
-"    if torch.backends.mps.is_available()\n",
-"    else \"cpu\"\n",
-")\n",
+"device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else \"cpu\"\n",
 "print(f\"Using {device} device\")\n",
 "\n",
 "# Define model\n",
