Commit 0f41a18

Automated tutorials push

1 parent 2e932d7 commit 0f41a18
224 files changed: +27585, -31436 lines


_downloads/0e4c2becda3dfc54e1816634d49f8e73/introyt1_tutorial.py

Lines changed: 12 additions & 13 deletions
@@ -303,22 +303,21 @@ def num_flat_features(self, x):
 # The values passed to the transform are the means (first tuple) and the
 # standard deviations (second tuple) of the rgb values of the images in
 # the dataset. You can calculate these values yourself by running these
-# few lines of code:
-# ```
-# from torch.utils.data import ConcatDataset
-# transform = transforms.Compose([transforms.ToTensor()])
-# trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
+# few lines of code::
+#
+#    from torch.utils.data import ConcatDataset
+#    transform = transforms.Compose([transforms.ToTensor()])
+#    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
 #                                            download=True, transform=transform)
 #
-# #stack all train images together into a tensor of shape
-# #(50000, 3, 32, 32)
-# x = torch.stack([sample[0] for sample in ConcatDataset([trainset])])
+#    # stack all train images together into a tensor of shape
+#    # (50000, 3, 32, 32)
+#    x = torch.stack([sample[0] for sample in ConcatDataset([trainset])])
 #
-# #get the mean of each channel
-# mean = torch.mean(x, dim=(0,2,3)) #tensor([0.4914, 0.4822, 0.4465])
-# std = torch.std(x, dim=(0,2,3)) #tensor([0.2470, 0.2435, 0.2616])
-#
-# ```
+#    # get the mean of each channel
+#    mean = torch.mean(x, dim=(0,2,3)) # tensor([0.4914, 0.4822, 0.4465])
+#    std = torch.std(x, dim=(0,2,3)) # tensor([0.2470, 0.2435, 0.2616])
+#
 #
 # There are many more transforms available, including cropping, centering,
 # rotation, and reflection.
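As a side note, the commented-out snippet in this hunk is runnable once lifted out of the comments. A minimal standalone sketch, assuming torchvision is installed and CIFAR-10 can be downloaded to `./data`:

```python
# Minimal sketch of the statistics computation documented in the hunk above.
# Assumes torchvision is installed and CIFAR-10 can be downloaded to ./data.
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import ConcatDataset

transform = transforms.Compose([transforms.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)

# Stack all 50,000 training images into one (50000, 3, 32, 32) tensor.
x = torch.stack([sample[0] for sample in ConcatDataset([trainset])])

# Reduce over batch, height, and width to get one value per color channel.
mean = torch.mean(x, dim=(0, 2, 3))  # tensor([0.4914, 0.4822, 0.4465])
std = torch.std(x, dim=(0, 2, 3))    # tensor([0.2470, 0.2435, 0.2616])
print(mean, std)
```

Computing the statistics once and hard-coding the results, as the tutorial does, avoids a full pass over the dataset every time the transform pipeline is built.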

_downloads/3195443a0ced3cabc0ad643537bdb5cd/introyt1_tutorial.ipynb

Lines changed: 16 additions & 3 deletions
@@ -34,7 +34,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "2a47f826",
+"id": "91ce1b03",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -50,7 +50,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "b473eeee",
+"id": "7e2c89af",
 "metadata": {},
 "source": [
 "\n",
@@ -445,6 +445,7 @@
 "\n",
 "- `transforms.ToTensor()` converts images loaded by Pillow into\n",
 "  PyTorch tensors.\n",
+"\n",
 "- `transforms.Normalize()` adjusts the values of the tensor so that\n",
 "  their average is zero and their standard deviation is 1.0. Most\n",
 "  activation functions have their strongest gradients around x = 0, so\n",
@@ -453,7 +454,19 @@
 "  deviations (second tuple) of the rgb values of the images in the\n",
 "  dataset. You can calculate these values yourself by running these\n",
 "  few lines of code:\n",
-"  `` ` from torch.utils.data import ConcatDataset transform = transforms.Compose([transforms.ToTensor()]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) #stack all train images together into a tensor of shape #(50000, 3, 32, 32) x = torch.stack([sample[0] for sample in ConcatDataset([trainset])]) #get the mean of each channel mean = torch.mean(x, dim=(0,2,3)) #tensor([0.4914, 0.4822, 0.4465]) std = torch.std(x, dim=(0,2,3)) #tensor([0.2470, 0.2435, 0.2616]) ``\\`\n",
+"\n",
+"    from torch.utils.data import ConcatDataset\n",
+"    transform = transforms.Compose([transforms.ToTensor()])\n",
+"    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n",
+"                                        download=True, transform=transform)\n",
+"\n",
+"    # stack all train images together into a tensor of shape\n",
+"    # (50000, 3, 32, 32)\n",
+"    x = torch.stack([sample[0] for sample in ConcatDataset([trainset])])\n",
+"\n",
+"    # get the mean of each channel\n",
+"    mean = torch.mean(x, dim=(0,2,3)) # tensor([0.4914, 0.4822, 0.4465])\n",
+"    std = torch.std(x, dim=(0,2,3)) # tensor([0.2470, 0.2435, 0.2616])\n",
 "\n",
 "There are many more transforms available, including cropping, centering,\n",
 "rotation, and reflection.\n",

_downloads/4355e2cef7d17548f1e25f97a62828c4/template_tutorial.ipynb

Lines changed: 2 additions & 2 deletions
@@ -31,7 +31,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "1cc95491",
+"id": "26a1f870",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -47,7 +47,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "9d9e63f5",
+"id": "2d0e0bdc",
 "metadata": {},
 "source": [
 "\n",

_downloads/63a0f0fc7b3ffb15d3a5ac8db3d521ee/tensors_deeper_tutorial.ipynb

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "f07dd1e4",
+"id": "fcf61cdc",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -50,7 +50,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "0fb1c83e",
+"id": "33507b4e",
 "metadata": {},
 "source": [
 "\n",

_downloads/770632dd3941d2a51b831c52ded57aa2/trainingyt.ipynb

Lines changed: 2 additions & 2 deletions
@@ -35,7 +35,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "fffd7390",
+"id": "e0923e32",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -51,7 +51,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "12eb4b10",
+"id": "3b2b3c77",
 "metadata": {},
 "source": [
 "\n",

_downloads/c28f42852d456daf9af72da6c6909556/captumyt.ipynb

Lines changed: 2 additions & 2 deletions
@@ -37,7 +37,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "b08647f2",
+"id": "74c62868",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -53,7 +53,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "bb195516",
+"id": "358826a7",
 "metadata": {},
 "source": [
 "\n",

_downloads/e2e556f6b4693c2cef716dd7f40caaf6/tensorboardyt_tutorial.ipynb

Lines changed: 2 additions & 2 deletions
@@ -35,7 +35,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "0602192d",
+"id": "be005286",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -51,7 +51,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "299047e0",
+"id": "c0e01774",
 "metadata": {},
 "source": [
 "\n",

_downloads/ed9d4f94afb79f7dada6742a06c486a5/autogradyt_tutorial.ipynb

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "34c9794a",
+"id": "0813ff8d",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -50,7 +50,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "0262f5ab",
+"id": "79c60e38",
 "metadata": {},
 "source": [
 "\n",

_downloads/fe726e041160526cf828806536922cf6/modelsyt_tutorial.ipynb

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "f4f0f8be",
+"id": "129f8492",
 "metadata": {},
 "outputs": [],
 "source": [
@@ -50,7 +50,7 @@
 },
 {
 "cell_type": "markdown",
-"id": "c8220900",
+"id": "814173f7",
 "metadata": {},
 "source": [
 "\n",
