Commit e4dfc2c

Fix code indents
1 parent b610673 commit e4dfc2c

4 files changed, +99 -99 lines changed

chapter_programming_model/Bridging_Python_and_C_C++_Functions.md

Lines changed: 22 additions & 22 deletions
@@ -66,33 +66,33 @@ In C++:
**ch02/code2.5.1**

```cpp
//custom_add.cpp
#include <torch/extension.h>
#include <pybind11/pybind11.h>

torch::Tensor custom_add(torch::Tensor a, torch::Tensor b) {
    return a + b;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("custom_add", &custom_add, "A custom add function");
}
```

In Python:

**ch02/code2.5.2**

```python
import torch
from torch.utils.cpp_extension import load

# Load the C++ extension
custom_extension = load(
    name='custom_extension',
    sources=['custom_add.cpp'],
    verbose=True
)
# Use your custom add function
a = torch.randn(10)
b = torch.randn(10)
c = custom_extension.custom_add(a, b)
```
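Besides the JIT `load` path shown above, the same extension could also be built ahead of time. The following `setup.py` is a minimal sketch under that assumption; the module name and source file simply mirror the example, and are otherwise illustrative:

```python
# setup.py -- sketch of an ahead-of-time build for the custom_add extension
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension

setup(
    name='custom_extension',
    ext_modules=[CppExtension('custom_extension', ['custom_add.cpp'])],
    cmdclass={'build_ext': BuildExtension},
)
```

After installing the package (e.g. `pip install .`), the module can be imported directly with `import custom_extension` rather than being compiled at runtime.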

chapter_programming_model/Functional_Programming.md

Lines changed: 4 additions & 4 deletions
@@ -85,10 +85,10 @@ for distributed parallelism in PyTorch static graphs. Code
**ch02/code2.4**

```
from functorch import combine_state_for_ensemble, vmap
minibatches = data[:num_models]
models = [MLP().to(device) for _ in range(num_models)]
fmodel, params, buffers = combine_state_for_ensemble(models)
predictions1_vmap = vmap(fmodel, out_dims=1)(params, buffers, minibatches)
```

Functorch introduces *vmap*, standing for "vectorized map". Its role
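As a concrete illustration of what *vmap* does (the `dot` function and the random inputs below are illustrative assumptions, not part of the book's code), it maps a per-example function over a leading batch dimension without an explicit Python loop:

```python
import torch
from functorch import vmap  # available as torch.vmap in recent PyTorch releases

def dot(x, y):
    # Operates on a single pair of 1-D vectors
    return torch.dot(x, y)

xs = torch.randn(8, 5)
ys = torch.randn(8, 5)
# vmap lifts dot over the leading dimension, producing one result per row
batched = vmap(dot)(xs, ys)  # shape: (8,)
```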

chapter_programming_model/Machine_Learning_Workflow.md

Lines changed: 53 additions & 53 deletions
@@ -18,29 +18,29 @@ APIs are defined to facilitate customization within the workflow
**ch02/code2.2.1**

```python
import pickle
from torch.utils.data import Dataset, DataLoader
data_path = '/path/to/data'
dataset = pickle.load(open(data_path, 'rb')) # Example for a pkl file
batch_size = ... # You can make it an argument of the script

class CustomDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        label = self.labels[idx]
        return sample, label

training_dataset = CustomDataset(dataset['training_data'], dataset['training_labels'])
testing_dataset = CustomDataset(dataset['testing_data'], dataset['testing_labels'])

training_dataloader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True) # Create a training dataloader
testing_dataloader = DataLoader(testing_dataset, batch_size=batch_size, shuffle=False) # Create a testing dataloader
```

2. **Model Definition API:** Once the data is preprocessed, users need
@@ -53,13 +53,13 @@ import pickle
**ch02/code2.2.2**

```python
import torch.nn as nn
class CustomModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(CustomModel, self).__init__()
        self.linear = nn.Linear(input_size, output_size) # A single linear layer

    def forward(self, x):
        return self.linear(x)
```

3. **Optimizer Definition API:** The outputs of models need to be
@@ -74,11 +74,11 @@ import torch.nn as nn
**ch02/code2.2.3**

```python
import torch.optim as optim
import torch.nn as nn
model = CustomModel(...)
# Optimizer definition (Adam, SGD, etc.)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
loss = nn.CrossEntropyLoss() # Loss function definition
```

4. **Training API:** Given a dataset, model, loss function, and
@@ -92,17 +92,17 @@ import torch.optim as optim
**ch02/code2.2.4**

```python
device = "cuda:0" if torch.cuda.is_available() else "cpu" # Select your training device
model.to(device) # Move the model to the training device
model.train() # Set the model to train mode
epochs = ... # You can make it an argument of the script
for epoch in range(epochs):
    for batch_idx, (data, target) in enumerate(training_dataloader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad() # Zero the parameter gradients
        output = model(data) # Forward pass
        loss_value = loss(output, target) # Compute the loss
        loss_value.backward() # Backpropagation
        optimizer.step()
```

5. **Testing and Debugging APIs:** Throughout the training process,
@@ -116,13 +116,13 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu" # Select your training
**ch02/code2.2.5**

```python
model.eval() # Set the model to evaluation mode
overall_accuracy = []
for batch_idx, (data, target) in enumerate(testing_dataloader):
    data, target = data.to(device), target.to(device)
    output = model(data) # Forward pass
    accuracy = your_metrics(output, target) # Compute the accuracy
    overall_accuracy.append(accuracy) # Accumulate the accuracy
# For debugging, you can print logs inside the training or evaluation loop, or use the Python debugger.
```

![Workflow within a machine learning system](../img/ch03/workflow.pdf)

chapter_programming_model/Neural_Network_Programming.md

Lines changed: 20 additions & 20 deletions
@@ -50,11 +50,11 @@ sequential data. Code `ch02/code2.3.1` shows some examples of NN Layers in Pytor
**ch02/code2.3.1**

```python
fc_layer = nn.Linear(16, 5) # A fully connected layer with 16 input features and 5 output features
relu_layer = nn.ReLU() # A ReLU activation layer
conv_layer = nn.Conv2d(3, 16, 3, padding=1) # A convolutional layer with 3 input channels, 16 output channels, and a 3x3 kernel
dropout_layer = nn.Dropout(0.2) # A dropout layer with 20% dropout rate
batch_norm_layer = nn.BatchNorm2d(16) # A batch normalization layer with 16 channels
layers = nn.Sequential(conv_layer, batch_norm_layer, relu_layer, fc_layer, dropout_layer) # A sequential container to combine layers
```

In tasks related to natural language processing, the
@@ -81,21 +81,21 @@ and `torch.nn.Module` in PyTorch. Code
**ch02/code2.3.2**

```python
class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, dropout_rate=0.5):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.bn1 = nn.BatchNorm1d(hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
```
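As a quick usage sketch (the sizes and batch below are illustrative assumptions, not from the book), the MLP defined above can be instantiated and applied to a random batch of flattened inputs:

```python
import torch

# Hypothetical sizes, for illustration only
model = MLP(input_size=784, hidden_size=256, num_classes=10)
x = torch.randn(32, 784)  # a batch of 32 flattened inputs
logits = model(x)         # output shape: (32, 10)
```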
Figure :numref:`ch03/model_build` demonstrates the intricate process of
