116 | 116 |
117 | 117 | with torch.no_grad():
118 | 118 |     z = torch.matmul(x, w)+b
119 |     | -prin(z.requires_grad)
    | 119 | +print(z.requires_grad)
120 | 120 |
121 | 121 |
122 | 122 | ######################################################################

133 | 133 | # - To mark some parameters in your neural network as **frozen parameters** (see the short sketch after this hunk).
134 | 134 | # - To **speed up computations** when you are only doing a forward pass, because computations on tensors that do
135 | 135 | #   not track gradients are more efficient.
136 |     | -
    | 136 | +# For additional reference, see the autograd mechanics documentation:
    | 137 | +# https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation
137 | 138 |
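A minimal sketch of the first bullet, freezing parameters by turning off ``requires_grad`` (the tiny ``nn.Linear`` model is a made-up example, not code from this tutorial):

    import torch
    from torch import nn

    model = nn.Linear(4, 2)

    # freeze every parameter: autograd stops recording operations on them
    for param in model.parameters():
        param.requires_grad_(False)

    x = torch.randn(1, 4)
    y = model(x)
    print(y.requires_grad)  # False: nothing in this forward pass tracks gradients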
138 | 139 | ######################################################################
139 | 140 |

160 | 161 | # - accumulates them in the respective tensor’s ``.grad`` attribute
161 | 162 | # - using the chain rule, propagates all the way to the leaf tensors.
162 | 163 | #
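A minimal sketch of the accumulation behaviour described in the two bullets above (the scalar tensor is a made-up example): calling ``backward()`` on a freshly rebuilt graph adds into ``.grad`` rather than overwriting it.

    import torch

    x = torch.tensor([2.0], requires_grad=True)  # a leaf tensor
    y = (x * x).sum()
    y.backward()
    print(x.grad)  # tensor([4.]), since dy/dx = 2x

    # gradients accumulate in .grad across backward calls
    y = (x * x).sum()
    y.backward()
    print(x.grad)  # tensor([8.])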
    | 164 | +# We can also visualize the computational graph using the following two methods:
    | 165 | +#
    | 166 | +# 1. ``TORCH_LOGS="+autograd"``
    | 167 | +#    Setting the ``TORCH_LOGS="+autograd"`` environment variable enables runtime autograd logging for debugging.
    | 168 | +#
    | 169 | +#    The logs can be enabled when launching a script, for example:
    | 170 | +#    ``TORCH_LOGS="+autograd" python test.py``
    | 171 | +#
    | 172 | +# 2. Torchviz
    | 173 | +#    Torchviz is a package that renders the computational graph visually.
    | 174 | +#
    | 175 | +# We can generate an image of the computational graph as in the example below:
    | 176 | +#
    | 177 | +# import torch
    | 178 | +# from torch import nn
    | 179 | +# from torchviz import make_dot
    | 180 | +#
    | 181 | +# model = nn.Sequential(
    | 182 | +#     nn.Linear(8, 16),
    | 183 | +#     nn.ReLU(),
    | 184 | +#     nn.Linear(16, 1)
    | 185 | +# )
    | 186 | +#
    | 187 | +# x = torch.randn(1, 8, requires_grad=True)
    | 188 | +# y = model(x).mean()
    | 189 | +#
    | 190 | +# # render the graph of operations that produced y
    | 191 | +# dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
    | 192 | +# dot.render('simple_graph', format='png')
    | 193 | +#
    | 194 | +# Note that setting ``os.environ['TORCH_LOGS']`` from inside the script may have no effect,
    | 195 | +# since the variable is read when ``torch`` initializes; set it before launching Python, as in method 1.
    | 196 | +#
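Besides the two methods above, the DAG can also be inspected with no extra dependencies by walking ``grad_fn`` (a minimal sketch; the model mirrors the Torchviz example above):

    import torch
    from torch import nn

    model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
    x = torch.randn(1, 8, requires_grad=True)
    y = model(x).mean()

    # every non-leaf tensor references the backward node that created it
    print(y.grad_fn)                 # MeanBackward0
    # next_functions links a node to the nodes of its inputs,
    # so the whole graph can be traversed back to the leaves
    print(y.grad_fn.next_functions)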
163 | 197 | # .. note::
164 | 198 | #   **DAGs are dynamic in PyTorch**
165 | 199 | #   An important thing to note is that the graph is recreated from scratch; after each