133 | 133 | # - To mark some parameters in your neural network as **frozen parameters**.
134 | 134 | # - To **speed up computations** when you are only doing a forward pass, because computations on tensors that do
135 | 135 | #   not track gradients would be more efficient.
136 |     | -# For additional reference, you can view the autograd mechanics
137 |     | -# documentation: https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation
    | 136 | +# See this `note <https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation>`__ for additional reference.
138 | 137 |
139 | 138 | ######################################################################
140 | 139 |
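The two use cases named in the context lines above can be made concrete in a few lines. A minimal sketch, assuming an arbitrary small ``nn.Sequential`` model (the shapes and layers are illustrative, not from the tutorial):

```python
import torch
from torch import nn

# small stand-in model; shapes are arbitrary for this sketch
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))

# frozen parameters: the first layer no longer receives gradients
for p in model[0].parameters():
    p.requires_grad_(False)

# inference-only forward pass: no autograd graph is recorded
x = torch.randn(1, 8)
with torch.no_grad():
    y = model(x)

print(y.requires_grad)  # False: y is detached from any graph
```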
161 | 160 | # - accumulates them in the respective tensor’s ``.grad`` attribute
162 | 161 | # - using the chain rule, propagates all the way to the leaf tensors.
163 | 162 | #
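The accumulation described in these context lines is easy to observe directly. A tiny sketch (the graph here is made up purely for illustration):

```python
import torch

# leaf tensors: created by the user with requires_grad=True
w = torch.randn(3, requires_grad=True)
b = torch.randn((), requires_grad=True)
x = torch.ones(3)           # plain input; its gradients are not tracked

loss = (x * w).sum() + b    # forward pass builds the graph
loss.backward()             # chain rule propagates back to the leaves

print(w.grad)  # d(loss)/dw == x, i.e. tensor([1., 1., 1.])
print(b.grad)  # d(loss)/db == tensor(1.)
```

Calling ``backward()`` again would add to ``.grad`` rather than overwrite it, which is why training loops zero gradients each step.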
164 |     | -# We can also visualize the computational graph by the following 2 methods:
    | 163 | +# To get a sense of what this computational graph looks like, we can use the following tools:
165 | 164 | #
166 |     | -# 1. TORCH_LOGS="+autograd"
167 |     | -# By setting the TORCH_LOGS="+autograd" environment variable, we can enable runtime autograd logs for debugging.
    | 165 | +# 1. torchviz is a package to visualize computational graphs
    | 166 | +#    <https://github.com/szagoruyko/pytorchviz>
168 | 167 | #
169 |     | -# We can perform the logging in the following manner:
170 |     | -# TORCH_LOGS="+autograd" python test.py
    | 168 | +# 2. TORCH_LOGS="+autograd" enables logging for the backward pass.
    | 169 | +#    <https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>
171 | 170 | #
172 |     | -# 2. Torchviz
173 |     | -# Torchviz is a package to render the computational graph visually.
174 |     | -#
175 |     | -# We can generate an image for the computational graph in the example given below:
176 |     | -#
177 |     | -# import torch
178 |     | -# from torch import nn
179 |     | -# from torchviz import make_dot
180 |     | -#
181 |     | -# model = nn.Sequential(
182 |     | -#     nn.Linear(8, 16),
183 |     | -#     nn.ReLU(),
184 |     | -#     nn.Linear(16, 1)
185 |     | -# )
186 |     | -
187 |     | -# x = torch.randn(1, 8, requires_grad=True)
188 |     | -# y = model(x).mean()
189 |     | -
190 |     | -# log the internal operations using torchviz
191 |     | -# import os
192 |     | -# os.environ['TORCH_LOGS'] = "+autograd"
193 |     | -
194 |     | -# dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
195 |     | -# dot.render('simple_graph', format='png')
196 | 171 | #
197 | 172 | # .. note::
198 | 173 | #    **DAGs are dynamic in PyTorch**
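For readers who want the two tools in action, here is a sketch adapted from the example removed above. It assumes ``torchviz`` is installed (``pip install torchviz``, which also requires the Graphviz binaries). One fix over the removed code: as far as I know, ``TORCH_LOGS`` is read when ``torch`` is imported, so it is set on the command line here rather than via ``os.environ`` mid-script:

```python
# run as: TORCH_LOGS="+autograd" python visualize_graph.py
import torch
from torch import nn
from torchviz import make_dot

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))

x = torch.randn(1, 8, requires_grad=True)
y = model(x).mean()

# render y's graph; show_attrs/show_saved also display node attributes
# and the tensors saved for the backward pass
dot = make_dot(y, params=dict(model.named_parameters()),
               show_attrs=True, show_saved=True)
dot.render("simple_graph", format="png")  # writes simple_graph.png
```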