
Commit ac41ef2

cyyeverzou3519 authored and committed

[functorch] Minor fix2 (pytorch/functorch#797)

* use range-based for
* modernize code
* reduce variable scope

1 parent ae7f69a · commit ac41ef2


4 files changed (+10, -12 lines)


functorch/functorch/csrc/BatchRulesLoss.cpp

Lines changed: 1 addition & 1 deletion
@@ -263,7 +263,6 @@ at::Tensor nll_loss_backward_decomposition(
   if (self.dim() < 2) {
     channel_dim = 0;
   }
-  auto self_ = self;
   auto target_ = target.unsqueeze(channel_dim);

   auto grad_output_ = grad_output;
@@ -280,6 +279,7 @@ at::Tensor nll_loss_backward_decomposition(

   Tensor weight_;
   if (weight && weight->defined()) {
+    auto self_ = self;
     auto shape = weight->sizes();
     VmapDimVector new_shape(self_.dim(), 1);
     new_shape[channel_dim] = shape[0];
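
This hunk narrows variable scope: `self_` was copied at function scope but is read only inside the `if (weight && weight->defined())` branch, so the copy now happens only on that path. A minimal sketch of the same pattern, with hypothetical names rather than the functorch sources:

#include <vector>

// `copy` is needed on only one path, so it is declared inside that
// branch instead of at function scope; the copy is made only when
// the branch actually runs.
int sum_if_enabled(const std::vector<int>& v, bool enabled) {
  int total = 0;
  if (enabled) {
    auto copy = v;
    for (int x : copy) {
      total += x;
    }
  }
  return total;
}

Besides skipping the copy on paths that never use it, the narrower scope documents exactly where the value matters.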

functorch/functorch/csrc/BatchRulesViews.cpp

Lines changed: 3 additions & 3 deletions
@@ -217,10 +217,10 @@ std::tuple<Tensor, optional<int64_t>> squeeze_batch_rule(const Tensor& self, opt
   int64_t new_batch_idx = 0;
   int64_t original_idx = 0;

-  for (auto it = shape.begin(); it != shape.end(); ++it) {
+  for (auto it : shape) {
     // Keep only dimensions != 1 and the batch dimension (irrespective of size).
-    if (*it != 1 || original_idx == bdim) {
-      squeezed_sizes.push_back(*it);
+    if (it != 1 || original_idx == bdim) {
+      squeezed_sizes.push_back(it);
       if (original_idx == bdim) {
         before_batch_idx = false;
       }
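
The loop swaps iterator bookkeeping for a range-based for; since `shape` holds plain integer extents, iterating by value and dropping the `*it` dereference suffices. (The patch keeps the name `it` even though it now denotes an element rather than an iterator.) A self-contained sketch of the idiom, using a hypothetical helper that is not part of the patch:

#include <cstdint>
#include <vector>

// Count the dimensions a squeeze would keep, i.e. those != 1.
int64_t count_kept_dims(const std::vector<int64_t>& shape) {
  int64_t kept = 0;
  // Range-based for: no begin()/end() pair and no dereference.
  for (auto dim : shape) {
    if (dim != 1) {
      ++kept;
    }
  }
  return kept;
}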

functorch/functorch/csrc/CustomFunction.cpp

Lines changed: 4 additions & 6 deletions
@@ -22,7 +22,7 @@ class PythonKernelHolder : public c10::OperatorKernel {
   // py::object destructor. This is because this object may outlive
   // libtorch_python, so we want to disarm the deallocation if that happens.
   // PyInterpreter does this correctly, pybind11 does not.
-  ~PythonKernelHolder() {
+  ~PythonKernelHolder() override {
     getPyInterpreter()->decref(func_, /*is_tensor*/false);
   }

@@ -201,7 +201,7 @@ variable_list GenericPythonBackward::apply(variable_list&& grads) {
   return grad_inputs;
 }

-typedef TensorList (*custom_python_function_t)(TensorList);
+using custom_python_function_t = TensorList (*)(TensorList);

 using torch::autograd::compute_requires_grad;
 using torch::autograd::collect_next_edges;
@@ -216,14 +216,12 @@ void customFunctionBoxed(const c10::OperatorHandle& op, torch::jit::Stack* stack

   std::string schema_name = op.schema().name();
   std::string vjp_fn_name = schema_name + "_vjp";
-  auto vjp_fn = c10::Dispatcher::singleton()
-      .findSchemaOrThrow(vjp_fn_name.c_str(), "");

   std::shared_ptr<GenericPythonBackward> grad_fn;
   if (_any_requires_grad) {
     grad_fn = std::shared_ptr<GenericPythonBackward>(new GenericPythonBackward(), deleteNode);
     grad_fn->set_next_edges(collect_next_edges(tensors));
-    grad_fn->backward_fn_ = std::move(vjp_fn);
+    grad_fn->backward_fn_ = c10::Dispatcher::singleton().findSchemaOrThrow(vjp_fn_name.c_str(), "");
     grad_fn->num_inputs_ = tensors_.size();
   }

@@ -246,7 +244,7 @@ void customFunctionBoxed(const c10::OperatorHandle& op, torch::jit::Stack* stack
       if (!is_input) {
         set_history(tensor, grad_fn);
       }
-      grad_fn->saved_tensors_.push_back(torch::autograd::SavedVariable(tensor, !is_input));
+      grad_fn->saved_tensors_.emplace_back(tensor, !is_input);
     }
   }
   torch::jit::push(stack, result);
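
This file bundles three modernizations: `override` on the destructor lets the compiler verify that the base class destructor is virtual, a `using` alias replaces the hard-to-read function-pointer `typedef`, and `emplace_back` constructs the `SavedVariable` in place instead of pushing a temporary. The dispatcher lookup for `vjp_fn_name` also moves inside `if (_any_requires_grad)`, so it runs only when a backward node is actually built. A compact sketch of the three idioms with toy types (nothing here is PyTorch API):

#include <utility>
#include <vector>

struct Base {
  virtual ~Base() = default;
};

struct Derived : Base {
  // `override` makes the compiler check that ~Base() is virtual.
  ~Derived() override = default;
};

// Alias declaration: reads left-to-right, unlike the equivalent
// `typedef int (*int_fn_t)(int);`.
using int_fn_t = int (*)(int);

int main() {
  std::vector<std::pair<int, int>> pairs;
  // Constructs the pair in place; push_back({1, 2}) would build a
  // temporary first and then move it.
  pairs.emplace_back(1, 2);
  return 0;
}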

functorch/functorch/csrc/PlumbingHelper.cpp

Lines changed: 2 additions & 2 deletions
@@ -21,8 +21,8 @@ Tensor makeBatched(const Tensor& tensor, optional<int64_t> bdim, int64_t level)

 std::vector<Tensor> makeBatchedVector(const std::vector<Tensor>& tensors, optional<int64_t> bdim, int64_t level) {
   std::vector<Tensor> res;
-  for (size_t idx = 0; idx < tensors.size(); idx++) {
-    res.emplace_back(makeBatched(tensors[idx], bdim, level));
+  for (const auto & tensor : tensors) {
+    res.emplace_back(makeBatched(tensor, bdim, level));
   }
   return res;
 }
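
Same range-based-for idiom as above, but iterating by `const` reference because the elements are `Tensor` objects, so each step avoids a copy. A sketch with `std::string` standing in for `Tensor` (a hypothetical helper, not the functorch API):

#include <string>
#include <vector>

// Build a transformed copy of the input vector.
std::vector<std::string> suffix_all(const std::vector<std::string>& names) {
  std::vector<std::string> out;
  out.reserve(names.size());  // size is known up front: allocate once
  // `const auto&` walks the vector without copying each element.
  for (const auto& name : names) {
    out.emplace_back(name + "_batched");
  }
  return out;
}

When the output size is known in advance, a `reserve` call as above is a cheap companion optimization to this loop style.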
