@@ -15695,24 +15695,6 @@ at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::D
   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
 }
 template <typename batch_rule_t, batch_rule_t batch_rule>
-at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, c10::string_view reduce, c10::optional<int64_t> output_size) {
-  c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
-  auto maybe_layer = maybeCurrentDynamicLayer();
-  TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
-  int64_t cur_level = maybe_layer->layerId();
-  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
-    return at::_ops::scatter_reduce_two::call(self, dim, index, reduce, output_size);
-  }
-  Tensor self_value;
-  optional<int64_t> self_bdim;
-  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
-  Tensor index_value;
-  optional<int64_t> index_bdim;
-  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
-  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, reduce, output_size);
-  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
-}
-template <typename batch_rule_t, batch_rule_t batch_rule>
 at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
   c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
   auto maybe_layer = maybeCurrentDynamicLayer();