@@ -10699,13 +10699,15 @@
  dispatch:
    CompositeExplicitAutograd: foreach_tensor_div_list_kernel_slow
    CUDA: foreach_tensor_div_list_kernel_cuda
+    MTIA: foreach_tensor_div_list_kernel_mtia

- func: _foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
  variants: function
  dispatch:
    CompositeExplicitAutograd: foreach_tensor_div_list_kernel_slow_
    CUDA: foreach_tensor_div_list_kernel_cuda_
+    MTIA: foreach_tensor_div_list_kernel_mtia_
  autogen: _foreach_div.List_out

- func: _foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
@@ -10729,13 +10731,15 @@
  dispatch:
    CompositeExplicitAutograd: foreach_tensor_div_tensor_kernel_slow
    CUDA: foreach_tensor_div_tensor_kernel_cuda
+    MTIA: foreach_tensor_div_tensor_kernel_mtia

- func: _foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
  device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
  variants: function
  dispatch:
    CompositeExplicitAutograd: foreach_tensor_div_tensor_kernel_slow_
    CUDA: foreach_tensor_div_tensor_kernel_cuda_
+    MTIA: foreach_tensor_div_tensor_kernel_mtia_
  autogen: _foreach_div.Tensor_out

- func: _foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
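For context, a minimal usage sketch of what these new dispatch entries enable: once the MTIA kernels are registered, the `_foreach_div` overloads route to them whenever all inputs live on an MTIA device. The snippet below is illustrative only, not part of the diff; it assumes a PyTorch build with MTIA support and uses the public `torch._foreach_div` / `torch._foreach_div_` bindings, falling back to CPU (and the slow composite kernel) when MTIA is unavailable.

```python
import torch

# Illustrative sketch (assumption: a build/runtime with MTIA support).
# Falls back to CPU, where the CompositeExplicitAutograd slow kernels run.
device = "mtia" if hasattr(torch, "mtia") and torch.mtia.is_available() else "cpu"

a = [torch.ones(4, device=device) for _ in range(3)]
b = [torch.full((4,), 2.0, device=device) for _ in range(3)]

out = torch._foreach_div(a, b)     # _foreach_div.List   -> foreach_tensor_div_list_kernel_mtia on MTIA
torch._foreach_div_(a, b)          # _foreach_div_.List  -> foreach_tensor_div_list_kernel_mtia_ (in-place)

divisor = torch.tensor(2.0, device=device)
torch._foreach_div_(out, divisor)  # _foreach_div_.Tensor -> foreach_tensor_div_tensor_kernel_mtia_
```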