Skip to content

Commit e201460

Browse files
cyyever authored and pytorchmergebot committed
[2/N] Fix Wextra-semi warnings (pytorch#139142)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#139142 Approved by: https://github.com/ezyang
1 parent 93d7f90 commit e201460

File tree

18 files changed

+209
-210
lines changed

18 files changed

+209
-210
lines changed

aten/src/ATen/TensorIndexing.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -319,7 +319,7 @@ inline void recordTensorIndex(
319319
outIndices.resize(*dim_ptr + 1);
320320
outIndices[*dim_ptr] = tensor;
321321
(*dim_ptr)++;
322-
};
322+
}
323323

324324
inline c10::List<::std::optional<Tensor>> typeConvertIndices(
325325
const Tensor& /*self*/,

aten/src/ATen/core/DistributionsHelper.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -172,8 +172,8 @@ template <typename RNG, typename ret_type,
172172
C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \
173173
}
174174

175-
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double);
176-
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
175+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double)
176+
DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float)
177177

178178
/**
179179
* Samples a normal distribution using the Box-Muller method

aten/src/ATen/core/function.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
namespace c10 {
1010
struct FunctionSchema;
11-
};
11+
}
1212

1313
namespace at {
1414
TORCH_API void launch(std::function<void()> func);

aten/src/ATen/core/ivalue_inl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1728,7 +1728,7 @@ DEFINE_TO(uint64_t, toInt)
17281728
DEFINE_TO(detail::_guarded_unsigned_long, toInt)
17291729
DEFINE_TO(int64_t, toInt)
17301730
DEFINE_TO(bool, toBool)
1731-
DEFINE_TO(c10::intrusive_ptr<caffe2::Blob>, toBlob);
1731+
DEFINE_TO(c10::intrusive_ptr<caffe2::Blob>, toBlob)
17321732
DEFINE_TO(c10::intrusive_ptr<ivalue::ConstantString>, toString)
17331733
DEFINE_TO(c10::intrusive_ptr<ivalue::Object>, toObject)
17341734
DEFINE_TO(at::Scalar, toScalar)

aten/src/ATen/core/jit_type_base.h

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -87,29 +87,29 @@ struct IsSingletonType : public std::integral_constant<bool, false> {};
8787
template <> struct IsSingletonType<Type> : public std::integral_constant<bool, true> {}; \
8888
}
8989

90-
TORCH_DECLARE_SINGLETON(AnyType);
91-
TORCH_DECLARE_SINGLETON(AnyEnumType);
92-
TORCH_DECLARE_SINGLETON(NumberType);
93-
TORCH_DECLARE_SINGLETON(FloatType);
94-
TORCH_DECLARE_SINGLETON(ComplexType);
95-
TORCH_DECLARE_SINGLETON(IntType);
96-
TORCH_DECLARE_SINGLETON(BoolType);
97-
TORCH_DECLARE_SINGLETON(StringType);
98-
TORCH_DECLARE_SINGLETON(StorageType);
99-
TORCH_DECLARE_SINGLETON(NoneType);
100-
TORCH_DECLARE_SINGLETON(GeneratorType);
101-
TORCH_DECLARE_SINGLETON(QuantizerType);
102-
TORCH_DECLARE_SINGLETON(QSchemeType);
103-
TORCH_DECLARE_SINGLETON(DeviceObjType);
104-
TORCH_DECLARE_SINGLETON(StreamObjType);
105-
TORCH_DECLARE_SINGLETON(CapsuleType);
106-
TORCH_DECLARE_SINGLETON(PyObjectType);
107-
TORCH_DECLARE_SINGLETON(ScalarTypeType);
108-
TORCH_DECLARE_SINGLETON(LayoutType);
109-
TORCH_DECLARE_SINGLETON(MemoryFormatType);
110-
TORCH_DECLARE_SINGLETON(AnyListType);
111-
TORCH_DECLARE_SINGLETON(AnyTupleType);
112-
TORCH_DECLARE_SINGLETON(AnyClassType);
90+
TORCH_DECLARE_SINGLETON(AnyType)
91+
TORCH_DECLARE_SINGLETON(AnyEnumType)
92+
TORCH_DECLARE_SINGLETON(NumberType)
93+
TORCH_DECLARE_SINGLETON(FloatType)
94+
TORCH_DECLARE_SINGLETON(ComplexType)
95+
TORCH_DECLARE_SINGLETON(IntType)
96+
TORCH_DECLARE_SINGLETON(BoolType)
97+
TORCH_DECLARE_SINGLETON(StringType)
98+
TORCH_DECLARE_SINGLETON(StorageType)
99+
TORCH_DECLARE_SINGLETON(NoneType)
100+
TORCH_DECLARE_SINGLETON(GeneratorType)
101+
TORCH_DECLARE_SINGLETON(QuantizerType)
102+
TORCH_DECLARE_SINGLETON(QSchemeType)
103+
TORCH_DECLARE_SINGLETON(DeviceObjType)
104+
TORCH_DECLARE_SINGLETON(StreamObjType)
105+
TORCH_DECLARE_SINGLETON(CapsuleType)
106+
TORCH_DECLARE_SINGLETON(PyObjectType)
107+
TORCH_DECLARE_SINGLETON(ScalarTypeType)
108+
TORCH_DECLARE_SINGLETON(LayoutType)
109+
TORCH_DECLARE_SINGLETON(MemoryFormatType)
110+
TORCH_DECLARE_SINGLETON(AnyListType)
111+
TORCH_DECLARE_SINGLETON(AnyTupleType)
112+
TORCH_DECLARE_SINGLETON(AnyClassType)
113113

114114
namespace detail {
115115
template <typename T, typename Enable = void>

aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -729,22 +729,22 @@ _scaled_dot_product_cudnn_attention_batch_rule(
729729

730730
// These need to be outside. String constant must be declared outside of a macro to be used as template param
731731
// NOLINTBEGIN(*array*)
732-
LINALG_CHECK_MATRIX_UNARY_ONE_OUT(cholesky, cholesky);
733-
LINALG_CHECK_MATRIX_UNARY_ONE_OUT(cholesky_inverse, cholesky_inverse);
734-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_cholesky_ex, linalg.cholesky);
735-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_eig, linalg.eig);
736-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_inv_ex, linalg.inv_ex);
737-
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(linalg_ldl_factor_ex, torch.linalg.ldl_factor_ex);
738-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_qr, linalg.qr);
739-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_slogdet, linalg.slogdet);
740-
LINALG_CHECK_MATRIX_BINARY_ONE_OUT(linalg_solve_triangular, linalg.solve_triangular);
741-
742-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(geqrf, geqrf);
743-
LINALG_CHECK_MATRIX_BINARY_TWO_OUT(triangular_solve, triangular_solve);
744-
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(_linalg_det, linalg.det);
745-
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(_linalg_eigh, linalg.eigh);
746-
LINALG_CHECK_MATRIX_UNARY_FOUR_OUT(_linalg_slogdet, linalg.slogdet);
747-
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(_linalg_svd, linalg.svd);
732+
LINALG_CHECK_MATRIX_UNARY_ONE_OUT(cholesky, cholesky)
733+
LINALG_CHECK_MATRIX_UNARY_ONE_OUT(cholesky_inverse, cholesky_inverse)
734+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_cholesky_ex, linalg.cholesky)
735+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_eig, linalg.eig)
736+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_inv_ex, linalg.inv_ex)
737+
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(linalg_ldl_factor_ex, torch.linalg.ldl_factor_ex)
738+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_qr, linalg.qr)
739+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(linalg_slogdet, linalg.slogdet)
740+
LINALG_CHECK_MATRIX_BINARY_ONE_OUT(linalg_solve_triangular, linalg.solve_triangular)
741+
742+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(geqrf, geqrf)
743+
LINALG_CHECK_MATRIX_BINARY_TWO_OUT(triangular_solve, triangular_solve)
744+
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(_linalg_det, linalg.det)
745+
LINALG_CHECK_MATRIX_UNARY_TWO_OUT(_linalg_eigh, linalg.eigh)
746+
LINALG_CHECK_MATRIX_UNARY_FOUR_OUT(_linalg_slogdet, linalg.slogdet)
747+
LINALG_CHECK_MATRIX_UNARY_THREE_OUT(_linalg_svd, linalg.svd)
748748
// NOLINTEND(*array*)
749749

750750
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {

aten/src/ATen/native/Activation.h

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -66,33 +66,33 @@ using gelu_fn = void (*)(TensorIteratorBase&, GeluType);
6666
using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType);
6767
using glu_jvp_fn = void (*)(TensorIteratorBase&);
6868

69-
DECLARE_DISPATCH(elu_fn, elu_stub);
70-
DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub);
71-
DECLARE_DISPATCH(softplus_fn, softplus_stub);
72-
DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub);
73-
DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub);
74-
DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub);
75-
DECLARE_DISPATCH(threshold_fn, threshold_stub);
76-
DECLARE_DISPATCH(gelu_fn, GeluKernel);
77-
DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel);
78-
DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub);
79-
DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub);
80-
DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub);
81-
DECLARE_DISPATCH(hardswish_fn, hardswish_stub);
82-
DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub);
83-
DECLARE_DISPATCH(shrink_fn, hardshrink_stub);
84-
DECLARE_DISPATCH(softshrink_fn, softshrink_stub);
85-
DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub);
86-
DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub);
87-
DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub);
88-
DECLARE_DISPATCH(structured_activation_fn, glu_stub);
89-
DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub);
90-
DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub);
91-
DECLARE_DISPATCH(structured_activation_fn, silu_stub);
92-
DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub);
93-
DECLARE_DISPATCH(structured_activation_fn, mish_stub);
94-
DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
95-
DECLARE_DISPATCH(activation_fn, prelu_stub);
96-
DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
69+
DECLARE_DISPATCH(elu_fn, elu_stub)
70+
DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub)
71+
DECLARE_DISPATCH(softplus_fn, softplus_stub)
72+
DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub)
73+
DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub)
74+
DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub)
75+
DECLARE_DISPATCH(threshold_fn, threshold_stub)
76+
DECLARE_DISPATCH(gelu_fn, GeluKernel)
77+
DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel)
78+
DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub)
79+
DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub)
80+
DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub)
81+
DECLARE_DISPATCH(hardswish_fn, hardswish_stub)
82+
DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub)
83+
DECLARE_DISPATCH(shrink_fn, hardshrink_stub)
84+
DECLARE_DISPATCH(softshrink_fn, softshrink_stub)
85+
DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub)
86+
DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub)
87+
DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub)
88+
DECLARE_DISPATCH(structured_activation_fn, glu_stub)
89+
DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub)
90+
DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub)
91+
DECLARE_DISPATCH(structured_activation_fn, silu_stub)
92+
DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub)
93+
DECLARE_DISPATCH(structured_activation_fn, mish_stub)
94+
DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub)
95+
DECLARE_DISPATCH(activation_fn, prelu_stub)
96+
DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub)
9797

9898
} // namespace at::native

aten/src/ATen/native/AdaptivePooling.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -10,23 +10,23 @@ namespace at::native {
1010

1111
using adaptive_avg_pooling2d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
1212
using adaptive_avg_pooling2d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
13-
DECLARE_DISPATCH(adaptive_avg_pooling2d_fn, adaptive_avg_pool2d_kernel);
14-
DECLARE_DISPATCH(adaptive_avg_pooling2d_backward_fn, adaptive_avg_pool2d_backward_kernel);
13+
DECLARE_DISPATCH(adaptive_avg_pooling2d_fn, adaptive_avg_pool2d_kernel)
14+
DECLARE_DISPATCH(adaptive_avg_pooling2d_backward_fn, adaptive_avg_pool2d_backward_kernel)
1515

1616
using adaptive_max_pooling2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
1717
using adaptive_max_pooling2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
18-
DECLARE_DISPATCH(adaptive_max_pooling2d_fn, adaptive_max_pool2d_kernel);
19-
DECLARE_DISPATCH(adaptive_max_pooling2d_backward_fn, adaptive_max_pool2d_backward_kernel);
18+
DECLARE_DISPATCH(adaptive_max_pooling2d_fn, adaptive_max_pool2d_kernel)
19+
DECLARE_DISPATCH(adaptive_max_pooling2d_backward_fn, adaptive_max_pool2d_backward_kernel)
2020

2121
using adaptive_avg_pooling3d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
2222
using adaptive_avg_pooling3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
23-
DECLARE_DISPATCH(adaptive_avg_pooling3d_fn, adaptive_avg_pool3d_kernel);
24-
DECLARE_DISPATCH(adaptive_avg_pooling3d_backward_fn, adaptive_avg_pool3d_backward_kernel);
23+
DECLARE_DISPATCH(adaptive_avg_pooling3d_fn, adaptive_avg_pool3d_kernel)
24+
DECLARE_DISPATCH(adaptive_avg_pooling3d_backward_fn, adaptive_avg_pool3d_backward_kernel)
2525

2626
using adaptive_max_pooling3d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
2727
using adaptive_max_pooling3d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
28-
DECLARE_DISPATCH(adaptive_max_pooling3d_fn, adaptive_max_pool3d_kernel);
29-
DECLARE_DISPATCH(adaptive_max_pooling3d_backward_fn, adaptive_max_pool3d_backward_kernel);
28+
DECLARE_DISPATCH(adaptive_max_pooling3d_fn, adaptive_max_pool3d_kernel)
29+
DECLARE_DISPATCH(adaptive_max_pooling3d_backward_fn, adaptive_max_pool3d_backward_kernel)
3030

3131
inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
3232
return (a / b) * c + ((a % b) * c) / b;

aten/src/ATen/native/BlasKernel.cpp

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -986,8 +986,8 @@ void gemv(char trans, int64_t m, int64_t n, scalar_t alpha, const scalar_t *a, i
986986

987987
#define INSTANTIATE(scalar_t, _) \
988988
template void gemv<scalar_t>(char trans, int64_t m, int64_t n, scalar_t alpha, const scalar_t *a, int64_t lda, const scalar_t *x, int64_t incx, scalar_t beta, scalar_t *y, int64_t incy);
989-
AT_FORALL_SCALAR_TYPES_AND2(BFloat16, Half, INSTANTIATE);
990-
AT_FORALL_COMPLEX_TYPES(INSTANTIATE);
989+
AT_FORALL_SCALAR_TYPES_AND2(BFloat16, Half, INSTANTIATE)
990+
AT_FORALL_COMPLEX_TYPES(INSTANTIATE)
991991
#undef INSTANTIATE
992992

993993
namespace blas_impl {
@@ -1123,19 +1123,19 @@ scalar_t vdot_impl(int64_t n, scalar_t* x, int64_t incx, scalar_t* y, int64_t in
11231123
#define INSTANTIATE_DOT_IMPL(scalar_t) \
11241124
template scalar_t dot_impl<scalar_t>( \
11251125
int64_t n, scalar_t * x, int64_t incx, scalar_t * y, int64_t incy);
1126-
INSTANTIATE_DOT_IMPL(uint8_t);
1127-
INSTANTIATE_DOT_IMPL(int8_t);
1128-
INSTANTIATE_DOT_IMPL(int16_t);
1129-
INSTANTIATE_DOT_IMPL(int);
1130-
INSTANTIATE_DOT_IMPL(int64_t);
1131-
INSTANTIATE_DOT_IMPL(c10::Half);
1132-
INSTANTIATE_DOT_IMPL(c10::BFloat16);
1126+
INSTANTIATE_DOT_IMPL(uint8_t)
1127+
INSTANTIATE_DOT_IMPL(int8_t)
1128+
INSTANTIATE_DOT_IMPL(int16_t)
1129+
INSTANTIATE_DOT_IMPL(int)
1130+
INSTANTIATE_DOT_IMPL(int64_t)
1131+
INSTANTIATE_DOT_IMPL(c10::Half)
1132+
INSTANTIATE_DOT_IMPL(c10::BFloat16)
11331133

11341134
#define INSTANTIATE_VDOT_IMPL(scalar_t) \
11351135
template scalar_t vdot_impl<scalar_t>( \
11361136
int64_t n, scalar_t * x, int64_t incx, scalar_t * y, int64_t incy);
1137-
INSTANTIATE_VDOT_IMPL(c10::complex<float>);
1138-
INSTANTIATE_VDOT_IMPL(c10::complex<double>);
1137+
INSTANTIATE_VDOT_IMPL(c10::complex<float>)
1138+
INSTANTIATE_VDOT_IMPL(c10::complex<double>)
11391139

11401140
#undef INSTANTIATE_DOT_IMPL
11411141

aten/src/ATen/native/ConvUtils.h

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -13,62 +13,62 @@ namespace at::native {
1313
using conv_depthwise2d_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
1414
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
1515
at::IntArrayRef, at::IntArrayRef, std::array<bool, 2>);
16-
DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub);
16+
DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub)
1717
using conv_depthwise3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
1818
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
1919
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
20-
DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub);
20+
DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub)
2121
using cudnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
2222
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
2323
at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
24-
DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub);
24+
DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub)
2525
using mps_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
2626
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
2727
at::IntArrayRef, int64_t, std::array<bool,3>);
28-
DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub);
28+
DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub)
2929
using cudnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
3030
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
3131
at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
32-
DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub);
32+
DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub)
3333
using miopen_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
3434
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
3535
at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
36-
DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub);
36+
DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub)
3737
using miopen_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
3838
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
3939
at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
40-
DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub);
40+
DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub)
4141
using miopen_depthwise_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
4242
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
4343
at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
44-
DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub);
44+
DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub)
4545
using mkldnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
4646
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
4747
at::IntArrayRef, int64_t, std::array<bool,3>);
48-
DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub);
48+
DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub)
4949
using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const std::optional<Tensor>&,
5050
IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t);
51-
DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub);
51+
DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub)
5252
using mkldnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
5353
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
5454
at::IntArrayRef, at::IntArrayRef, int64_t, std::array<bool,3>);
55-
DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub);
55+
DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub)
5656
using slow_conv_dilated2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
5757
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
5858
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
59-
DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub);
59+
DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub)
6060
using slow_conv_dilated3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
6161
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
6262
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
63-
DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub);
63+
DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub)
6464
using slow_conv_transpose2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
6565
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
6666
at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
67-
DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub);
67+
DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub)
6868
using slow_conv_transpose3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
6969
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
7070
at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
71-
DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub);
71+
DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub)
7272

7373
namespace {
7474
bool is_cudnnv8_heuristic_mode_b() {

0 commit comments

Comments (0)