Skip to content

Commit f1b367c

Browse files
malfet authored and pytorchmergebot committed
[BE] Nested namespace in ATen/native headers (pytorch#103753)
Use nested namespace and `enum class` in `ATen/native` headers. In particular, it helps avoid polluting global namespace with `MAX`,`MIN` enum values. Pull Request resolved: pytorch#103753 Approved by: https://github.com/atalman, https://github.com/Skylion007
1 parent fd4beb7 commit f1b367c

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

74 files changed

+224
-289
lines changed

aten/src/ATen/native/Activation.h

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@ struct TensorIteratorBase;
1414
class TensorBase;
1515
}
1616

17-
namespace at { namespace native {
17+
namespace at::native {
1818

1919
// These constants control the approximation behavior of gelu function.
20-
enum GeluType {
20+
enum class GeluType {
2121
None, // Baseline Gelu
2222
Tanh, // Tahn Gelu Approximation
2323
END
@@ -33,6 +33,14 @@ static GeluType get_gelutype_enum(const c10::string_view approximate) {
3333
}
3434
}
3535

36+
static std::string gelutype_to_string(const GeluType type) {
37+
switch(type) {
38+
case GeluType::None: return "none";
39+
case GeluType::Tanh: return "tanh";
40+
default: TORCH_CHECK(false, "unknown GELU type: ", static_cast<int>(type));
41+
}
42+
}
43+
3644
using structured_activation_fn = void (*)(TensorIteratorBase&);
3745
using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
3846

@@ -87,6 +95,4 @@ DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
8795
DECLARE_DISPATCH(activation_fn, prelu_stub);
8896
DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
8997

90-
} // namespace native
91-
92-
} // namespace at
98+
} // namespace at::native

aten/src/ATen/native/AdaptivePooling.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,7 @@
66
#include <c10/util/irange.h>
77
#include <cmath>
88

9-
namespace at {
10-
11-
namespace native {
9+
namespace at::native {
1210

1311
using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
1412
using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
@@ -38,4 +36,4 @@ static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, c
3836
}
3937
}
4038

41-
}} // namespace at::native
39+
} // namespace at::native

aten/src/ATen/native/BatchLinearAlgebra.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ enum class TransposeType;
1515

1616
}
1717

18-
namespace at { namespace native {
18+
namespace at::native {
1919

2020
enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
2121

@@ -317,4 +317,4 @@ using ldl_solve_fn = void (*)(
317317
bool /*upper*/,
318318
bool /*hermitian*/);
319319
DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
320-
}} // namespace at::native
320+
} // namespace at::native

aten/src/ATen/native/BinaryOps.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ struct TensorIterator;
2121
struct TensorIteratorBase;
2222
}
2323

24-
namespace at { namespace native {
24+
namespace at::native {
2525

2626
inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
2727
TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool,
@@ -180,4 +180,4 @@ DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
180180
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
181181
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
182182

183-
}} // namespace at::native
183+
} // namespace at::native

aten/src/ATen/native/BucketizationUtils.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,7 @@
1010
#include <ATen/ops/result_type.h>
1111
#endif
1212

13-
namespace at {
14-
namespace native {
13+
namespace at::native {
1514

1615
// original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to
1716
// the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not
@@ -171,4 +170,4 @@ inline void searchsorted_pre_check(
171170
}
172171
}
173172

174-
}}
173+
} // namespace at::native

aten/src/ATen/native/CPUBlas.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,7 @@
77
#include <c10/core/ScalarType.h>
88
#include <c10/core/Scalar.h>
99

10-
namespace at {
11-
namespace native {
12-
namespace cpublas {
10+
namespace at::native::cpublas {
1311

1412
namespace internal {
1513
void normalize_last_dims(
@@ -161,4 +159,4 @@ void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy);
161159
void copy(int64_t n, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
162160
void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
163161

164-
}}} // namespace at::native::cpublas
162+
} // namespace at::native::cpublas

aten/src/ATen/native/CPUFallback.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
#include <c10/util/Metaprogramming.h>
88
#include <torch/library.h>
99

10-
namespace at { namespace native {
10+
namespace at::native {
1111

1212
// This function implements a boxed fallback to CPU.
1313
// External backends can add their own custom logging on top if it to customize their own CPU fallbacks.
@@ -42,5 +42,4 @@ using call_fallback_fn_symint = _call_fallback_fn<fallback_fn, Op, true, typenam
4242
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
4343
using call_fallback_fn = _call_fallback_fn<fallback_fn, Op, false, typename Op::schema>;
4444

45-
} // namespace native
46-
} // namespace at
45+
} // namespace at::native

aten/src/ATen/native/CanUse32BitIndexMath.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@ namespace at {
66
class TensorBase;
77
}
88

9-
namespace at { namespace native {
9+
namespace at::native {
1010

1111
TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
1212

13-
}}
13+
}

aten/src/ATen/native/ComplexHelper.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
// WARNING: this header contains non-inline functions and should be only
1616
// included from ONE cpp file
1717

18-
namespace at { namespace native {
18+
namespace at::native {
1919

2020
// View tensor with new dtype, storage offset, sizes and strides
2121
inline Tensor view_tensor(
@@ -94,4 +94,4 @@ Tensor view_as_complex(const Tensor& self) {
9494
return view_tensor(self, complex_type, new_storage_offset, new_sizes, new_strides);
9595
}
9696

97-
}} // namespace at::native
97+
} // namespace at::native

aten/src/ATen/native/CompositeRandomAccessor.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
#include <ATen/native/CompositeRandomAccessorCommon.h>
44

5-
namespace at { namespace native {
5+
namespace at::native {
66

77
struct TupleInfoCPU {
88
template <typename ...Types>
@@ -31,4 +31,4 @@ auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.da
3131
return std::get<N>(rh.data());
3232
}
3333

34-
}} // namespace at::native
34+
} // namespace at::native

0 commit comments

Comments (0)