Skip to content

Commit 089e5ce

Browse files
committed
Update
[ghstack-poisoned]
1 parent 666d961 commit 089e5ce

File tree

3 files changed

+16
-11
lines changed

3 files changed

+16
-11
lines changed

kernels/portable/cpu/op_glu.cpp

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -41,7 +41,10 @@ Tensor& glu_out_tensor(
     Tensor& out) {
   const auto self_size = self.size(dim);
   ET_KERNEL_CHECK(
-      ctx, self.dim() <= kTensorDimensionLimit, InvalidArgument, out);
+      ctx,
+      self.dim() <= static_cast<ssize_t>(kTensorDimensionLimit),
+      InvalidArgument,
+      out);
   std::array<executorch::aten::SizesType, kTensorDimensionLimit> half_sizes;
   std::copy(self.sizes().begin(), self.sizes().end(), half_sizes.begin());
   half_sizes[dim] /= 2;
```
```diff
@@ -74,7 +77,7 @@ Tensor& glu_out_tensor(
   utils::apply_bitensor_elementwise_fn<
       CTYPE_COMPUTE,
       op_name,
-      utils::SupportedTensorDtypes::FLOATHBF16>(
+      utils::SupportedTensorDtypes::FLOATHBF16>(
       [](const auto val_a, const auto val_b) -> CTYPE_COMPUTE {
         // TODO: rewrite this to be vectorization-capable.
         const auto one = static_cast<decltype(val_a)>(1.0);
```
(The changed line differs only in leading whitespace, which the page extraction stripped.)

kernels/portable/cpu/util/broadcast_indexes_range.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -69,7 +69,8 @@ class BroadcastIndexesIterator {
         sizeof...(args) == kNumInputs && (std::is_same_v<Args, Tensor> && ...),
         "BroadcastIndexesIterator constructor requires kNumInputs input tensor"
         "arguments!");
-    if (support_noncontiguous_tensors || output_dim_or_zero_if_no_broadcasting_ != 0) {
+    if (support_noncontiguous_tensors ||
+        output_dim_or_zero_if_no_broadcasting_ != 0) {
       effective_input_broadcast_strides_ = {
           effective_input_broadcast_stride(output, args)...};
     }
```
```diff
@@ -261,8 +262,8 @@ class BroadcastIndexesIterator {
 template <std::size_t kNumInputs, bool support_noncontiguous_tensors = false>
 class BroadcastIndexesRange {
  public:
-  using iterator =
-      internal::BroadcastIndexesIterator<kNumInputs, support_noncontiguous_tensors>;
+  using iterator = internal::
+      BroadcastIndexesIterator<kNumInputs, support_noncontiguous_tensors>;
 
   template <typename... Args>
   BroadcastIndexesRange(const Tensor& output, const Args&... args)
```

kernels/portable/cpu/util/elementwise_util.h

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -56,7 +56,7 @@ namespace internal {
  * strides; normally, this is not strictly necessary because ExecuTorch
  * Tensors are contiguous.
  */
-struct SupportNoncontiguousTensors{
+struct SupportNoncontiguousTensors {
   explicit SupportNoncontiguousTensors() = default;
 };
 
```
```diff
@@ -245,7 +245,8 @@ inline void apply_elementwise_fn(
   apply_elementwise_fn_generic_impl<
       CTYPE_COMPUTE,
       op_name,
-      support_noncontiguous_tensors>(compute_fun, ctx, out, out_dtypes, inputs...);
+      support_noncontiguous_tensors>(
+      compute_fun, ctx, out, out_dtypes, inputs...);
 }
 
 /// DEPRECATED: prefer the variant with out_dtypes in the template argument.
```
```diff
@@ -343,8 +344,8 @@ inline void apply_bitensor_elementwise_fn(
   internal::apply_elementwise_fn<
       CTYPE_COMPUTE,
       op_name,
-      out_dtypes,
-      /*support_noncontiguous_tensors*/false>(
+      out_dtypes,
+      /*support_noncontiguous_tensors*/ false>(
       compute_fun,
       ctx,
       out,
```
(The first changed line differs only in indentation, lost in extraction; the second adds a space after the block comment.)
```diff
@@ -369,8 +370,8 @@ inline void apply_bitensor_elementwise_fn(
   internal::apply_elementwise_fn<
       CTYPE_COMPUTE,
       op_name,
-      out_dtypes,
-      /*support_noncontiguous_tensors*/true>(
+      out_dtypes,
+      /*support_noncontiguous_tensors*/ true>(
      compute_fun,
      ctx,
      out,
```
(The first changed line differs only in indentation, lost in extraction; the second adds a space after the block comment.)

0 commit comments

Comments
 (0)