Skip to content

Fix portable library build on Windows #13260

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 7 commits into
base: gh/GregoryComer/117/head
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_amax.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include <c10/util/irange.h>
#include <cmath>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -51,7 +52,7 @@ Tensor& amax_out(
for (const auto out_ix : c10::irange(begin, end)) {
out_data[out_ix] = plan.execute<CTYPE>(
[](CTYPE v, CTYPE max_v) {
return std::isnan(v) || v > max_v ? v : max_v;
return utils::isnan_override(v) || v > max_v ? v : max_v;
},
out_ix);
}
Expand Down
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_amin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <c10/util/irange.h>
#include <cmath>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -50,7 +51,7 @@ Tensor& amin_out(
for (const auto out_ix : c10::irange(begin, end)) {
out_data[out_ix] = plan.execute<CTYPE>(
[](CTYPE v, CTYPE min_v) {
return std::isnan(v) || v < min_v ? v : min_v;
return utils::isnan_override(v) || v < min_v ? v : min_v;
},
out_ix);
}
Expand Down
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_argmax.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cmath>
#include <tuple>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -55,7 +56,7 @@ Tensor& argmax_out(
// the below condition as written is equivalent to
// !isnan(accval) && (isnan(v) || v > acc_val). See
// argument in op_argmin.cpp.
if (!std::isnan(acc_val) && !(v <= acc_val)) {
if (!utils::isnan_override(acc_val) && !(v <= acc_val)) {
acc_val = v;
acc_ix = ix;
}
Expand Down
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_argmin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cmath>
#include <tuple>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -62,7 +63,7 @@ Tensor& argmin_out(
// - false, so the result is true. The result is trivially
// - true for the above condition that uses isnan(v) as
// - well.
if (!std::isnan(acc_val) && !(v >= acc_val)) {
if (!utils::isnan_override(acc_val) && !(v >= acc_val)) {
acc_val = v;
acc_ix = ix;
}
Expand Down
7 changes: 4 additions & 3 deletions kernels/portable/cpu/op_max.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cmath>
#include <tuple>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -88,8 +89,8 @@ std::tuple<Tensor&, Tensor&> max_out(
for (const auto out_ix : c10::irange(begin, end)) {
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
if (!std::isnan(acc_val) &&
(std::isnan(v) || v > acc_val)) {
if (!utils::isnan_override(acc_val) &&
(utils::isnan_override(v) || v > acc_val)) {
acc_val = v;
acc_ix = ix;
}
Expand Down Expand Up @@ -132,7 +133,7 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
data_out[0] = lower_bound<CTYPE_OUT>();
for (const auto i : c10::irange(in.numel())) {
CTYPE_OUT val = static_cast<CTYPE_OUT>(data_in[i]);
if (std::isnan(val)) {
if (utils::isnan_override(val)) {
data_out[0] = val;
break;
}
Expand Down
7 changes: 4 additions & 3 deletions kernels/portable/cpu/op_min.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cmath>
#include <tuple>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>
Expand Down Expand Up @@ -88,8 +89,8 @@ std::tuple<Tensor&, Tensor&> min_out(
for (const auto out_ix : c10::irange(begin, end)) {
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
if (!std::isnan(acc_val) &&
(std::isnan(v) || v < acc_val)) {
if (!utils::isnan_override(acc_val) &&
(utils::isnan_override(v) || v < acc_val)) {
acc_val = v;
acc_ix = ix;
}
Expand Down Expand Up @@ -132,7 +133,7 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
data_out[0] = upper_bound<CTYPE_OUT>();
for (const auto i : c10::irange(in.numel())) {
CTYPE_OUT val = static_cast<CTYPE_OUT>(data_in[i]);
if (std::isnan(val)) {
if (utils::isnan_override(val)) {
data_out[0] = val;
break;
}
Expand Down
5 changes: 4 additions & 1 deletion kernels/portable/cpu/op_relu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include <cmath>

#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

Expand Down Expand Up @@ -45,7 +46,9 @@ Tensor& relu_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "relu.out", CTYPE, [&]() {
apply_unary_map_fn(
[](const CTYPE val_in) {
return (std::isnan(val_in) || val_in >= CTYPE(0)) ? val_in : CTYPE(0);
return (utils::isnan_override(val_in) || val_in >= CTYPE(0))
? val_in
: CTYPE(0);
},
in.const_data_ptr<CTYPE>(),
out.mutable_data_ptr<CTYPE>(),
Expand Down
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_sign.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <cstring>

#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

Expand Down Expand Up @@ -42,7 +43,7 @@ Tensor& sign_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "sign.out", CTYPE, [&] {
apply_unary_map_fn(
[](const CTYPE val_in) {
if (std::isnan(val_in)) {
if (utils::isnan_override(val_in)) {
return val_in;
} else {
return static_cast<CTYPE>((val_in > 0) - (val_in < 0));
Expand Down
4 changes: 3 additions & 1 deletion kernels/portable/cpu/op_topk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@
#include <cmath>
#include <tuple>

#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
Expand Down Expand Up @@ -62,7 +64,7 @@ bool float_less_than(T x, T y) {
if constexpr (std::is_integral_v<T>) {
return x < y;
}
return (!std::isnan(x) && std::isnan(y)) || x < y;
return (!utils::isnan_override(x) && utils::isnan_override(y)) || x < y;
}

template <typename CTYPE, typename elem_t = std::pair<CTYPE, int64_t>>
Expand Down
21 changes: 20 additions & 1 deletion kernels/portable/cpu/util/math_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,14 @@

#pragma once

#include <executorch/runtime/core/exec_aten/exec_aten.h>

#if defined(ET_USE_PYTORCH_HEADERS) && ET_USE_PYTORCH_HEADERS
#include <ATen/cpu/vec/vec.h>
#endif

#include <type_traits>

namespace torch {
namespace executor {
namespace native {
Expand All @@ -29,7 +33,8 @@ template <
typename std::enable_if<std::is_integral<INT_T>::value, bool>::type = true>
INT_T floor_divide(INT_T a, INT_T b) {
const auto quot = a / b;
if (std::signbit(a) == std::signbit(b)) {
// MSVC does not like signbit on integral types.
if ((a < 0) == (b < 0)) {
return quot;
}
const auto rem = a % b;
Expand All @@ -52,6 +57,20 @@ FLOAT_T floor_divide(FLOAT_T a, FLOAT_T b) {
return div;
}

/**
 * Portable replacement for std::isnan that compiles under MSVC. MSVC
 * rejects std::isnan calls with integer arguments because overload
 * resolution is ambiguous, so integral inputs are short-circuited to
 * false (an integer value can never be NaN).
 */
template <typename T>
bool isnan_override(T a) {
  if constexpr (std::is_integral_v<T>) {
    // Integers are never NaN; skip std::isnan entirely so MSVC never
    // has to resolve its ambiguous integer overloads.
    return false;
  } else {
    return std::isnan(a);
  }
}

/**
* Override min/max so we can emulate PyTorch's behavior with NaN entries.
*/
Expand Down
11 changes: 11 additions & 0 deletions shim_et/xplat/executorch/kernels/portable/op_registration_util.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,7 @@ ATEN_OPS = (
deps = [
"//executorch/runtime/core/exec_aten/util:scalar_type_util",
"//executorch/runtime/core/exec_aten/util:tensor_util",
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
Expand All @@ -255,6 +256,7 @@ ATEN_OPS = (
"//executorch/runtime/core/exec_aten/util:scalar_type_util",
"//executorch/runtime/core/exec_aten/util:tensor_util",
"//executorch/kernels/portable/cpu/util:index_util",
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
Expand All @@ -278,12 +280,14 @@ ATEN_OPS = (
op_target(
name = "op_argmax",
deps = [
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
op_target(
name = "op_argmin",
deps = [
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
Expand Down Expand Up @@ -806,6 +810,7 @@ ATEN_OPS = (
op_target(
name = "op_max",
deps = [
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
Expand Down Expand Up @@ -843,6 +848,7 @@ ATEN_OPS = (
op_target(
name = "op_min",
deps = [
"//executorch/kernels/portable/cpu/util:math_util",
"//executorch/kernels/portable/cpu/util:reduce_util",
],
),
Expand Down Expand Up @@ -1019,6 +1025,7 @@ ATEN_OPS = (
name = "op_relu",
deps = [
"//executorch/kernels/portable/cpu/util:functional_util",
"//executorch/kernels/portable/cpu/util:math_util",
],
),
op_target(
Expand Down Expand Up @@ -1129,6 +1136,7 @@ ATEN_OPS = (
name = "op_sign",
deps = [
"//executorch/kernels/portable/cpu/util:functional_util",
"//executorch/kernels/portable/cpu/util:math_util",
],
),
op_target(
Expand Down Expand Up @@ -1236,6 +1244,9 @@ ATEN_OPS = (
),
op_target(
name = "op_topk",
deps = [
"//executorch/kernels/portable/cpu/util:math_util",
]
),
op_target(
name = "op_transpose_copy",
Expand Down
3 changes: 1 addition & 2 deletions tools/cmake/preset/windows.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@


# keep sorted
set_overridable_option(EXECUTORCH_BUILD_EXECUTOR_RUNNER ON)
set_overridable_option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER ON)
set_overridable_option(EXECUTORCH_BUILD_EXTENSION_EVALUE_UTIL ON)
set_overridable_option(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR ON)
Expand All @@ -21,7 +22,5 @@ set_overridable_option(XNNPACK_ENABLE_AVX256VNNIGFNI OFF)
set_overridable_option(XNNPACK_ENABLE_AVX512BF16 OFF)

# Below options are not yet buildable on Windows, but should be.
set(EXECUTORCH_BUILD_PORTABLE_OPS OFF CACHE BOOL "")
#set_overridable_option(EXECUTORCH_BUILD_EXECUTOR_RUNNER ON)
#set_overridable_option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED ON)
#set_overridable_option(EXECUTORCH_BUILD_KERNELS_QUANTIZED ON)
Loading