diff --git a/backends/cadence/fusion_g3/operators/op_clamp.cpp b/backends/cadence/fusion_g3/operators/op_clamp.cpp
index fa8424e15eb..9f3f72a674f 100644
--- a/backends/cadence/fusion_g3/operators/op_clamp.cpp
+++ b/backends/cadence/fusion_g3/operators/op_clamp.cpp
@@ -21,13 +21,13 @@
 #include
 #include
 
-using ::executorch::aten::optional;
 using ::executorch::aten::Scalar;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::canCast;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 namespace cadence {
 namespace impl {
diff --git a/backends/cadence/fusion_g3/operators/op_dequantize.cpp b/backends/cadence/fusion_g3/operators/op_dequantize.cpp
index dd9d4f2a517..c3fca3bb7d4 100644
--- a/backends/cadence/fusion_g3/operators/op_dequantize.cpp
+++ b/backends/cadence/fusion_g3/operators/op_dequantize.cpp
@@ -24,7 +24,7 @@ using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
 
 template <typename T>
-using optional = ::executorch::aten::optional<T>;
+using optional = std::optional<T>;
 /* ScalarType in Executorch do not have support for below data types.
  * So, creating a placeholder for these data types. Once, ScalarTypes is
  * updated to have support for below data types, these can be removed and
@@ -51,7 +51,7 @@ void check_dequantize_per_tensor_args(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    ::executorch::aten::optional<ScalarType>& out_dtype,
+    std::optional<ScalarType>& out_dtype,
     Tensor& out) {
   ET_CHECK_MSG(
       input.scalar_type() == ScalarType::Byte ||
@@ -93,7 +93,7 @@ Tensor& dequantize_impl(
     float* scale_data,
     int* zero_point_data,
     int* axis,
-    ::executorch::aten::optional<ScalarType> out_dtype) {
+    std::optional<ScalarType> out_dtype) {
   const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size =
       input.sizes();
 
@@ -260,8 +260,8 @@ Tensor& dequantize_impl(
       }
     }
 
-    ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
-        optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
+    std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
+        ::executorch::aten::ArrayRef<int64_t>{
             dims, size_t(input.dim() - 1)}};
 
     // Actual dequantization logic
@@ -466,8 +466,8 @@ Tensor& dequantize_impl(
       }
     }
 
-    ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
-        optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
+    std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
+        ::executorch::aten::ArrayRef<int64_t>{
             dims, size_t(input.dim() - 1)}};
 
     // Actual dequantization logic
@@ -600,7 +600,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    ::executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out) {
 #ifdef OP_ARG_CHECK
   ET_CHECK_MSG(
@@ -639,12 +639,12 @@ Tensor& dequantize_per_channel_out(
     KernelRuntimeContext& context,
     const Tensor& input,
     const Tensor& scale,
-    const ::executorch::aten::optional<Tensor>& opt_zero_points,
+    const std::optional<Tensor>& opt_zero_points,
     int64_t axis,
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    ::executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out) {
   if (axis < 0) {
     axis += executorch::runtime::nonzero_dim(input);
diff --git a/backends/cadence/fusion_g3/operators/op_div.cpp b/backends/cadence/fusion_g3/operators/op_div.cpp
index 85e5da42765..a16e8ed02ba 100644
--- a/backends/cadence/fusion_g3/operators/op_div.cpp
+++ b/backends/cadence/fusion_g3/operators/op_div.cpp
@@ -19,14 +19,14 @@
 #include
 #include
 
-using ::executorch::aten::optional;
 using ::executorch::aten::Scalar;
 using ::executorch::aten::ScalarType;
-using ::executorch::aten::string_view;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::canCast;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
+using std::string_view;
 
 namespace cadence {
 namespace impl {
@@ -686,4 +686,4 @@ Tensor& div_scalar_mode_out(
 } // namespace native
 } // namespace G3
 } // namespace impl
-} // namespace cadence
\ No newline at end of file
+} // namespace cadence
diff --git a/backends/cadence/fusion_g3/operators/op_mean.cpp b/backends/cadence/fusion_g3/operators/op_mean.cpp
index 48f691a145a..85a8f482aac 100644
--- a/backends/cadence/fusion_g3/operators/op_mean.cpp
+++ b/backends/cadence/fusion_g3/operators/op_mean.cpp
@@ -17,11 +17,11 @@
 #include
 
 using ::executorch::aten::ArrayRef;
-using ::executorch::aten::optional;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 namespace cadence {
 namespace impl {
diff --git a/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp b/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
index 09c7c00fd2c..7e71df62d54 100644
--- a/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
+++ b/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
@@ -19,11 +19,11 @@
 #include
 
 using ::executorch::aten::IntArrayRef;
-using ::executorch::aten::optional;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 namespace cadence {
 namespace impl {
diff --git a/backends/cadence/fusion_g3/operators/op_quantize.cpp b/backends/cadence/fusion_g3/operators/op_quantize.cpp
index 2af77eca6c1..3ad399bca8b 100644
--- a/backends/cadence/fusion_g3/operators/op_quantize.cpp
+++ b/backends/cadence/fusion_g3/operators/op_quantize.cpp
@@ -329,8 +329,8 @@ Tensor& quantize_impl(
       }
     }
 
-    ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
-        optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
+    std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
+        ::executorch::aten::ArrayRef<int64_t>{
            dims, size_t(input.dim() - 1)}};
 
     // Actual quantization logic
@@ -534,8 +534,8 @@ Tensor& quantize_impl(
       }
     }
 
-    ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
-        optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
+    std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
+        ::executorch::aten::ArrayRef<int64_t>{
            dims, size_t(input.dim() - 1)}};
 
     // Actual quantization logic
diff --git a/backends/cadence/fusion_g3/operators/op_slice_copy.cpp b/backends/cadence/fusion_g3/operators/op_slice_copy.cpp
index 249da9144a9..b7fd37fd1ee 100644
--- a/backends/cadence/fusion_g3/operators/op_slice_copy.cpp
+++ b/backends/cadence/fusion_g3/operators/op_slice_copy.cpp
@@ -37,8 +37,8 @@ Tensor& slice_copy_Tensor_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
     int64_t dim,
-    ::executorch::aten::optional<int64_t> start_val,
-    ::executorch::aten::optional<int64_t> end_val,
+    std::optional<int64_t> start_val,
+    std::optional<int64_t> end_val,
     int64_t step,
     Tensor& out) {
   (void)ctx;
diff --git a/backends/cadence/fusion_g3/operators/operators.h b/backends/cadence/fusion_g3/operators/operators.h
index 52330f39ab3..641bb82f035 100644
--- a/backends/cadence/fusion_g3/operators/operators.h
+++ b/backends/cadence/fusion_g3/operators/operators.h
@@ -47,13 +47,12 @@ ::executorch::aten::Tensor& dequantize_per_channel_out(
     ::executorch::runtime::KernelRuntimeContext& context,
     const ::executorch::aten::Tensor& input,
     const ::executorch::aten::Tensor& scale,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>&
-        opt_zero_points,
+    const std::optional<::executorch::aten::Tensor>& opt_zero_points,
     int64_t axis,
     int64_t quant_min,
     int64_t quant_max,
     ::executorch::aten::ScalarType dtype,
-    ::executorch::aten::optional<::executorch::aten::ScalarType> out_dtype,
+    std::optional<::executorch::aten::ScalarType> out_dtype,
     ::executorch::aten::Tensor& out);
 
@@ -64,7 +63,7 @@ ::executorch::aten::Tensor& dequantize_per_tensor_out(
     int64_t quant_min,
     int64_t quant_max,
     ::executorch::aten::ScalarType dtype,
-    ::executorch::aten::optional<::executorch::aten::ScalarType> out_dtype,
+    std::optional<::executorch::aten::ScalarType> out_dtype,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& div_out(
@@ -77,7 +76,7 @@ ::executorch::aten::Tensor& div_out_mode(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& a,
     const ::executorch::aten::Tensor& b,
-    ::executorch::aten::optional<::executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& div_scalar_out(
@@ -90,7 +89,7 @@ ::executorch::aten::Tensor& div_scalar_mode_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& a,
     const ::executorch::aten::Scalar& b,
-    ::executorch::aten::optional<::executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& exp_out(
@@ -101,10 +100,9 @@
 ::executorch::aten::Tensor& mean_dim_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& in,
-    ::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
-        dim_list,
+    std::optional<::executorch::aten::ArrayRef<int64_t>> dim_list,
     bool keepdim,
-    ::executorch::aten::optional<::executorch::aten::ScalarType> dtype,
+    std::optional<::executorch::aten::ScalarType> dtype,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& mul_out(
@@ -127,8 +125,8 @@ native_layer_norm_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& input,
     ::executorch::aten::IntArrayRef normalized_shape,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>& weight,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>& bias,
+    const std::optional<::executorch::aten::Tensor>& weight,
+    const std::optional<::executorch::aten::Tensor>& bias,
     double eps,
     ::executorch::aten::Tensor& out,
     ::executorch::aten::Tensor& mean_out,
@@ -165,8 +163,8 @@ ::executorch::aten::Tensor& slice_copy_Tensor_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& in,
     int64_t dim,
-    ::executorch::aten::optional<int64_t> start_val,
-    ::executorch::aten::optional<int64_t> end_val,
+    std::optional<int64_t> start_val,
+    std::optional<int64_t> end_val,
     int64_t step,
     ::executorch::aten::Tensor& out);
 
@@ -226,15 +224,15 @@ ::executorch::aten::Tensor& where_out(
 ::executorch::aten::Tensor& clamp_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& in,
-    const ::executorch::aten::optional<::executorch::aten::Scalar>& min_opt,
-    const ::executorch::aten::optional<::executorch::aten::Scalar>& max_opt,
+    const std::optional<::executorch::aten::Scalar>& min_opt,
+    const std::optional<::executorch::aten::Scalar>& max_opt,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& clamp_tensor_out(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& in,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>& min_opt,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>& max_opt,
+    const std::optional<::executorch::aten::Tensor>& min_opt,
+    const std::optional<::executorch::aten::Tensor>& max_opt,
     ::executorch::aten::Tensor& out);
 
 ::executorch::aten::Tensor& transpose_copy_int_out(
diff --git a/backends/cadence/hifi/operators/op_clamp.cpp b/backends/cadence/hifi/operators/op_clamp.cpp
index 785e6f015d5..62fd127273b 100644
--- a/backends/cadence/hifi/operators/op_clamp.cpp
+++ b/backends/cadence/hifi/operators/op_clamp.cpp
@@ -51,8 +51,8 @@ namespace native {
 Tensor& clamp_Tensor_out(
     RuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& min_opt,
-    const executorch::aten::optional<Tensor>& max_opt,
+    const std::optional<Tensor>& min_opt,
+    const std::optional<Tensor>& max_opt,
     Tensor& out) {
   (void)ctx;
 
@@ -325,8 +325,8 @@ Tensor& clamp_Tensor_out(
 Tensor& clamp_tensor_out(
     RuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& min_opt,
-    const executorch::aten::optional<Tensor>& max_opt,
+    const std::optional<Tensor>& min_opt,
+    const std::optional<Tensor>& max_opt,
     Tensor& out) {
   return clamp_Tensor_out(ctx, in, min_opt, max_opt, out);
 }
diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp
index ecfd8f884dd..2c689ae4350 100644
--- a/backends/cadence/hifi/operators/op_div.cpp
+++ b/backends/cadence/hifi/operators/op_div.cpp
@@ -178,7 +178,7 @@ Tensor& div_out_mode(
     RuntimeContext& ctx,
     const Tensor& a,
     const Tensor& b,
-    executorch::aten::optional<executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,
diff --git a/backends/cadence/hifi/operators/op_quantized_fully_connected_out.cpp b/backends/cadence/hifi/operators/op_quantized_fully_connected_out.cpp
index f80216ed47e..66c2e997142 100644
--- a/backends/cadence/hifi/operators/op_quantized_fully_connected_out.cpp
+++ b/backends/cadence/hifi/operators/op_quantized_fully_connected_out.cpp
@@ -19,12 +19,12 @@ namespace native {
 
 using ::executorch::aten::ArrayRef;
 using ::executorch::aten::IntArrayRef;
-using ::executorch::aten::optional;
 using ::executorch::aten::Scalar;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::SizesType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 void inline _quantized_fully_connected_asym8u(
     const Tensor& in,
diff --git a/backends/cadence/hifi/operators/op_quantized_linear_out.cpp b/backends/cadence/hifi/operators/op_quantized_linear_out.cpp
index 3d9983b40c7..4bf71cd8838 100644
--- a/backends/cadence/hifi/operators/op_quantized_linear_out.cpp
+++ b/backends/cadence/hifi/operators/op_quantized_linear_out.cpp
@@ -20,11 +20,11 @@ namespace impl {
 namespace HiFi {
 namespace native {
 
-using ::executorch::aten::optional;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::getLeadingDims;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 // The nnlib kernel to compute quantized linear via matmul.
diff --git a/backends/cadence/hifi/operators/op_softmax.cpp b/backends/cadence/hifi/operators/op_softmax.cpp
index 25d3ad7d389..645b9febef0 100644
--- a/backends/cadence/hifi/operators/op_softmax.cpp
+++ b/backends/cadence/hifi/operators/op_softmax.cpp
@@ -50,7 +50,7 @@ Tensor& _softmax_out(
   // Adjust for negative dim
   dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;
 
-  const executorch::aten::optional<int64_t>& dim_t = dim;
+  const std::optional<int64_t>& dim_t = dim;
   const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
   const size_t size = in.size(d);
diff --git a/backends/cadence/hifi/operators/operators.h b/backends/cadence/hifi/operators/operators.h
index 105510e3421..ff0ce69baae 100644
--- a/backends/cadence/hifi/operators/operators.h
+++ b/backends/cadence/hifi/operators/operators.h
@@ -39,7 +39,7 @@ ::executorch::aten::Tensor& div_out_mode(
     ::executorch::runtime::KernelRuntimeContext& ctx,
     const ::executorch::aten::Tensor& a,
     const ::executorch::aten::Tensor& b,
-    ::executorch::aten::optional<::executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     ::executorch::aten::Tensor& out);
 
 void quantized_linear_out(
diff --git a/backends/cadence/hifi/operators/tests/test_op_div.cpp b/backends/cadence/hifi/operators/tests/test_op_div.cpp
index 98ee6cb63ee..790319d2db4 100644
--- a/backends/cadence/hifi/operators/tests/test_op_div.cpp
+++ b/backends/cadence/hifi/operators/tests/test_op_div.cpp
@@ -25,16 +25,16 @@ namespace HiFi {
 namespace native {
 namespace {
 
-using ::executorch::aten::optional;
 using ::executorch::aten::Scalar;
 using ::executorch::aten::ScalarType;
-using ::executorch::aten::string_view;
 using ::executorch::aten::Tensor;
 using ::executorch::aten::TensorImpl;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::KernelRuntimeContext;
 using ::executorch::runtime::runtime_init;
 using ::executorch::runtime::testing::TensorFactory;
+using std::optional;
+using std::string_view;
 
 class HiFiDivTest : public OperatorTest {
  public:
diff --git a/backends/cadence/reference/operators/operators.h b/backends/cadence/reference/operators/operators.h
index 2be956fe89e..637f38f8fec 100644
--- a/backends/cadence/reference/operators/operators.h
+++ b/backends/cadence/reference/operators/operators.h
@@ -27,7 +27,7 @@ using ::executorch::runtime::getLeadingDims;
 inline __attribute__((always_inline)) void linear_(
     const ::executorch::aten::Tensor& input,
     const ::executorch::aten::Tensor& weight,
-    const ::executorch::aten::optional<::executorch::aten::Tensor>& bias,
+    const std::optional<::executorch::aten::Tensor>& bias,
     ::executorch::aten::Tensor& output) {
   const float* __restrict__ input_data = input.const_data_ptr<float>();
   const float* __restrict__ weight_data = weight.const_data_ptr<float>();
diff --git a/backends/cadence/reference/operators/quantized_fully_connected_out.cpp b/backends/cadence/reference/operators/quantized_fully_connected_out.cpp
index 77b7dd94e9d..fe41c2d7e77 100644
--- a/backends/cadence/reference/operators/quantized_fully_connected_out.cpp
+++ b/backends/cadence/reference/operators/quantized_fully_connected_out.cpp
@@ -13,10 +13,10 @@ namespace impl {
 namespace reference {
 namespace native {
 
-using ::executorch::aten::optional;
 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::KernelRuntimeContext;
+using std::optional;
 
 void quantized_fully_connected_out(
     __ET_UNUSED KernelRuntimeContext& ctx,
diff --git a/backends/cadence/reference/operators/quantized_linear_out.cpp b/backends/cadence/reference/operators/quantized_linear_out.cpp
index 4f7ca9cc3ce..edd8634d56e 100644
--- a/backends/cadence/reference/operators/quantized_linear_out.cpp
+++ b/backends/cadence/reference/operators/quantized_linear_out.cpp
@@ -84,7 +84,7 @@ void quantized_linear_out(
     const Tensor& out_multiplier,
     const Tensor& out_shift,
     int64_t out_zero_point,
-    __ET_UNUSED const executorch::aten::optional<Tensor>& offset,
+    __ET_UNUSED const std::optional<Tensor>& offset,
     Tensor& out) {
   // TODO: refactor to use switch case as quantized_linear_per_tensor_out
   if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
@@ -127,7 +127,7 @@ void quantized_linear_per_tensor_out(
     const int64_t out_multiplier,
     const int64_t out_shift,
     const int64_t out_zero_point,
-    __ET_UNUSED const executorch::aten::optional<Tensor>& offset,
+    __ET_UNUSED const std::optional<Tensor>& offset,
     Tensor& out) {
 #define typed_quantized_linear_per_tensor(ctype, dtype) \
   case executorch::aten::ScalarType::dtype: {           \
diff --git a/backends/cadence/reference/operators/quantized_matmul_out.cpp b/backends/cadence/reference/operators/quantized_matmul_out.cpp
index d12fc533e73..cc0fa05351c 100644
--- a/backends/cadence/reference/operators/quantized_matmul_out.cpp
+++ b/backends/cadence/reference/operators/quantized_matmul_out.cpp
@@ -60,7 +60,7 @@ void inline _typed_quantized_matmul(
     int64_t X_zero_point,
     const Tensor& Y,
     int64_t Y_zero_point,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     int64_t out_multiplier,
     int64_t out_shift,
     int64_t out_zero_point,
@@ -114,7 +114,7 @@ void quantized_matmul_out(
     int64_t X_zero_point,
     const Tensor& Y,
     int64_t Y_zero_point,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     int64_t out_multiplier,
     int64_t out_shift,
     int64_t out_zero_point,
diff --git a/codegen/tools/gen_selected_op_variants.py b/codegen/tools/gen_selected_op_variants.py
index 95ae47f6f17..497cc624a63 100644
--- a/codegen/tools/gen_selected_op_variants.py
+++ b/codegen/tools/gen_selected_op_variants.py
@@ -17,7 +17,7 @@
 from torchgen.code_template import CodeTemplate
 
 
-ops_and_dtypes_template_str = """((executorch::aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
+ops_and_dtypes_template_str = """((std::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
 ops_and_dtypes_template = CodeTemplate(ops_and_dtypes_template_str)
 
 selected_kernel_dtypes_h_template_str = """#pragma once
diff --git a/codegen/tools/test/test_gen_selected_op_variants.py b/codegen/tools/test/test_gen_selected_op_variants.py
index bdde7ac2236..187a185b281 100644
--- a/codegen/tools/test/test_gen_selected_op_variants.py
+++ b/codegen/tools/test/test_gen_selected_op_variants.py
@@ -73,11 +73,11 @@ def test_generates_correct_header(self) -> None:
     const char *operator_name, executorch::aten::ScalarType scalar_type
 ) {
-  return ((executorch::aten::string_view(operator_name).compare("add.out") == 0)
+  return ((std::string_view(operator_name).compare("add.out") == 0)
    && (scalar_type == executorch::aten::ScalarType::Float || scalar_type == executorch::aten::ScalarType::Int))
-   || ((executorch::aten::string_view(operator_name).compare("mul.out") == 0)
+   || ((std::string_view(operator_name).compare("mul.out") == 0)
    && (scalar_type == executorch::aten::ScalarType::Float))
-   || ((executorch::aten::string_view(operator_name).compare("sub.out") == 0)
+   || ((std::string_view(operator_name).compare("sub.out") == 0)
    && (true));
 }
 """,
@@ -145,11 +145,11 @@ def test_generates_correct_header(self) -> None:
     const char *operator_name, executorch::aten::ScalarType scalar_type
 ) {
-  return ((executorch::aten::string_view(operator_name).compare("add.out") == 0)
+  return ((std::string_view(operator_name).compare("add.out") == 0)
    && (scalar_type == executorch::aten::ScalarType::Float || scalar_type == executorch::aten::ScalarType::Int))
-   || ((executorch::aten::string_view(operator_name).compare("mul.out") == 0)
+   || ((std::string_view(operator_name).compare("mul.out") == 0)
    && (scalar_type == executorch::aten::ScalarType::Float))
-   || ((executorch::aten::string_view(operator_name).compare("sub.out") == 0)
+   || ((std::string_view(operator_name).compare("sub.out") == 0)
    && (true));
 }
 """,
diff --git a/devtools/etdump/etdump_filter.h b/devtools/etdump/etdump_filter.h
index 29db43be8b9..64ea09f8bd3 100644
--- a/devtools/etdump/etdump_filter.h
+++ b/devtools/etdump/etdump_filter.h
@@ -17,8 +17,8 @@
 
 namespace executorch::etdump {
 
-using ::executorch::aten::string_view;
 using ::executorch::runtime::Result;
+using std::string_view;
 
 /**
  * ETDumpFilter is a class that filters intermediate output based on output's
diff --git a/extension/evalue_util/print_evalue.cpp b/extension/evalue_util/print_evalue.cpp
index 5d8da39d737..192b51fee5a 100644
--- a/extension/evalue_util/print_evalue.cpp
+++ b/extension/evalue_util/print_evalue.cpp
@@ -191,8 +191,7 @@ void print_tensor_list(
 
 void print_list_optional_tensor(
     std::ostream& os,
-    executorch::aten::ArrayRef<
-        executorch::aten::optional<executorch::aten::Tensor>> list) {
+    executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>> list) {
   os << "(len=" << list.size() << ")[";
   for (size_t i = 0; i < list.size(); ++i) {
     if (list.size() > 1) {
diff --git a/extension/evalue_util/test/print_evalue_test.cpp b/extension/evalue_util/test/print_evalue_test.cpp
index 03aebaf7892..b881e55d8a8 100644
--- a/extension/evalue_util/test/print_evalue_test.cpp
+++ b/extension/evalue_util/test/print_evalue_test.cpp
@@ -566,20 +566,18 @@ void expect_list_optional_tensor_output(
   // optional entries. It's important not to destroy these entries,
   // because the values list will own the underlying Tensors.
   auto unwrapped_values_memory = std::make_unique<uint8_t[]>(
-      sizeof(executorch::aten::optional<executorch::aten::Tensor>) *
-          wrapped_values.size());
-  executorch::aten::optional<executorch::aten::Tensor>* unwrapped_values =
-      reinterpret_cast<executorch::aten::optional<executorch::aten::Tensor>*>(
+      sizeof(std::optional<executorch::aten::Tensor>) * wrapped_values.size());
+  std::optional<executorch::aten::Tensor>* unwrapped_values =
+      reinterpret_cast<std::optional<executorch::aten::Tensor>*>(
           unwrapped_values_memory.get());
 
   // Must be initialized because BoxedEvalueList will use operator=() on each
   // entry.
   for (int i = 0; i < wrapped_values.size(); ++i) {
-    new (&unwrapped_values[i])
-        executorch::aten::optional<executorch::aten::Tensor>();
+    new (&unwrapped_values[i]) std::optional<executorch::aten::Tensor>();
   }
 
   ASSERT_LE(num_tensors, wrapped_values.size());
-  BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>> list(
+  BoxedEvalueList<std::optional<executorch::aten::Tensor>> list(
       wrapped_values.data(), unwrapped_values, num_tensors);
   EValue value(list);
   expect_output(value, expected);
diff --git a/extension/kernel_util/make_boxed_from_unboxed_functor.h b/extension/kernel_util/make_boxed_from_unboxed_functor.h
index 16b71594eb3..22bc54fafdf 100644
--- a/extension/kernel_util/make_boxed_from_unboxed_functor.h
+++ b/extension/kernel_util/make_boxed_from_unboxed_functor.h
@@ -96,16 +96,15 @@ struct evalue_to_arg final {
 };
 
 template <class T>
-struct evalue_to_arg<executorch::aten::optional<T>> final {
-  static executorch::aten::optional<T> call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<std::optional<T>> final {
+  static std::optional<T> call(executorch::runtime::EValue& v) {
     return v.toOptional<T>();
   }
 };
 
 template <>
-struct evalue_to_arg<executorch::aten::ArrayRef<executorch::aten::optional<executorch::aten::Tensor>>>
-    final {
-  static executorch::aten::ArrayRef<executorch::aten::optional<executorch::aten::Tensor>> call(
+struct evalue_to_arg<executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>> final {
+  static executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>> call(
       executorch::runtime::EValue& v) {
     return v.toListOptionalTensor();
   }
diff --git a/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp b/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp
index eb7bf288b7d..2c7bb1f9e2b 100644
--- a/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp
+++ b/extension/kernel_util/test/make_boxed_from_unboxed_functor_test.cpp
@@ -16,7 +16,6 @@
 using namespace ::testing;
 using executorch::aten::ArrayRef;
-using executorch::aten::optional;
 using executorch::aten::ScalarType;
 using executorch::aten::Tensor;
 using executorch::aten::TensorImpl;
@@ -26,6 +25,7 @@ using executorch::runtime::EValue;
 using executorch::runtime::get_op_function_from_registry;
 using executorch::runtime::KernelRuntimeContext;
 using executorch::runtime::registry_has_op_function;
+using std::optional;
 
 Tensor& my_op_out(KernelRuntimeContext& ctx, const Tensor& a, Tensor& out) {
   (void)ctx;
diff --git a/extension/llm/custom_ops/op_sdpa_impl.h b/extension/llm/custom_ops/op_sdpa_impl.h
index c6f985fb77a..295255245e9 100644
--- a/extension/llm/custom_ops/op_sdpa_impl.h
+++ b/extension/llm/custom_ops/op_sdpa_impl.h
@@ -348,7 +348,9 @@ inline bool data_index_step(T& x, const T& X, Args&&... args) {
   return false;
 }
 
-inline double calculate_scale(const Tensor& query, optional<double> scale) {
+inline double calculate_scale(
+    const Tensor& query,
+    std::optional<double> scale) {
   const auto softmax_scale =
       scale.has_value() ? scale.value() : 1.0 / std::sqrt(query.size(3));
   return softmax_scale;
diff --git a/extension/llm/custom_ops/op_sdpa_test.cpp b/extension/llm/custom_ops/op_sdpa_test.cpp
index d11cff0967e..6f43e726293 100644
--- a/extension/llm/custom_ops/op_sdpa_test.cpp
+++ b/extension/llm/custom_ops/op_sdpa_test.cpp
@@ -23,10 +23,10 @@ executorch::aten::Tensor op_scaled_dot_product_attention(
     const executorch::aten::Tensor& query,
     const executorch::aten::Tensor& key,
     const executorch::aten::Tensor& value,
-    const executorch::aten::optional<executorch::aten::Tensor>& attn_mask,
+    const std::optional<executorch::aten::Tensor>& attn_mask,
     double dropout_p,
     bool is_causal,
-    executorch::aten::optional<double> scale,
+    std::optional<double> scale,
     executorch::aten::Tensor& out) {
   executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::flash_attention_kernel_out(
@@ -94,10 +94,10 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_105) {
        0.7338,
        0.2203,
        0.6971});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   executorch::aten::Tensor ret_expected = tfFloat.make(
       {1, 1, 4, 4},
       {0.4473,
@@ -135,12 +135,12 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_11) {
   executorch::aten::Tensor value = tfFloat.make(
       {1, 1, 1, 8},
       {99.375, 80.125, -81.0, 8.5, -70.375, -54.25, -80.25, 34.125});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask =
-      executorch::aten::optional<executorch::aten::Tensor>(
+  std::optional<executorch::aten::Tensor> attn_mask =
+      std::optional<executorch::aten::Tensor>(
           tfFloat.full({1, 1}, std::numeric_limits<float>::infinity()));
   double dropout_p = 0.0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   std::vector<int32_t> out_size(query.sizes().begin(), query.sizes().end());
   executorch::aten::Tensor out = tfFloat.zeros(out_size);
   // Pytorch says these should be NAN
@@ -167,10 +167,10 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_13) {
       {65.0,   81.125,  8.125,  68.375, -54.25, -1.125, -73.25, -54.0,
       -28.75, -23.875, 49.0,   63.5,   96.375, 16.625, 79.5,   33.125,
       32.875, -73.75,  69.125, 7.25,   -35.0,  94.0,   6.75,   65.75});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0.0;
   bool is_causal = true;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   std::vector<int32_t> out_size(query.sizes().begin(), query.sizes().end());
   executorch::aten::Tensor out = tfFloat.zeros(out_size);
   executorch::aten::Tensor ret_expected = tfFloat.make(
@@ -235,10 +235,10 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_17) {
       -23.625, 85.875,  -25.875, 57.625, 50.75,  76.625, -72.5,  26.0,
       65.875,  13.125,  -19.625, 7.5,    -25.5,  40.25,  75.25,  -48.0,
       8.25,    5.125,   42.375,  23.75,  65.25,  -77.0,  35.625, -12.0});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0.0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   executorch::aten::Tensor ret_expected = tfFloat.make(
       {3, 2, 2, 6},
       {-26.375, -65.0,   55.5,    37.0,   90.0,   54.25,  83.75,  -33.75,
@@ -311,11 +311,10 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_18) {
       -27.875, 59.5,    15.5,    -90.0,   39.5,   -15.75, -16.375, -96.875,
      -96.125, -47.0,   0.75,    -45.875, 74.625, 46.0,   20.5,    -42.875,
      -55.0,   30.375,  -27.375, 99.375,  18.375, 0.375,  54.25,   -57.75});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0.0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale =
-      executorch::aten::optional<double>(-INFINITY);
+  std::optional<double> scale = std::optional<double>(-INFINITY);
   executorch::aten::Tensor ret_expected = tfFloat.make(
       {3, 2, 2, 6},
       {NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN, NAN,
@@ -388,8 +387,8 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_19) {
       15.25,   53.75,   44.625,  -22.0,  -84.0,   -7.25,  22.0,    25.875,
      17.625,  -86.875, 22.75,   -74.0,  -79.875, -68.0,  -71.125, -81.625,
      -4.125,  65.875,  1.875,   76.125, -43.75,  -15.25, -4.625,  -66.125});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask =
-      executorch::aten::optional<executorch::aten::Tensor>(tfFloat.make(
+  std::optional<executorch::aten::Tensor> attn_mask =
+      std::optional<executorch::aten::Tensor>(tfFloat.make(
          {3, 1, 2, 2, 4},
          {39.0,   49.375, -87.125, -99.125, 49.375,  -41.125, 26.25,  79.75,
           91.0,   -3.125, 65.75,   63.5,    -48.375, 43.375,  22.5,   -53.625,
@@ -399,7 +398,7 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_19) {
           2.25,   81.375, -87.125, 35.125,  -39.125, 43.5,    52.875, 39.5}));
   double dropout_p = 0.0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   executorch::aten::Tensor ret_expected = tfFloat.make(
       {3, 1, 2, 2, 6},
       {37.0,
@@ -494,15 +493,15 @@ TEST(OpScaledDotProductAttentionTest, CorrectnessTest_51) {
   executorch::aten::Tensor value = tfFloat.make(
       {1, 1, 3, 3},
       {70.375, 30.875, 72.125, 53.0, 39.125, -4.625, 26.5, 79.5, 88.625});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask =
-      executorch::aten::optional<executorch::aten::Tensor>(tfFloat.make(
+  std::optional<executorch::aten::Tensor> attn_mask =
+      std::optional<executorch::aten::Tensor>(tfFloat.make(
          {8, 3},
          {-59.25,  -26.25,  -3.0,   -24.125, 47.75,  92.375, 87.5,  21.5,
           64.5,    45.0,    -54.0,  17.375,  -67.75, 14.625, 88.75, 36.0,
           88.375,  25.75,   42.5,   -13.375, -82.75, -59.625, -21.125, 6.5}));
   double dropout_p = 0.0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
   executorch::aten::Tensor ret_expected = tfFloat.make(
       {1, 1, 8, 3},
       {70.375, 30.875, 72.125, 70.375, 30.875, 72.125, 70.375, 30.875,
diff --git a/extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp b/extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp
index 6c0496af32d..d2f93bcef32 100644
--- a/extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp
+++ b/extension/llm/custom_ops/op_sdpa_with_kv_cache_test.cpp
@@ -26,10 +26,10 @@ executorch::aten::Tensor op_sdpa_with_kv_cache(
     executorch::aten::Tensor& value_cache,
     const int64_t start_pos,
     const int64_t seq_len,
-    const executorch::aten::optional<executorch::aten::Tensor>& attn_mask,
+    const std::optional<executorch::aten::Tensor>& attn_mask,
     double dropout_p,
     bool is_causal,
-    executorch::aten::optional<double> scale,
+    std::optional<double> scale,
     executorch::aten::Tensor& out) {
   executorch::runtime::KernelRuntimeContext context{};
   return torch::executor::native::sdpa_with_kv_cache_out(
@@ -142,10 +142,10 @@ TEST(OpScaledDotProductAttentionTest, BasicTest) {
   executorch::aten::Tensor value_cache_1 = tfFloat.zeros({1, 5, 4, 4});
   executorch::aten::Tensor key_cache_2 = tfFloat.zeros({1, 5, 4, 4});
   executorch::aten::Tensor value_cache_2 = tfFloat.zeros({1, 5, 4, 4});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
 
   // start pos: 0 layer id 0
   executorch::aten::Tensor ret_expected_0 = tfFloat.make(
@@ -384,10 +384,10 @@ TEST(OpScaledDotProductAttentionTest, LargerTest) {
   executorch::aten::Tensor value_cache_1 = tfFloat.zeros({1, 8, 7, 4});
   executorch::aten::Tensor key_cache_2 = tfFloat.zeros({1, 8, 7, 4});
   executorch::aten::Tensor value_cache_2 = tfFloat.zeros({1, 8, 7, 4});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
 
   // start pos: 0 layer id 0
   executorch::aten::Tensor ret_expected_0 = tfFloat.make(
@@ -549,10 +549,10 @@ TEST(OpScaledDotProductAttentionTest, SequenceTest) {
   executorch::aten::Tensor key_cache_0 = tfFloat.zeros({1, 5, 8, 4});
   executorch::aten::Tensor value_cache_0 = tfFloat.zeros({1, 5, 8, 4});
-  executorch::aten::optional<executorch::aten::Tensor> attn_mask;
+  std::optional<executorch::aten::Tensor> attn_mask;
   double dropout_p = 0;
   bool is_causal = false;
-  executorch::aten::optional<double> scale;
+  std::optional<double> scale;
 
   // start pos: 0 layer id 0
   executorch::aten::Tensor ret_expected_0 = tfFloat.make(
diff --git a/extension/training/module/training_module.cpp b/extension/training/module/training_module.cpp
index d119738715e..51140c14e32 100644
--- a/extension/training/module/training_module.cpp
+++ b/extension/training/module/training_module.cpp
@@ -79,7 +79,7 @@ TrainingModule::execute_forward_backward(
     size_t name_index = 0;
     for (size_t grad_index = grad_start; grad_index < param_start;
          ++grad_index, ++name_index) {
-      executorch::aten::string_view fqn = fqn_list.at(name_index).toString();
+      std::string_view fqn = fqn_list.at(name_index).toString();
       gradients_map.insert({fqn, outputs.get().at(grad_index).toTensor()});
     }
   }
@@ -87,8 +87,7 @@ TrainingModule::execute_forward_backward(
   return user_outputs;
 }
 
-runtime::Result<
-    const std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+runtime::Result<const std::map<std::string_view, executorch::aten::Tensor>>
 TrainingModule::named_parameters(const std::string& method_name) {
   // If we haven't seen this method before, populate the dict.
   if (method_named_parameters_.find(method_name) ==
@@ -126,7 +125,7 @@ TrainingModule::named_parameters(const std::string& method_name) {
   size_t name_index = 0;
   for (size_t param_index = param_start; param_index < method->outputs_size();
        ++param_index, ++name_index) {
-    executorch::aten::string_view fqn = fqn_list.at(name_index).toString();
+    std::string_view fqn = fqn_list.at(name_index).toString();
     executorch::aten::Tensor param =
         method->get_output(param_index).toTensor();
     method_named_parameters_.at(method_name).insert({fqn, param});
@@ -135,8 +134,7 @@ TrainingModule::named_parameters(const std::string& method_name) {
   return method_named_parameters_.at(method_name);
 }
 
-runtime::Result<
-    const std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+runtime::Result<const std::map<std::string_view, executorch::aten::Tensor>>
 TrainingModule::named_gradients(const std::string& method_name) {
   if (method_named_gradients_.find(method_name) ==
       method_named_gradients_.end()) {
diff --git a/extension/training/module/training_module.h b/extension/training/module/training_module.h
index 7bf81623c04..d4050bea827 100644
--- a/extension/training/module/training_module.h
+++ b/extension/training/module/training_module.h
@@ -75,8 +75,7 @@ class ET_EXPERIMENTAL TrainingModule final
    * parameter tensor, or an error if the method is not a joint graph.
    */
   ET_EXPERIMENTAL
-  runtime::Result<
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+  runtime::Result<const std::map<std::string_view, executorch::aten::Tensor>>
   named_parameters(const std::string& method_name);
 
   /**
@@ -91,19 +90,18 @@ class ET_EXPERIMENTAL TrainingModule final
    * or has not been executed yet.
    */
   ET_EXPERIMENTAL
-  runtime::Result<
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+  runtime::Result<const std::map<std::string_view, executorch::aten::Tensor>>
   named_gradients(const std::string& method_name);
 
  private:
   std::unordered_map<
       std::string,
-      std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+      std::map<std::string_view, executorch::aten::Tensor>>
       method_named_gradients_;
 
   std::unordered_map<
       std::string,
-      std::map<executorch::aten::string_view, executorch::aten::Tensor>>
+      std::map<std::string_view, executorch::aten::Tensor>>
       method_named_parameters_;
 };
diff --git a/extension/training/optimizer/sgd.cpp b/extension/training/optimizer/sgd.cpp
index 1e7cae70ab8..eaef77becb2 100644
--- a/extension/training/optimizer/sgd.cpp
+++ b/extension/training/optimizer/sgd.cpp
@@ -66,7 +66,7 @@ void SGDParamGroup::set_options(std::unique_ptr<SGDOptions> options) {
   options_ = std::move(options);
 }
 
-const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
+const std::map<std::string_view, executorch::aten::Tensor>&
 SGDParamGroup::named_parameters() const {
   return named_parameters_;
 }
@@ -81,9 +81,8 @@ void SGD::add_param_group(const SGDParamGroup& param_group) {
   param_groups_.emplace_back(std::move(param_group_));
 }
 
-Error SGD::step(
-    const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
-        named_gradients) {
+Error SGD::step(const std::map<std::string_view, executorch::aten::Tensor>&
+                    named_gradients) {
   for (auto& group : param_groups_) {
     auto& options = static_cast<SGDOptions&>(group.options());
     auto weight_decay = options.weight_decay();
diff --git a/extension/training/optimizer/sgd.h b/extension/training/optimizer/sgd.h
index 055d561287d..edb04215378 100644
--- a/extension/training/optimizer/sgd.h
+++ b/extension/training/optimizer/sgd.h
@@ -151,11 +151,11 @@ class ET_EXPERIMENTAL SGDParamGroup {
    * qualified names.
    */
   /* implicit */ SGDParamGroup(
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
+      const std::map<std::string_view, executorch::aten::Tensor>&
           named_parameters)
       : named_parameters_(named_parameters) {}
   SGDParamGroup(
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
+      const std::map<std::string_view, executorch::aten::Tensor>&
           named_parameters,
       std::unique_ptr<SGDOptions> options)
       : named_parameters_(named_parameters), options_(std::move(options)) {}
@@ -164,12 +164,11 @@ class ET_EXPERIMENTAL SGDParamGroup {
   SGDOptions& options();
   const SGDOptions& options() const;
   void set_options(std::unique_ptr<SGDOptions> options);
-  const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
-      named_parameters() const;
+  const std::map<std::string_view, executorch::aten::Tensor>& named_parameters()
+      const;
 
  private:
-  std::map<executorch::aten::string_view, executorch::aten::Tensor>
-      named_parameters_;
+  std::map<std::string_view, executorch::aten::Tensor> named_parameters_;
   std::unique_ptr<SGDOptions> options_;
 };
 
@@ -189,7 +188,7 @@ class ET_EXPERIMENTAL SGD {
   }
 
   explicit SGD(
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
+      const std::map<std::string_view, executorch::aten::Tensor>&
          named_parameters,
       SGDOptions defaults)
       : SGD({SGDParamGroup(named_parameters)}, defaults) {}
@@ -206,7 +205,7 @@ class ET_EXPERIMENTAL SGD {
    * fully qualified name.
    */
   ::executorch::runtime::Error step(
-      const std::map<executorch::aten::string_view, executorch::aten::Tensor>&
+      const std::map<std::string_view, executorch::aten::Tensor>&
           named_gradients);
 
  private:
diff --git a/extension/training/optimizer/test/sgd_test.cpp b/extension/training/optimizer/test/sgd_test.cpp
index 51aad9ee2d1..6c4a73a2570 100644
--- a/extension/training/optimizer/test/sgd_test.cpp
+++ b/extension/training/optimizer/test/sgd_test.cpp
@@ -68,10 +68,8 @@ TEST_F(SGDOptimizerTest, SGDOptionsDefaultValuesTest) {
 TEST_F(SGDOptimizerTest, SGDOptimizerSimple) {
   TensorFactory<ScalarType::Float> tf;
 
-  std::map<executorch::aten::string_view, executorch::aten::Tensor>
-      named_parameters;
-  std::map<executorch::aten::string_view, executorch::aten::Tensor>
-      named_gradients;
+  std::map<std::string_view, executorch::aten::Tensor> named_parameters;
+  std::map<std::string_view, executorch::aten::Tensor> named_gradients;
 
   named_parameters.insert({"param1", tf.make({1, 1}, {1})});
 
@@ -92,8 +90,7 @@ TEST_F(SGDOptimizerTest, SGDOptimizerSimple) {
 TEST_F(SGDOptimizerTest, SGDOptimizerComplex) {
   TensorFactory<ScalarType::Float> tf;
 
-  std::map<executorch::aten::string_view, executorch::aten::Tensor>
-      named_parameters;
+  std::map<std::string_view, executorch::aten::Tensor> named_parameters;
 
   named_parameters.insert({"param1", tf.make({1, 1}, {1.0})});
   named_parameters.insert({"param2", tf.make({1, 1}, {2.0})});
@@ -101,8 +98,7 @@ TEST_F(SGDOptimizerTest, SGDOptimizerComplex) {
   SGD optimizer(named_parameters, SGDOptions{0.1, 0.1, 0, 2, true});
 
   for (int i = 0; i < 10; ++i) {
-    std::map<executorch::aten::string_view, executorch::aten::Tensor>
-        named_gradients;
+    std::map<std::string_view, executorch::aten::Tensor> named_gradients;
     // dummy gradient of -1 for all epochs
     named_gradients.insert({"param1", tf.make({1, 1}, {-1})});
     named_gradients.insert({"param2", tf.make({1, 1}, {-1})});
diff --git a/extension/training/pybindings/_training_lib.cpp b/extension/training/pybindings/_training_lib.cpp
index d37778f5301..2f181c1a867 100644
--- a/extension/training/pybindings/_training_lib.cpp
+++ b/extension/training/pybindings/_training_lib.cpp
@@ -42,8 +42,7 @@ struct PySGD final {
         params_()
 #endif
   {
-    std::map<executorch::aten::string_view, executorch::aten::Tensor>
-        cpp_inputs;
+    std::map<std::string_view, executorch::aten::Tensor> cpp_inputs;
     auto py_named_params =
         py::cast<std::vector<std::pair<std::string, at::Tensor>>>(named_params);
     const auto params_size = py::len(named_params);
@@ -52,7 +51,7 @@ struct PySGD final {
 
     for (auto pair : py_named_params) {
       fqns_.push_back(pair.first);
-      executorch::aten::string_view v{fqns_.back().c_str(), pair.first.size()};
+      std::string_view v{fqns_.back().c_str(), pair.first.size()};
 #ifndef USE_ATEN_LIB
       // convert at::Tensor to torch::executor::Tensor
       params_.emplace_back(alias_tensor_ptr_to_attensor(pair.second));
@@ -76,8 +75,7 @@ struct PySGD final {
   void step(const py::dict& py_dict) {
     auto py_named_gradients =
         py::cast<std::vector<std::pair<std::string, at::Tensor>>>(py_dict);
-    std::map<executorch::aten::string_view, executorch::aten::Tensor>
-        cpp_inputs;
+    std::map<std::string_view, executorch::aten::Tensor> cpp_inputs;
     std::vector<std::string> fqn;
 #ifndef USE_ATEN_LIB
diff --git a/kernels/aten/cpu/op__to_dim_order_copy.cpp b/kernels/aten/cpu/op__to_dim_order_copy.cpp
index a8216c9a8e9..0ed10f69d5a 100644
--- a/kernels/aten/cpu/op__to_dim_order_copy.cpp
+++ b/kernels/aten/cpu/op__to_dim_order_copy.cpp
@@ -23,7 +23,7 @@
 template <typename T>
 using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
 
 template <typename T>
-using Optional = executorch::aten::optional<T>;
+using Optional = std::optional<T>;
 
 namespace {
 Optional<executorch::aten::MemoryFormat> get_memory_format(OptionalArrayRef<int64_t> dim_order) {
diff --git a/kernels/optimized/cpu/binary_ops.h b/kernels/optimized/cpu/binary_ops.h
index acd90e6f86f..adbce351f65 100644
--- a/kernels/optimized/cpu/binary_ops.h
+++ b/kernels/optimized/cpu/binary_ops.h
@@ -233,7 +233,7 @@ Tensor& handle_broadcast_elementwise(
     const Tensor& b,
     Tensor& out,
     const ElementwiseOptimizedPath selected_optimized_path,
-    const executorch::aten::optional<Scalar>& alpha = {}) {
+    const std::optional<Scalar>& alpha = {}) {
   if ((selected_optimized_path ==
        ElementwiseOptimizedPath::kBroadcastLastDim) ||
      (selected_optimized_path ==
diff --git a/kernels/optimized/cpu/op_gelu.cpp b/kernels/optimized/cpu/op_gelu.cpp
index ebe8923b590..4641ec6cc9b 100644
--- a/kernels/optimized/cpu/op_gelu.cpp
+++ b/kernels/optimized/cpu/op_gelu.cpp
@@ -24,7 +24,7 @@ namespace native {
 
 using Tensor = executorch::aten::Tensor;
 using ScalarType = executorch::aten::ScalarType;
-using string_view = executorch::aten::string_view;
+using string_view = std::string_view;
 
 namespace {
diff --git a/kernels/optimized/cpu/op_native_layer_norm.cpp b/kernels/optimized/cpu/op_native_layer_norm.cpp
index 9d8c069a42c..341edb53d39 100644
--- a/kernels/optimized/cpu/op_native_layer_norm.cpp
+++ b/kernels/optimized/cpu/op_native_layer_norm.cpp
@@ -115,8 +115,8 @@ std::tuple<Tensor&, Tensor&, Tensor&> opt_native_layer_norm_out(
     KernelRuntimeContext& ctx,
     const Tensor& input,
     IntArrayRef normalized_shape,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     double eps,
     Tensor& out,
     Tensor& mean_out,
diff --git a/kernels/optimized/third-party/eigen b/kernels/optimized/third-party/eigen
index a39ade4ccf9..72944340994 160000
--- a/kernels/optimized/third-party/eigen
+++ b/kernels/optimized/third-party/eigen
@@ -1 +1 @@
-Subproject commit a39ade4ccf99df845ec85c580fbbb324f71952fa
+Subproject commit 729443409942a1816ddf74b95224003b83f4925c
diff --git a/kernels/portable/cpu/op__to_dim_order_copy.cpp b/kernels/portable/cpu/op__to_dim_order_copy.cpp
index 70fc3507f05..3a08f99b2a2 100644
--- a/kernels/portable/cpu/op__to_dim_order_copy.cpp
+++ b/kernels/portable/cpu/op__to_dim_order_copy.cpp
@@ -27,7 +27,7 @@
 template <typename T>
 using OptionalArrayRef = executorch::aten::OptionalArrayRef<T>;
 
 template <typename T>
-using Optional = executorch::aten::optional<T>;
+using Optional = std::optional<T>;
 
 namespace {
diff --git a/kernels/portable/cpu/op_argmax.cpp b/kernels/portable/cpu/op_argmax.cpp
index ffbc469c53d..72881453d39 100644
--- a/kernels/portable/cpu/op_argmax.cpp
+++ b/kernels/portable/cpu/op_argmax.cpp
@@ -18,8 +18,8 @@
 namespace torch {
 namespace executor {
 namespace native {
 
-using executorch::aten::optional;
 using executorch::aten::Tensor;
+using std::optional;
 
 Tensor& argmax_out(
     KernelRuntimeContext& ctx,
diff --git a/kernels/portable/cpu/op_argmin.cpp b/kernels/portable/cpu/op_argmin.cpp
index b0816596e4e..4e661c68694 100644
--- a/kernels/portable/cpu/op_argmin.cpp
+++ b/kernels/portable/cpu/op_argmin.cpp
@@ -18,8 +18,8 @@
 namespace torch {
 namespace executor {
 namespace native {
 
-using executorch::aten::optional;
 using executorch::aten::Tensor;
+using std::optional;
 
 Tensor& argmin_out(
     KernelRuntimeContext& ctx,
diff --git a/kernels/portable/cpu/op_avg_pool2d.cpp b/kernels/portable/cpu/op_avg_pool2d.cpp
index e7cd61ba20a..e41c1fa1afa 100644
--- a/kernels/portable/cpu/op_avg_pool2d.cpp
+++ b/kernels/portable/cpu/op_avg_pool2d.cpp
@@ -28,7 +28,7 @@ Tensor& avg_pool2d_out(
     IntArrayRef padding,
     bool ceil_mode,
     bool count_include_pad,
-    executorch::aten::optional<int64_t> divisor_override,
+    std::optional<int64_t> divisor_override,
     Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,
diff --git a/kernels/portable/cpu/op_cdist_forward.cpp b/kernels/portable/cpu/op_cdist_forward.cpp
index 03d6d47ec75..3e82584f820 100644
--- a/kernels/portable/cpu/op_cdist_forward.cpp
+++ b/kernels/portable/cpu/op_cdist_forward.cpp
@@ -15,8 +15,8 @@
 namespace torch {
 namespace executor {
 namespace native {
 
-using executorch::aten::optional;
 using executorch::aten::Tensor;
+using std::optional;
 
 namespace {
diff --git a/kernels/portable/cpu/op_clamp.cpp b/kernels/portable/cpu/op_clamp.cpp
index 6974789eccf..d7d9fab2f59 100644
--- a/kernels/portable/cpu/op_clamp.cpp
+++ b/kernels/portable/cpu/op_clamp.cpp
@@ -70,8 +70,8 @@ ET_NODISCARD bool check_bounds(
 Tensor& clamp_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Scalar>& min_opt,
-    const executorch::aten::optional<Scalar>& max_opt,
+    const std::optional<Scalar>& min_opt,
+    const std::optional<Scalar>& max_opt,
     Tensor& out) {
   bool has_min = min_opt.has_value();
   bool has_max = max_opt.has_value();
@@ -163,8 +163,8 @@ Tensor& clamp_out(
 Tensor& clamp_tensor_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& min_opt,
-    const executorch::aten::optional<Tensor>& max_opt,
+    const std::optional<Tensor>& min_opt,
+    const std::optional<Tensor>& max_opt,
     Tensor& out) {
   bool has_min = min_opt.has_value();
   bool has_max = max_opt.has_value();
diff --git a/kernels/portable/cpu/op_clone.cpp b/kernels/portable/cpu/op_clone.cpp
index 6026953a7f2..8cce3fe16bd 100644
--- a/kernels/portable/cpu/op_clone.cpp
+++ b/kernels/portable/cpu/op_clone.cpp
@@ -21,7 +21,7 @@ using Tensor = executorch::aten::Tensor;
 Tensor& clone_out(
     KernelRuntimeContext& context,
     const Tensor& self,
-    executorch::aten::optional<executorch::aten::MemoryFormat> memory_format,
+    std::optional<executorch::aten::MemoryFormat> memory_format,
     Tensor& out) {
   (void)context;
diff --git a/kernels/portable/cpu/op_convolution.cpp b/kernels/portable/cpu/op_convolution.cpp
index b5eb8d1f5db..68991a09b33 100644
--- a/kernels/portable/cpu/op_convolution.cpp
+++ b/kernels/portable/cpu/op_convolution.cpp
@@ -44,7 +44,7 @@ void conv2d_impl(
     const CTYPE* const w_ptr,
     SizesArrayRef w_sizes,
     StridesArrayRef w_strides,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     const char* const bias_ptr,
     LoadFn load_bias,
     IntArrayRef stride,
@@ -195,7 +195,7 @@ template <typename CTYPE>
 void convolution_wrapper(
     const Tensor& in,
     const Tensor& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     LoadFn load_bias,
     IntArrayRef stride,
     IntArrayRef padding,
@@ -350,7 +350,7 @@ Tensor& convolution_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
     const Tensor& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     IntArrayRef stride,
     IntArrayRef padding,
     IntArrayRef dilation,
diff --git a/kernels/portable/cpu/op_convolution_backward.cpp b/kernels/portable/cpu/op_convolution_backward.cpp
index 6a89b0da769..2535ed4eb6c 100644
--- a/kernels/portable/cpu/op_convolution_backward.cpp
+++ b/kernels/portable/cpu/op_convolution_backward.cpp
@@ -64,7 +64,7 @@ bool check_convolution_backward_args(
   check_convolution_args(
       input,
       weight,
-      executorch::aten::optional<Tensor>(),
+      std::optional<Tensor>(),
       stride,
       padding,
       dilation,
diff --git a/kernels/portable/cpu/op_div.cpp b/kernels/portable/cpu/op_div.cpp
index 70f9479c464..51a65747b33 100644
--- a/kernels/portable/cpu/op_div.cpp
+++ b/kernels/portable/cpu/op_div.cpp
@@ -78,7 +78,7 @@ Tensor& div_out_mode(
     KernelRuntimeContext& ctx,
     const Tensor& a,
     const Tensor& b,
-    executorch::aten::optional<executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     Tensor& out) {
   if (!mode.has_value()) {
     return div_out(ctx, a, b, out);
@@ -209,7 +209,7 @@ Tensor& div_scalar_mode_out(
     KernelRuntimeContext& ctx,
     const Tensor& a,
     const Scalar& b,
-    executorch::aten::optional<executorch::aten::string_view> mode,
+    std::optional<std::string_view> mode,
     Tensor& out) {
   if (!mode.has_value()) {
     return div_scalar_out(ctx, a, b, out);
diff --git a/kernels/portable/cpu/op_empty.cpp b/kernels/portable/cpu/op_empty.cpp
index 3e3ee66cac8..c2f5efd7001 100644
--- a/kernels/portable/cpu/op_empty.cpp
+++ b/kernels/portable/cpu/op_empty.cpp
@@ -26,7 +26,7 @@ using executorch::aten::Tensor;
 Tensor& empty_out(
     KernelRuntimeContext& context,
     IntArrayRef size,
-    executorch::aten::optional<executorch::aten::MemoryFormat> memory_format,
+    std::optional<executorch::aten::MemoryFormat> memory_format,
     Tensor& out) {
   (void)context;
diff --git a/kernels/portable/cpu/op_gelu.cpp b/kernels/portable/cpu/op_gelu.cpp
index c3e5c62d677..0489d3d12b4 100644
--- a/kernels/portable/cpu/op_gelu.cpp
+++ b/kernels/portable/cpu/op_gelu.cpp
@@ -19,7 +19,7 @@ namespace native {
 
 using Tensor = executorch::aten::Tensor;
 using ScalarType = executorch::aten::ScalarType;
-using string_view = executorch::aten::string_view;
+using string_view = std::string_view;
 
 Tensor& gelu_out(
     KernelRuntimeContext& ctx,
diff --git a/kernels/portable/cpu/op_index.cpp b/kernels/portable/cpu/op_index.cpp
index c7887eaecb0..a81ce6ad737 100644
--- a/kernels/portable/cpu/op_index.cpp
+++ b/kernels/portable/cpu/op_index.cpp
@@ -20,8 +20,7 @@
 namespace executor {
 namespace native {
 
 using Tensor = executorch::aten::Tensor;
-using TensorOptList =
-    executorch::aten::ArrayRef<executorch::aten::optional<Tensor>>;
+using TensorOptList = executorch::aten::ArrayRef<std::optional<Tensor>>;
 
 Tensor& index_Tensor_out(
     KernelRuntimeContext& ctx,
diff --git a/kernels/portable/cpu/op_linear_scratch_example.cpp b/kernels/portable/cpu/op_linear_scratch_example.cpp
index 904a5746ec7..b7a263a199f 100644
--- a/kernels/portable/cpu/op_linear_scratch_example.cpp
+++ b/kernels/portable/cpu/op_linear_scratch_example.cpp
@@ -23,7 +23,7 @@ namespace native {
 
 using Tensor = executorch::aten::Tensor;
 
 template <typename T>
-using optional = executorch::aten::optional<T>;
+using optional = std::optional<T>;
 
 // kernel for demonstration purpose only
diff --git a/kernels/portable/cpu/op_logit.cpp b/kernels/portable/cpu/op_logit.cpp
index 317ddb6fff1..8b3bbb38912 100644
--- a/kernels/portable/cpu/op_logit.cpp
+++ b/kernels/portable/cpu/op_logit.cpp
@@ -20,7 +20,7 @@ using executorch::aten::Tensor;
 Tensor& logit_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    executorch::aten::optional<double> eps,
+    std::optional<double> eps,
     Tensor& out) {
   (void)ctx;
diff --git a/kernels/portable/cpu/op_native_batch_norm.cpp b/kernels/portable/cpu/op_native_batch_norm.cpp
index ce4607f4586..aa6919924f1 100644
--- a/kernels/portable/cpu/op_native_batch_norm.cpp
+++ b/kernels/portable/cpu/op_native_batch_norm.cpp
@@ -24,8 +24,8 @@ using SizesType = executorch::aten::SizesType;
 std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     const Tensor& running_mean,
     const Tensor& running_var,
     double momentum,
@@ -139,8 +139,8 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
 std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     Tensor& running_mean,
     Tensor& running_var,
     bool training,
@@ -177,8 +177,8 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_out(
 std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     bool training,
     double momentum,
     double eps,
@@ -196,8 +196,8 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
       in,
       weight,
       bias,
-      executorch::aten::optional<Tensor>(),
-      executorch::aten::optional<Tensor>(),
+      std::optional<Tensor>(),
+      std::optional<Tensor>(),
       momentum,
       eps,
       out,
diff --git a/kernels/portable/cpu/op_native_group_norm.cpp b/kernels/portable/cpu/op_native_group_norm.cpp
index c373dfe26bd..b13e6c2e5ed 100644
--- a/kernels/portable/cpu/op_native_group_norm.cpp
+++ b/kernels/portable/cpu/op_native_group_norm.cpp
@@ -116,8 +116,8 @@ void group_norm(
 std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
     KernelRuntimeContext& ctx,
     const Tensor& input,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     int64_t N,
     int64_t C,
     int64_t HxW,
diff --git a/kernels/portable/cpu/op_native_layer_norm.cpp b/kernels/portable/cpu/op_native_layer_norm.cpp
index 66c80b7cccc..12a03a184f6 100644
--- a/kernels/portable/cpu/op_native_layer_norm.cpp
+++ b/kernels/portable/cpu/op_native_layer_norm.cpp
@@ -102,8 +102,8 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
     KernelRuntimeContext& ctx,
     const Tensor& input,
     IntArrayRef normalized_shape,
-    const executorch::aten::optional<Tensor>& weight,
-    const executorch::aten::optional<Tensor>& bias,
+    const std::optional<Tensor>& weight,
+    const std::optional<Tensor>& bias,
     double eps,
     Tensor& out,
     Tensor& mean_out,
diff --git a/kernels/portable/cpu/op_repeat_interleave.cpp b/kernels/portable/cpu/op_repeat_interleave.cpp
index c6d73476272..50da02c5646 100644
--- a/kernels/portable/cpu/op_repeat_interleave.cpp
+++ b/kernels/portable/cpu/op_repeat_interleave.cpp
@@ -66,7 +66,7 @@ using Tensor = executorch::aten::Tensor;
 Tensor& repeat_interleave_Tensor_out(
     KernelRuntimeContext& ctx,
     const Tensor& repeats,
-    executorch::aten::optional<int64_t> output_size,
+    std::optional<int64_t> output_size,
     Tensor& out) {
   (void)ctx;
diff --git a/kernels/portable/cpu/op_slice_copy.cpp b/kernels/portable/cpu/op_slice_copy.cpp
index acffb58b74e..1d4e509e083 100644
--- a/kernels/portable/cpu/op_slice_copy.cpp
+++ b/kernels/portable/cpu/op_slice_copy.cpp
@@ -20,8 +20,8 @@ Tensor& slice_copy_Tensor_out(
     KernelRuntimeContext& ctx,
     const Tensor& in,
     int64_t dim,
-    executorch::aten::optional<int64_t> start_val,
-    executorch::aten::optional<int64_t> end_val,
+    std::optional<int64_t> start_val,
+    std::optional<int64_t> end_val,
     int64_t step,
     Tensor& out) {
   (void)ctx;
diff --git a/kernels/portable/cpu/op_slice_scatter.cpp b/kernels/portable/cpu/op_slice_scatter.cpp
index 5a9138a0359..29c4ff7ab90 100644
--- a/kernels/portable/cpu/op_slice_scatter.cpp
+++ b/kernels/portable/cpu/op_slice_scatter.cpp
@@ -24,8 +24,8 @@ Tensor& slice_scatter_out(
     const Tensor& input,
     const Tensor& src,
     int64_t dim,
-    executorch::aten::optional<int64_t> start_val,
-    executorch::aten::optional<int64_t> end_val,
+    std::optional<int64_t> start_val,
+    std::optional<int64_t> end_val,
     int64_t step,
     Tensor& out) {
   (void)ctx;
diff --git a/kernels/portable/cpu/op_to_copy.cpp b/kernels/portable/cpu/op_to_copy.cpp
index 25d2b51b3b3..73d920a7b05 100644
--- a/kernels/portable/cpu/op_to_copy.cpp
+++ b/kernels/portable/cpu/op_to_copy.cpp
@@ -32,7 +32,7 @@ Tensor& to_copy_out(
     KernelRuntimeContext& ctx,
     const Tensor& self,
     bool non_blocking,
-    executorch::aten::optional<executorch::aten::MemoryFormat> memory_format,
+    std::optional<executorch::aten::MemoryFormat> memory_format,
     Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,
diff --git a/kernels/portable/cpu/op_upsample_bilinear2d.cpp b/kernels/portable/cpu/op_upsample_bilinear2d.cpp
index 8d27504e500..a1c06749c4a 100644
--- a/kernels/portable/cpu/op_upsample_bilinear2d.cpp
+++ b/kernels/portable/cpu/op_upsample_bilinear2d.cpp
@@ -15,8 +15,8 @@
 namespace executor {
 namespace native {
 
 using executorch::aten::ArrayRef;
-using executorch::aten::optional;
 using executorch::aten::SizesType;
+using std::optional;
 
 namespace {
 template <typename CTYPE>
diff --git a/kernels/portable/cpu/op_upsample_nearest2d.cpp b/kernels/portable/cpu/op_upsample_nearest2d.cpp
index 0421e837df3..92437a02c47 100644
--- a/kernels/portable/cpu/op_upsample_nearest2d.cpp
+++ b/kernels/portable/cpu/op_upsample_nearest2d.cpp
@@ -14,8 +14,8 @@
 namespace executor {
 namespace native {
 
 using executorch::aten::ArrayRef;
-using executorch::aten::optional;
 using executorch::aten::SizesType;
+using std::optional;
 
 namespace {
 template <typename CTYPE>
diff --git a/kernels/portable/cpu/util/activation_ops_util.cpp b/kernels/portable/cpu/util/activation_ops_util.cpp
index 2f90f55e1aa..87503d3a4d6 100644
--- a/kernels/portable/cpu/util/activation_ops_util.cpp
+++ b/kernels/portable/cpu/util/activation_ops_util.cpp
@@ -15,7 +15,10 @@
 namespace torch {
 namespace executor {
 
-bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) {
+bool check_gelu_args(
+    const Tensor& in,
+    std::string_view approximate,
+    Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool);
   ET_CHECK_OR_RETURN_FALSE(
diff --git a/kernels/portable/cpu/util/activation_ops_util.h b/kernels/portable/cpu/util/activation_ops_util.h
index 84299c7936d..be5e91a8936 100644
--- a/kernels/portable/cpu/util/activation_ops_util.h
+++ b/kernels/portable/cpu/util/activation_ops_util.h
@@ -13,7 +13,10 @@
 namespace torch {
 namespace executor {
 
-bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out);
+bool check_gelu_args(
+    const Tensor& in,
+    std::string_view approximate,
+    Tensor& out);
 
 bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out);
diff --git a/kernels/portable/cpu/util/advanced_index_util.cpp b/kernels/portable/cpu/util/advanced_index_util.cpp
index b3c0f24a856..04fe4c1fbce 100644
--- a/kernels/portable/cpu/util/advanced_index_util.cpp
+++ b/kernels/portable/cpu/util/advanced_index_util.cpp
@@ -15,8 +15,7 @@
 namespace torch {
 namespace executor {
 
 using Tensor = executorch::aten::Tensor;
-using TensorOptList =
-    executorch::aten::ArrayRef<executorch::aten::optional<Tensor>>;
+using TensorOptList = executorch::aten::ArrayRef<std::optional<Tensor>>;
 
 namespace {
diff --git a/kernels/portable/cpu/util/advanced_index_util.h b/kernels/portable/cpu/util/advanced_index_util.h
index 78b23a5e775..418ce996442 100644
--- a/kernels/portable/cpu/util/advanced_index_util.h
+++ b/kernels/portable/cpu/util/advanced_index_util.h
@@ -14,8 +14,7 @@
 namespace torch {
 namespace executor {
 
 using Tensor = executorch::aten::Tensor;
-using TensorOptList =
-    executorch::aten::ArrayRef<executorch::aten::optional<Tensor>>;
+using TensorOptList = executorch::aten::ArrayRef<std::optional<Tensor>>;
 
 /**
  * Performs preliminary checks on the arguments. However, it doesn't check that
However, it doesn't check that diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp index 02b2910fc88..1527e6d9e35 100644 --- a/kernels/portable/cpu/util/copy_ops_util.cpp +++ b/kernels/portable/cpu/util/copy_ops_util.cpp @@ -745,7 +745,7 @@ bool check_split_copy_args( bool check_to_copy_args( const Tensor& input, bool non_blocking, - executorch::aten::optional memory_format, + std::optional memory_format, Tensor& out) { (void)input; (void)out; diff --git a/kernels/portable/cpu/util/copy_ops_util.h b/kernels/portable/cpu/util/copy_ops_util.h index cef2b3d4ee1..2532efbd016 100644 --- a/kernels/portable/cpu/util/copy_ops_util.h +++ b/kernels/portable/cpu/util/copy_ops_util.h @@ -194,7 +194,7 @@ bool check_split_copy_args( bool check_to_copy_args( const Tensor& input, bool non_blocking, - executorch::aten::optional memory_format, + std::optional memory_format, Tensor& out); bool check__to_dim_order_copy_args( diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp index 771175eccd0..daa85f6beec 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.cpp +++ b/kernels/portable/cpu/util/kernel_ops_util.cpp @@ -269,7 +269,7 @@ bool check_avg_pool2d_args( const IntArrayRef padding, const bool ceil_mode, const bool count_include_pad, - const executorch::aten::optional& divisor_override, + const std::optional& divisor_override, const Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); @@ -324,7 +324,7 @@ void get_avg_pool2d_out_target_size( bool check_convolution_args( const Tensor& in, const Tensor& weight, - const executorch::aten::optional& bias, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, diff --git a/kernels/portable/cpu/util/kernel_ops_util.h b/kernels/portable/cpu/util/kernel_ops_util.h index 8028f254eb4..e3eaf4d043e 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.h +++ b/kernels/portable/cpu/util/kernel_ops_util.h @@ -327,7 +327,7 @@ void apply_kernel_2d_reduce_then_map_fn( const IntArrayRef padding, const IntArrayRef dilation, Tensor& out, - executorch::aten::optional indices = {}) { + std::optional indices = {}) { executorch::aten::ArrayRef in_sizes = in.sizes(); executorch::aten::ArrayRef out_sizes = out.sizes(); @@ -393,7 +393,7 @@ bool check_avg_pool2d_args( const IntArrayRef padding, const bool ceil_mode, const bool count_include_pad, - const executorch::aten::optional& divisor_override, + const std::optional& divisor_override, const Tensor& out); void get_avg_pool2d_out_target_size( @@ -408,7 +408,7 @@ void get_avg_pool2d_out_target_size( bool check_convolution_args( const Tensor& in, const Tensor& weight, - const executorch::aten::optional& bias, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, diff --git a/kernels/portable/cpu/util/normalization_ops_util.cpp b/kernels/portable/cpu/util/normalization_ops_util.cpp index 215fc03a146..f7118257898 100644 --- a/kernels/portable/cpu/util/normalization_ops_util.cpp +++ b/kernels/portable/cpu/util/normalization_ops_util.cpp @@ -18,10 +18,10 @@ using Tensor = executorch::aten::Tensor; bool check_batch_norm_args( const Tensor& in, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, - const executorch::aten::optional& running_mean, - const executorch::aten::optional& running_var, + const std::optional& weight, + const std::optional& bias, + const std::optional& running_mean, + const 
std::optional& running_var, double momentum, double eps, Tensor& out, @@ -76,8 +76,8 @@ bool check_batch_norm_args( bool check_layer_norm_args( const Tensor& in, IntArrayRef normalized_shape, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, Tensor& out, Tensor& mean_out, Tensor& rstd_out) { @@ -142,8 +142,8 @@ void get_layer_norm_out_target_size( bool check_group_norm_args( const Tensor& in, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, int64_t N, int64_t C, int64_t HxW, diff --git a/kernels/portable/cpu/util/normalization_ops_util.h b/kernels/portable/cpu/util/normalization_ops_util.h index ad2dc10d2f8..f7bcfa0e8f0 100644 --- a/kernels/portable/cpu/util/normalization_ops_util.h +++ b/kernels/portable/cpu/util/normalization_ops_util.h @@ -15,10 +15,10 @@ namespace executor { bool check_batch_norm_args( const Tensor& in, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, - const executorch::aten::optional& running_mean, - const executorch::aten::optional& running_var, + const std::optional& weight, + const std::optional& bias, + const std::optional& running_mean, + const std::optional& running_var, double momentum, double eps, Tensor& out, @@ -28,8 +28,8 @@ bool check_batch_norm_args( bool check_layer_norm_args( const Tensor& input, IntArrayRef normalized_shape, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, Tensor& out, Tensor& mean_out, Tensor& rstd_out); @@ -42,8 +42,8 @@ void get_layer_norm_out_target_size( bool check_group_norm_args( const Tensor& input, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, int64_t N, int64_t C, int64_t HxW, diff --git a/kernels/portable/cpu/util/reduce_util.cpp b/kernels/portable/cpu/util/reduce_util.cpp index 31296d67ee7..afeb56f719f 100644 --- a/kernels/portable/cpu/util/reduce_util.cpp +++ b/kernels/portable/cpu/util/reduce_util.cpp @@ -34,8 +34,7 @@ inline size_t _normalize_non_neg_d(ssize_t d, ssize_t in_dim) { ET_NODISCARD bool check_dim_list_is_valid( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list) { + const std::optional>& dim_list) { if (dim_list.has_value() && dim_list.value().size() != 0) { const auto& reduce_dims = dim_list.value(); bool dim_exist[kTensorDimensionLimit]; @@ -79,7 +78,7 @@ bool check_dim_in_dim_list( */ size_t get_reduced_dim_product( const Tensor& in, - const executorch::aten::optional& dim) { + const std::optional& dim) { if (in.dim() == 0) { return 1; } @@ -95,8 +94,7 @@ size_t get_reduced_dim_product( */ size_t get_reduced_dim_product( const Tensor& in, - const executorch::aten::optional>& - dim_list) { + const std::optional>& dim_list) { if (in.dim() == 0) { return 1; } @@ -115,9 +113,7 @@ size_t get_reduced_dim_product( * Returns the number of elements of the output of reducing `in` * over `dim`. 
*/ -size_t get_out_numel( - const Tensor& in, - const executorch::aten::optional& dim) { +size_t get_out_numel(const Tensor& in, const std::optional& dim) { size_t out_numel = 1; if (dim.has_value()) { const auto dim_val = dim.value(); @@ -142,8 +138,7 @@ size_t get_out_numel( */ size_t get_out_numel( const Tensor& in, - const executorch::aten::optional>& - dim_list) { + const std::optional>& dim_list) { size_t out_numel = 1; if (dim_list.has_value() && dim_list.value().size() != 0) { for (size_t d = 0; d < static_cast(in.dim()); ++d) { @@ -161,7 +156,7 @@ size_t get_out_numel( */ size_t get_init_index( const Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, const size_t out_ix) { if (!dim.has_value()) { return 0; @@ -192,8 +187,7 @@ size_t get_init_index( */ size_t get_init_index( const Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const size_t out_ix) { if (!dim_list.has_value() || dim_list.value().size() == 0) { return 0; @@ -216,7 +210,7 @@ size_t get_init_index( size_t compute_reduced_out_size( const Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, bool keepdim, executorch::aten::SizesType* sizes_arr) { const auto in_dim = in.dim(); @@ -253,8 +247,7 @@ size_t compute_reduced_out_size( size_t compute_reduced_out_size( const Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, bool keepdim, executorch::aten::SizesType* sizes_arr) { // check_dim_in_dim_list and later comparisons @@ -296,7 +289,7 @@ size_t compute_reduced_out_size( Error resize_reduction_out( const Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, bool keepdim, Tensor& out) { executorch::aten::SizesType sizes_arr[kTensorDimensionLimit]; @@ -308,8 +301,7 @@ Error resize_reduction_out( Error resize_reduction_out( const Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, bool keepdim, Tensor& out) { executorch::aten::SizesType sizes_arr[kTensorDimensionLimit]; diff --git a/kernels/portable/cpu/util/reduce_util.h b/kernels/portable/cpu/util/reduce_util.h index 9319ab01142..11bd9f9f546 100644 --- a/kernels/portable/cpu/util/reduce_util.h +++ b/kernels/portable/cpu/util/reduce_util.h @@ -147,8 +147,7 @@ void apply_on_flat_ix_with_dim_mask_and_base( ET_NODISCARD bool check_dim_list_is_valid( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list); + const std::optional>& dim_list); bool check_dim_in_dim_list( const size_t dim, @@ -157,52 +156,49 @@ bool check_dim_in_dim_list( size_t get_reduced_dim_product( const executorch::aten::Tensor& in, - const executorch::aten::optional& dim); + const std::optional& dim); size_t get_reduced_dim_product( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list); + const std::optional>& dim_list); // Resolve ambiguity between the above two overloads -- ArrayRef and // optional are both implicitly constructible from int64_t. 
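The disambiguating forwarder that follows exists because a bare `int64_t` converts implicitly to both `std::optional<int64_t>` and `executorch::aten::ArrayRef<int64_t>`, so a plain-integer call would be ambiguous between the two overloads above. A minimal self-contained sketch of the same situation, with a hypothetical `IntList` standing in for `ArrayRef<int64_t>`:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for executorch::aten::ArrayRef<int64_t>: like ArrayRef, it is
// implicitly constructible from a single element.
struct IntList {
  int64_t first;
  IntList(const int64_t& one) : first(one) {}
};

size_t reduced_product(std::optional<int64_t> dim) { return dim ? 1u : 0u; }
size_t reduced_product(const IntList& dims) { (void)dims; return 2u; }

// Exact-match forwarder, mirroring the inline get_reduced_dim_product
// below: without it, reduced_product(int64_t{3}) would not compile,
// since both overloads above are one implicit conversion away.
inline size_t reduced_product(int64_t dim) {
  return reduced_product(std::optional<int64_t>(dim));
}

int main() {
  std::printf("%zu\n", reduced_product(int64_t{3}));  // prints 1
}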
inline size_t get_reduced_dim_product( const executorch::aten::Tensor& in, int64_t dim) { - return get_reduced_dim_product(in, executorch::aten::optional(dim)); + return get_reduced_dim_product(in, std::optional(dim)); } size_t get_out_numel( const executorch::aten::Tensor& in, - const executorch::aten::optional& dim); + const std::optional& dim); size_t get_out_numel( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list); + const std::optional>& dim_list); // Resolve ambiguity between the above two overloads -- ArrayRef and // optional are both implicitly constructible from int64_t. inline size_t get_out_numel(const executorch::aten::Tensor& in, int64_t dim) { - return get_out_numel(in, executorch::aten::optional(dim)); + return get_out_numel(in, std::optional(dim)); } size_t get_init_index( const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, const size_t out_ix); size_t get_init_index( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const size_t out_ix); inline size_t get_init_index( const executorch::aten::Tensor& in, int64_t dim, const size_t out_ix) { - return get_init_index(in, executorch::aten::optional(dim), out_ix); + return get_init_index(in, std::optional(dim), out_ix); } // // Iteration Functions @@ -219,7 +215,7 @@ template void apply_over_dim( const Fn& fn, const executorch::aten::Tensor& in, - const executorch::aten::optional& dim) { + const std::optional& dim) { // If dim is null, apply fn over the entire tensor if (!dim.has_value()) { fn(in.numel(), 1, 0); @@ -269,7 +265,7 @@ template void apply_over_dim( const Fn& fn, const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, const size_t out_ix, const int64_t start = 0, const int64_t end = -1) { @@ -326,8 +322,7 @@ class ApplyOverDimListPlan { ApplyOverDimListPlan( const executorch::aten::Tensor& in, // If set, lifetime must last until execute() returns. 
- const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const int64_t start = 0, const int64_t end = -1) : dim_list_(dim_list), in_(in) { @@ -396,8 +391,8 @@ class ApplyOverDimListPlan { return in_; } - const executorch::aten::optional>& - get_dim_list() const { + const std::optional>& get_dim_list() + const { return dim_list_; } @@ -421,7 +416,7 @@ class ApplyOverDimListPlan { }; ExecutionMode mode_; size_t out_numel_; - executorch::aten::optional> dim_list_; + std::optional> dim_list_; std::array is_in_dim_list_; const executorch::aten::Tensor& in_; }; @@ -437,8 +432,7 @@ template void apply_over_dim_list( const Fn& fn, const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const size_t out_ix, const int64_t start = 0, const int64_t end = -1) { @@ -483,7 +477,7 @@ std::tuple map_reduce_over_dim( const MapOp& map_fun, const ReduceOp& reduce_fun, const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, const size_t out_ix) { if (dim.has_value()) { if (in.dim() != 0) { @@ -535,8 +529,7 @@ class MapReduceOverDimListPlan { public: MapReduceOverDimListPlan( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list) + const std::optional>& dim_list) : plan_(in, dim_list, 1, -1) { ET_CHECK_MSG(in.numel() > 0, "Input tensor must be nonempty"); } @@ -605,8 +598,7 @@ CTYPE_OUT map_reduce_over_dim_list( const MapOp& map_fun, const ReduceOp& reduce_fun, const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const size_t out_ix) { MapReduceOverDimListPlan plan(in, dim_list); return plan.execute(map_fun, reduce_fun, out_ix); @@ -636,7 +628,7 @@ template std::tuple reduce_over_dim( const ReduceOp& reduce_fun, const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, const size_t out_ix) { return map_reduce_over_dim( [](CTYPE v) { return v; }, reduce_fun, in, dim, out_ix); @@ -650,8 +642,7 @@ class ReduceOverDimListPlan { public: ReduceOverDimListPlan( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list) + const std::optional>& dim_list) : plan_(in, dim_list) {} template @@ -687,8 +678,7 @@ template CTYPE reduce_over_dim_list( const ReduceOp& reduce_fun, const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, const size_t out_ix) { ReduceOverDimListPlan plan(in, dim_list); return plan.execute(reduce_fun, out_ix); @@ -700,20 +690,19 @@ CTYPE reduce_over_dim_list( size_t compute_reduced_out_size( const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, bool keepdim, executorch::aten::SizesType* sizes_arr); size_t compute_reduced_out_size( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, bool keepdim, executorch::aten::SizesType* sizes_arr); inline ssize_t compute_reduced_out_dim( const executorch::aten::Tensor& in, - const executorch::aten::optional& dim, + const std::optional& dim, bool keepdim) { return ( keepdim ? in.dim() @@ -723,8 +712,7 @@ inline ssize_t compute_reduced_out_dim( inline ssize_t compute_reduced_out_dim( const executorch::aten::Tensor& in, - const executorch::aten::optional>& - dim_list, + const std::optional>& dim_list, bool keepdim) { return ( keepdim ? 
in.dim() @@ -741,14 +729,13 @@ inline ssize_t compute_reduced_out_dim( Error resize_reduction_out( const executorch::aten::Tensor& in, - const executorch::aten::optional<int64_t>& dim, + const std::optional<int64_t>& dim, bool keepdim, executorch::aten::Tensor& out); Error resize_reduction_out( const executorch::aten::Tensor& in, - const executorch::aten::optional<executorch::aten::ArrayRef<int64_t>>& - dim_list, + const std::optional<executorch::aten::ArrayRef<int64_t>>& dim_list, bool keepdim, executorch::aten::Tensor& out); @@ -759,8 +746,7 @@ inline Error resize_reduction_out( int64_t dim, bool keepdim, executorch::aten::Tensor& out) { - return resize_reduction_out( - in, executorch::aten::optional<int64_t>(dim), keepdim, out); + return resize_reduction_out(in, std::optional<int64_t>(dim), keepdim, out); } #ifndef USE_ATEN_LIB @@ -820,7 +806,7 @@ bool check_prod_out_args( template <typename Func> [[nodiscard]] bool parallel_for_each_reduce_over_dim_output_index( const Tensor& in, - executorch::aten::optional<int64_t> dim, + std::optional<int64_t> dim, const Tensor& out, const Func& func) { #ifdef ET_USE_THREADPOOL @@ -843,7 +829,7 @@ template <typename Func> template <typename Func> [[nodiscard]] bool parallel_for_each_reduce_over_dim_list_output_index( const Tensor& in, - executorch::aten::optional<executorch::aten::ArrayRef<int64_t>> dim_list, + std::optional<executorch::aten::ArrayRef<int64_t>> dim_list, const Tensor& out, const Func& func) { #ifdef ET_USE_THREADPOOL diff --git a/kernels/portable/cpu/util/test/reduce_test.cpp b/kernels/portable/cpu/util/test/reduce_test.cpp index 7552a22d3bb..69e1093b183 100644 --- a/kernels/portable/cpu/util/test/reduce_test.cpp +++ b/kernels/portable/cpu/util/test/reduce_test.cpp @@ -16,10 +16,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::runtime::testing::TensorFactory; +using std::optional; using torch::executor::apply_over_dim; using torch::executor::apply_over_dim_list; using torch::executor::get_out_numel; diff --git a/kernels/portable/cpu/util/upsample_util.h b/kernels/portable/cpu/util/upsample_util.h index 073c6332d6b..7ac1fec591f 100644 --- a/kernels/portable/cpu/util/upsample_util.h +++ b/kernels/portable/cpu/util/upsample_util.h @@ -45,7 +45,7 @@ Error resize_upsample_2d( // Ported from aten/src/ATen/native/UpSample.h template <typename scalar_t> inline scalar_t compute_scales_value( - const executorch::aten::optional<double>& scale, + const std::optional<double>& scale, int64_t input_size, int64_t output_size) { return scale.has_value() ? 
static_cast(1.0 / scale.value()) @@ -58,7 +58,7 @@ inline scalar_t area_pixel_compute_scale( int64_t input_size, int64_t output_size, bool align_corners, - const executorch::aten::optional& scale) { + const std::optional& scale) { // see Note [area_pixel_compute_scale] if (align_corners) { if (output_size > 1) { diff --git a/kernels/portable/test/op_div_test.cpp b/kernels/portable/test/op_div_test.cpp index ee0ba3f6a04..dec78af459c 100644 --- a/kernels/portable/test/op_div_test.cpp +++ b/kernels/portable/test/op_div_test.cpp @@ -32,7 +32,7 @@ class OpDivScalarOutKernelTest : public OperatorTest { Tensor& op_div_out_mode( const Tensor& a, const Tensor& b, - executorch::aten::optional mode, + std::optional mode, Tensor& out) { return torch::executor::aten::div_outf(context_, a, b, mode, out); } @@ -43,7 +43,7 @@ class OpDivScalarModeOutKernelTest : public OperatorTest { Tensor& op_div_scalar_mode_out( const Tensor& a, const Scalar& b, - executorch::aten::optional mode, + std::optional mode, Tensor& out) { return torch::executor::aten::div_outf(context_, a, b, mode, out); } @@ -60,7 +60,7 @@ TEST_F(OpDivScalarOutKernelTest, SanityCheckModeTrunc) { op_div_out_mode( tf_a.make(sizes, {1, 2, 4, -9}), tf_a.make(sizes, {2, 2, 2, 2}), - executorch::aten::optional("trunc"), + std::optional("trunc"), out); // Check that it matches the expected output. @@ -78,7 +78,7 @@ TEST_F(OpDivScalarOutKernelTest, SanityCheckModeFloor) { op_div_out_mode( tf_a.make(sizes, {1, 2, 4, -9}), tf_a.make(sizes, {2, 2, 2, 2}), - executorch::aten::optional("floor"), + std::optional("floor"), out); // Check that it matches the expected output. @@ -95,7 +95,7 @@ TEST_F(OpDivScalarModeOutKernelTest, SanityCheckModeTrunc) { op_div_scalar_mode_out( tf.make(sizes, {1, 2, 4, -9}), 2, - executorch::aten::optional("trunc"), + std::optional("trunc"), out); // Check that it matches the expected output. @@ -112,7 +112,7 @@ TEST_F(OpDivScalarModeOutKernelTest, SanityCheckModeFloor) { op_div_scalar_mode_out( tf.make(sizes, {1, 2, 4, -9}), 2, - executorch::aten::optional("floor"), + std::optional("floor"), out); // Check that it matches the expected output. diff --git a/kernels/portable/test/op_gelu_test.cpp b/kernels/portable/test/op_gelu_test.cpp index 2e5cad55c35..fa663f8ba78 100644 --- a/kernels/portable/test/op_gelu_test.cpp +++ b/kernels/portable/test/op_gelu_test.cpp @@ -16,15 +16,16 @@ using namespace ::testing; using executorch::aten::ScalarType; -using executorch::aten::string_view; using executorch::aten::Tensor; +using std::string_view; using torch::executor::testing::TensorFactory; // Note: This file is used for testing op_gelu for *portable kernel specific*. // If your test case is generic and should be tested on all kernels, add it to // executorch/kernels/test/op_gelu_test.cpp instead. 
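One practical note on the `string_view` half of this migration, relevant to the signature change below: call sites that pass string literals, such as `op_gelu_out(self, "tanh", out)`, compile unchanged, because a literal binds directly to `std::string_view`. A minimal sketch; `is_valid_gelu_mode` is a hypothetical reduction, not the kernel's actual helper (the real check lives in `check_gelu_args`):

#include <cstdio>
#include <string_view>

// Hypothetical reduction of the gelu mode check: only "none" and
// "tanh" are accepted values for the approximate argument.
bool is_valid_gelu_mode(std::string_view approximate) {
  return approximate == "none" || approximate == "tanh";
}

int main() {
  // String literals convert to std::string_view without allocating,
  // so existing call sites keep working after the type swap.
  std::printf("%d\n", is_valid_gelu_mode("tanh"));  // 1
  std::printf("%d\n", is_valid_gelu_mode("fast"));  // 0
}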
-Tensor& op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) { +Tensor& +op_gelu_out(const Tensor& self, std::string_view approximate, Tensor& out) { executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::native::gelu_out(context, self, approximate, out); } diff --git a/kernels/quantized/cpu/embeddingxb.cpp b/kernels/quantized/cpu/embeddingxb.cpp index eab9a533cfa..4a76eff1eef 100644 --- a/kernels/quantized/cpu/embeddingxb.cpp +++ b/kernels/quantized/cpu/embeddingxb.cpp @@ -65,11 +65,11 @@ static inline int32_t get_embedding_dim( void check_embedding_xbit_args( const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out, int weight_nbit) { ET_CHECK_MSG(8 % weight_nbit == 0, "nbit must divide 8"); @@ -170,7 +170,7 @@ template void embedding_xbit_per_channel( const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const Tensor& indices, Tensor& out, int weight_nbit) { @@ -260,7 +260,7 @@ Tensor& quantized_embedding_xbit_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, @@ -299,7 +299,7 @@ Tensor& quantized_embedding_xbit_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, @@ -325,11 +325,11 @@ Tensor& quantized_embedding_xbit_dtype_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out, int weight_nbit) { // TODO (jakeszwe): improve these to account for the size of out in relation @@ -368,11 +368,11 @@ Tensor& quantized_embedding_xbit_dtype_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out, int weight_nbit) { // TODO(larryliu): Add a context arg to the real op function and remove this diff --git a/kernels/quantized/cpu/embeddingxb.h b/kernels/quantized/cpu/embeddingxb.h index 3c8be3d86a1..0d09ca83b43 100644 --- a/kernels/quantized/cpu/embeddingxb.h +++ b/kernels/quantized/cpu/embeddingxb.h @@ -24,7 +24,7 @@ Tensor& quantized_embedding_xbit_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, @@ 
-35,7 +35,7 @@ Tensor& quantized_embedding_xbit_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, @@ -47,11 +47,11 @@ Tensor& quantized_embedding_xbit_dtype_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out, int weight_nbit); @@ -59,11 +59,11 @@ Tensor& quantized_embedding_xbit_dtype_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out, int weight_nbit); diff --git a/kernels/quantized/cpu/op_dequantize.cpp b/kernels/quantized/cpu/op_dequantize.cpp index 97c8584e3d9..c1f2770d3d6 100644 --- a/kernels/quantized/cpu/op_dequantize.cpp +++ b/kernels/quantized/cpu/op_dequantize.cpp @@ -38,7 +38,7 @@ void check_dequantize_per_tensor_args( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional& out_dtype, + std::optional& out_dtype, Tensor& out) { ET_CHECK_MSG( input.scalar_type() == ScalarType::Byte || @@ -170,7 +170,7 @@ float get_scale(const Tensor& scale, size_t channel_ix) { bool can_use_optimized_dequantize_per_channel( const Tensor& in, const ScalarType in_dtype, - executorch::aten::optional& out_dtype) { + std::optional& out_dtype) { bool is_contiguous = false; #ifdef USE_ATEN_LIB is_contiguous = in.is_contiguous(); @@ -188,13 +188,13 @@ bool can_use_optimized_dequantize_per_channel( void dequantize_per_channel_optimized( const Tensor& in, const Tensor& scales, - const executorch::aten::optional& opt_zero_points, + const std::optional& opt_zero_points, Tensor& out, int64_t axis, int64_t quant_min, int64_t quant_max, ScalarType in_dtype, - executorch::aten::optional& out_dtype) { + std::optional& out_dtype) { check_dequantize_per_tensor_args( in, quant_min, quant_max, in_dtype, out_dtype, out); ET_CHECK_MSG( @@ -263,7 +263,7 @@ Tensor& dequantize_per_tensor_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { torch::executor::Error err = resize_tensor(out, input.sizes()); ET_CHECK_MSG( @@ -323,7 +323,7 @@ Tensor& dequantize_per_tensor_tensor_args_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { ET_CHECK_MSG( scale.scalar_type() == ScalarType::Double, @@ -357,12 +357,12 @@ Tensor& dequantize_per_tensor_tensor_args_out( Tensor& dequantize_per_channel_out( const Tensor& input, const Tensor& scale, - const executorch::aten::optional& opt_zero_points, + const std::optional& opt_zero_points, int64_t axis, int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { // normalize axis ET_CHECK_MSG( @@ -428,9 +428,8 @@ Tensor& dequantize_per_channel_out( zero_point_data = nullptr; } - 
executorch::aten::optional> - optional_dim_list{ - executorch::aten::ArrayRef{dims, size_t(input.dim() - 1)}}; + std::optional> optional_dim_list{ + executorch::aten::ArrayRef{dims, size_t(input.dim() - 1)}}; // Actual dequantization logic // input, out are the input and output tensors @@ -447,7 +446,7 @@ Tensor& dequantize_per_channel_out( const auto* input_data_ptr = input.const_data_ptr(); \ ET_CHECK_MSG( \ axis == 0, "Axis must be 0 for a single dimensional tensors"); \ - const executorch::aten::optional dim; \ + const std::optional dim; \ apply_over_dim( \ [input_data_ptr, out_data_ptr, zero_point_data, &scale]( \ size_t numel, size_t stride, size_t base_ix) { \ @@ -518,12 +517,12 @@ Tensor& dequantize_per_channel_out( KernelRuntimeContext& context, const Tensor& input, const Tensor& scale, - const executorch::aten::optional& opt_zero_points, + const std::optional& opt_zero_points, int64_t axis, int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { (void)context; torch::executor::Error err = resize_tensor(out, input.sizes()); @@ -551,7 +550,7 @@ Tensor& dequantize_per_tensor_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { // TODO(larryliu): Add a context arg to the real op function and remove this // wrapper @@ -568,7 +567,7 @@ Tensor& dequantize_per_tensor_tensor_args_out( int64_t quant_min, int64_t quant_max, ScalarType dtype, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { // TODO(larryliu): Add a context arg to the real op function and remove this // wrapper diff --git a/kernels/quantized/cpu/op_embedding.cpp b/kernels/quantized/cpu/op_embedding.cpp index c43755ed3da..899655c538f 100644 --- a/kernels/quantized/cpu/op_embedding.cpp +++ b/kernels/quantized/cpu/op_embedding.cpp @@ -27,11 +27,11 @@ namespace { void check_embedding_byte_args( const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { ET_CHECK_MSG( weight.dim() == 2, "weight must be 2D but got() %zd dims", weight.dim()); @@ -129,7 +129,7 @@ template void embedding_byte_per_channel( const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const Tensor& indices, Tensor& out) { // An embedding layer nn.Embedding(num_embeddings, embedding_dim) has a @@ -234,7 +234,7 @@ Tensor& quantized_embedding_byte_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, @@ -269,7 +269,7 @@ Tensor& quantized_embedding_byte_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, @@ -293,11 +293,11 @@ Tensor& quantized_embedding_byte_dtype_out( // non quant input and returns fp output const Tensor& weight, const Tensor& 
weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { // TODO (jakeszwe): improve these to account for the size of out in relation // to weight and indices accounting for a possible batch dimension @@ -332,11 +332,11 @@ Tensor& quantized_embedding_byte_dtype_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { // TODO(larryliu): Add a context arg to the real op function and remove this // wrapper diff --git a/kernels/quantized/cpu/op_embedding2b.cpp b/kernels/quantized/cpu/op_embedding2b.cpp index 9d274d38e1b..edab21af79a 100644 --- a/kernels/quantized/cpu/op_embedding2b.cpp +++ b/kernels/quantized/cpu/op_embedding2b.cpp @@ -37,7 +37,7 @@ Tensor& quantized_embedding_2bit_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, @@ -57,7 +57,7 @@ Tensor& quantized_embedding_2bit_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, @@ -77,11 +77,11 @@ Tensor& quantized_embedding_2bit_out( Tensor& quantized_embedding_2bit_dtype_out( const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return quantized_embedding_xbit_dtype_out( weight, @@ -99,11 +99,11 @@ Tensor& quantized_embedding_2bit_dtype_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return quantized_embedding_xbit_dtype_out( context, diff --git a/kernels/quantized/cpu/op_embedding4b.cpp b/kernels/quantized/cpu/op_embedding4b.cpp index 773b7c5c10f..cae80110b61 100644 --- a/kernels/quantized/cpu/op_embedding4b.cpp +++ b/kernels/quantized/cpu/op_embedding4b.cpp @@ -37,7 +37,7 @@ Tensor& quantized_embedding_4bit_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, @@ -57,7 +57,7 @@ Tensor& quantized_embedding_4bit_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& 
opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, @@ -79,11 +79,11 @@ Tensor& quantized_embedding_4bit_dtype_out( // non quant input and returns fp output const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, const int64_t weight_quant_min, const int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return quantized_embedding_xbit_dtype_out( weight, @@ -101,11 +101,11 @@ Tensor& quantized_embedding_4bit_dtype_out( KernelRuntimeContext& context, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, int64_t weight_quant_min, int64_t weight_quant_max, const Tensor& indices, - executorch::aten::optional out_dtype, + std::optional out_dtype, Tensor& out) { return quantized_embedding_xbit_dtype_out( context, diff --git a/kernels/quantized/cpu/op_mixed_linear.cpp b/kernels/quantized/cpu/op_mixed_linear.cpp index c97ed2cb7c9..a9d5db10533 100644 --- a/kernels/quantized/cpu/op_mixed_linear.cpp +++ b/kernels/quantized/cpu/op_mixed_linear.cpp @@ -19,8 +19,8 @@ bool check_quantized_mixed_linear_args( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, - const executorch::aten::optional dtype, + const std::optional& opt_weight_zero_points, + const std::optional dtype, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2)); @@ -64,8 +64,8 @@ Tensor& quantized_mixed_linear_out( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, - const executorch::aten::optional dtype, + const std::optional& opt_weight_zero_points, + const std::optional dtype, Tensor& out) { // TODO (gjcomer) Replace with ET_KERNEL_CHECK when context is available. 
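The `std::optional<ScalarType> dtype` parameter threaded through these mixed-dtype kernels lends itself to a simple defaulting idiom: when the caller passes `std::nullopt`, fall back to the input's dtype. A minimal sketch of that idiom under that assumption; `resolve_dtype` and the reduced `ScalarType` are illustrative, not ExecuTorch APIs:

#include <optional>

enum class ScalarType { Float, Half };

// Hypothetical helper: resolve an optional output dtype against the
// input dtype, defaulting to the input's when none is provided.
constexpr ScalarType resolve_dtype(
    ScalarType input_dtype,
    std::optional<ScalarType> dtype) {
  return dtype.value_or(input_dtype);
}

static_assert(
    resolve_dtype(ScalarType::Float, std::nullopt) == ScalarType::Float);
static_assert(
    resolve_dtype(ScalarType::Float, ScalarType::Half) == ScalarType::Half);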
ET_CHECK(check_quantized_mixed_linear_args( @@ -117,8 +117,8 @@ Tensor& quantized_mixed_linear_out( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, - const executorch::aten::optional dtype, + const std::optional& opt_weight_zero_points, + const std::optional dtype, Tensor& out) { // TODO(mcandales): Remove the need for this wrapper // TODO(mkg): add support for dtype diff --git a/kernels/quantized/cpu/op_mixed_mm.cpp b/kernels/quantized/cpu/op_mixed_mm.cpp index 564de74dfde..5e52c681e1b 100644 --- a/kernels/quantized/cpu/op_mixed_mm.cpp +++ b/kernels/quantized/cpu/op_mixed_mm.cpp @@ -19,7 +19,7 @@ bool check_quantized_mixed_mm_args( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2)); @@ -55,7 +55,7 @@ Tensor& quantized_mixed_mm_out( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, Tensor& out) { ET_CHECK(check_quantized_mixed_mm_args( in, weight, weight_scales, opt_weight_zero_points, out)); @@ -92,7 +92,7 @@ Tensor& quantized_mixed_mm_out( const Tensor& in, const Tensor& weight, const Tensor& weight_scales, - const executorch::aten::optional& opt_weight_zero_points, + const std::optional& opt_weight_zero_points, Tensor& out) { // TODO(mcandales): Remove the need for this wrapper (void)ctx; diff --git a/kernels/quantized/cpu/op_quantize.cpp b/kernels/quantized/cpu/op_quantize.cpp index 632bddd58c4..4665c3d665b 100644 --- a/kernels/quantized/cpu/op_quantize.cpp +++ b/kernels/quantized/cpu/op_quantize.cpp @@ -294,9 +294,8 @@ Tensor& quantize_per_channel_out( const double* scale_data = scale.const_data_ptr(); const int64_t* zero_point_data = zero_point.const_data_ptr(); - executorch::aten::optional> - optional_dim_list{ - executorch::aten::ArrayRef{dims, size_t(input.dim() - 1)}}; + std::optional> optional_dim_list{ + executorch::aten::ArrayRef{dims, size_t(input.dim() - 1)}}; // Actual quantization logic // input, out are the input and output tensors diff --git a/kernels/quantized/test/op_add_test.cpp b/kernels/quantized/test/op_add_test.cpp index 3f258827973..fdf38cc8255 100644 --- a/kernels/quantized/test/op_add_test.cpp +++ b/kernels/quantized/test/op_add_test.cpp @@ -20,11 +20,11 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::add_out; using torch::executor::native::dequantize_per_tensor_out; using torch::executor::native::quantize_per_tensor_out; diff --git a/kernels/quantized/test/op_dequantize_test.cpp b/kernels/quantized/test/op_dequantize_test.cpp index 934b4777305..bbda1590a10 100644 --- a/kernels/quantized/test/op_dequantize_test.cpp +++ b/kernels/quantized/test/op_dequantize_test.cpp @@ -19,10 +19,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::native::dequantize_per_channel_out; 
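For orientation, the arithmetic these dequantize kernels and their tests exercise reduces, per element, to `(q - zero_point) * scale`. A self-contained sketch of that single-element case; the function name is illustrative, and the real kernels handle several quantized input types and read scales from a tensor:

#include <cstdint>
#include <cstdio>

// Affine dequantization for one element: shift by the zero point,
// then rescale back into floating point.
float dequantize_element(std::uint8_t q, std::int32_t zero_point, float scale) {
  return (static_cast<float>(q) - static_cast<float>(zero_point)) * scale;
}

int main() {
  // With scale 0.5 and zero_point 127, the quantized value 131 maps
  // back to (131 - 127) * 0.5 = 2.0.
  std::printf("%f\n", dequantize_element(131, 127, 0.5f));
}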
using torch::executor::native::dequantize_per_tensor_out; using torch::executor::native::dequantize_per_tensor_tensor_args_out; diff --git a/kernels/quantized/test/op_embedding2b_test.cpp b/kernels/quantized/test/op_embedding2b_test.cpp index a350b77ec0d..66a3b589dde 100644 --- a/kernels/quantized/test/op_embedding2b_test.cpp +++ b/kernels/quantized/test/op_embedding2b_test.cpp @@ -18,10 +18,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::quantized_embedding_2bit_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_embedding4b_test.cpp b/kernels/quantized/test/op_embedding4b_test.cpp index 6ab10376b88..b8d5c639c7e 100644 --- a/kernels/quantized/test/op_embedding4b_test.cpp +++ b/kernels/quantized/test/op_embedding4b_test.cpp @@ -18,10 +18,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::quantized_embedding_4bit_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_embedding_test.cpp b/kernels/quantized/test/op_embedding_test.cpp index 531b0cb0811..5d5ad45ace8 100644 --- a/kernels/quantized/test/op_embedding_test.cpp +++ b/kernels/quantized/test/op_embedding_test.cpp @@ -20,11 +20,11 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::dequantize_per_tensor_out; using torch::executor::native::embedding_out; using torch::executor::native::quantize_per_tensor_out; diff --git a/kernels/quantized/test/op_mixed_linear_test.cpp b/kernels/quantized/test/op_mixed_linear_test.cpp index 833fc766ffd..e659b41151e 100644 --- a/kernels/quantized/test/op_mixed_linear_test.cpp +++ b/kernels/quantized/test/op_mixed_linear_test.cpp @@ -17,10 +17,10 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::quantized_mixed_linear_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_mixed_mm_test.cpp b/kernels/quantized/test/op_mixed_mm_test.cpp index 4d81089fa91..8051f299fbd 100644 --- a/kernels/quantized/test/op_mixed_mm_test.cpp +++ b/kernels/quantized/test/op_mixed_mm_test.cpp @@ -17,10 +17,10 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::native::quantized_mixed_mm_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/test/op__empty_dim_order_test.cpp b/kernels/test/op__empty_dim_order_test.cpp index a533133f53b..b3534948c8d 100644 --- a/kernels/test/op__empty_dim_order_test.cpp +++ b/kernels/test/op__empty_dim_order_test.cpp @@ -18,10 +18,10 @@ using namespace 
::testing; using executorch::aten::DimOrderType; using executorch::aten::IntArrayRef; -using executorch::aten::optional; using executorch::aten::OptionalArrayRef; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpEmptyDimOrderOutTest : public OperatorTest { diff --git a/kernels/test/op__to_dim_order_copy_test.cpp b/kernels/test/op__to_dim_order_copy_test.cpp index 3fbecac2071..4a8afe51267 100644 --- a/kernels/test/op__to_dim_order_copy_test.cpp +++ b/kernels/test/op__to_dim_order_copy_test.cpp @@ -22,9 +22,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; // To further emphasize the accuracy of our op_to, we TEST_F the conversion @@ -57,7 +57,7 @@ class OpToDimOrderCopyTest : public OperatorTest { Tensor& op__to_dim_order_copy_out( const Tensor& self, bool non_blocking, - executorch::aten::optional> dim_order, + std::optional> dim_order, Tensor& out) { return torch::executor::dim_order_ops::_to_dim_order_copy_outf( context_, self, non_blocking, dim_order, out); diff --git a/kernels/test/op_any_test.cpp b/kernels/test/op_any_test.cpp index 072b7ed3c83..fc815ea8508 100644 --- a/kernels/test/op_any_test.cpp +++ b/kernels/test/op_any_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpAnyOutTest : public OperatorTest { diff --git a/kernels/test/op_argmax_test.cpp b/kernels/test/op_argmax_test.cpp index 4d68dfe88be..21f7be35e85 100644 --- a/kernels/test/op_argmax_test.cpp +++ b/kernels/test/op_argmax_test.cpp @@ -17,9 +17,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpArgmaxTest : public OperatorTest { diff --git a/kernels/test/op_argmin_test.cpp b/kernels/test/op_argmin_test.cpp index a0b2699a28f..3478c21675b 100644 --- a/kernels/test/op_argmin_test.cpp +++ b/kernels/test/op_argmin_test.cpp @@ -17,9 +17,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpArgminTest : public OperatorTest { diff --git a/kernels/test/op_as_strided_copy_test.cpp b/kernels/test/op_as_strided_copy_test.cpp index 63fe5a3982b..cb0191c69a8 100644 --- a/kernels/test/op_as_strided_copy_test.cpp +++ b/kernels/test/op_as_strided_copy_test.cpp @@ -19,10 +19,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpAsStridedCopyOutTest : public OperatorTest { diff --git a/kernels/test/op_avg_pool2d_test.cpp b/kernels/test/op_avg_pool2d_test.cpp index 9ceedce75dc..600e59d8f22 100644 --- a/kernels/test/op_avg_pool2d_test.cpp +++ b/kernels/test/op_avg_pool2d_test.cpp @@ -27,7 +27,7 @@ class OpAvgPool2DOutTest : public OperatorTest { 
executorch::aten::ArrayRef padding, bool ceil_mode, bool count_include_pad, - executorch::aten::optional divisor_override, + std::optional divisor_override, executorch::aten::Tensor& out) { return torch::executor::aten::avg_pool2d_outf( context_, @@ -116,7 +116,7 @@ class OpAvgPool2DOutTest : public OperatorTest { padding_vec.data(), padding_vec.size()); bool ceil_mode = false; bool count_include_pad = true; - executorch::aten::optional divisor_override; + std::optional divisor_override; executorch::aten::Tensor out = tf_dtype.zeros({2, 3, 3, 4}); executorch::aten::Tensor out_expected = tf_dtype.make( {2, 3, 3, 4}, @@ -282,8 +282,7 @@ class OpAvgPool2DOutTest : public OperatorTest { padding_vec.data(), padding_vec.size()); bool ceil_mode = false; bool count_include_pad = true; - executorch::aten::optional divisor_override = - executorch::aten::optional(10); + std::optional divisor_override = std::optional(10); executorch::aten::Tensor out = tfFloat.zeros({2, 3, 3, 4}); executorch::aten::Tensor out_expected = tfFloat.make( {2, 3, 3, 4}, @@ -535,7 +534,7 @@ class OpAvgPool2DOutTest : public OperatorTest { padding_vec.data(), padding_vec.size()); bool ceil_mode = true; bool count_include_pad = false; - executorch::aten::optional divisor_override; + std::optional divisor_override; executorch::aten::Tensor out = tfFloat.zeros({2, 3, 13, 7}); executorch::aten::Tensor out_expected = tfFloat.make( {2, 3, 13, 7}, diff --git a/kernels/test/op_cdist_forward_test.cpp b/kernels/test/op_cdist_forward_test.cpp index 9ddab4c3c49..73be637569a 100644 --- a/kernels/test/op_cdist_forward_test.cpp +++ b/kernels/test/op_cdist_forward_test.cpp @@ -18,10 +18,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using std::optional; using torch::executor::testing::TensorFactory; Tensor& op_cdist_forward_out( diff --git a/kernels/test/op_clamp_test.cpp b/kernels/test/op_clamp_test.cpp index 8a021c70303..68a1c6a1997 100644 --- a/kernels/test/op_clamp_test.cpp +++ b/kernels/test/op_clamp_test.cpp @@ -23,13 +23,13 @@ using namespace ::testing; using executorch::aten::ArrayRef; using executorch::aten::nullopt; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; -using OptScalar = executorch::aten::optional; +using OptScalar = std::optional; class OpClampOutTest : public OperatorTest { protected: diff --git a/kernels/test/op_clone_test.cpp b/kernels/test/op_clone_test.cpp index fef61590f95..43e4576548a 100644 --- a/kernels/test/op_clone_test.cpp +++ b/kernels/test/op_clone_test.cpp @@ -17,9 +17,9 @@ using namespace ::testing; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpCloneTest : public OperatorTest { diff --git a/kernels/test/op_convolution_backward_test.cpp b/kernels/test/op_convolution_backward_test.cpp index 4d681754c30..ce9cd2bea4f 100644 --- a/kernels/test/op_convolution_backward_test.cpp +++ b/kernels/test/op_convolution_backward_test.cpp @@ -16,9 +16,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; 
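The convolution call sites below either wrap a concrete tensor in `std::optional(bias)` or omit the bias entirely. The sketch that follows shows the same optional-argument pattern with `double` standing in for `Tensor`, so it stays self-contained:

#include <cstdio>
#include <optional>

// Stand-in for an optional-bias parameter like the convolution's
// const std::optional<Tensor>& bias.
double apply_bias(double acc, const std::optional<double>& bias) {
  return bias.has_value() ? acc + *bias : acc;
}

int main() {
  // Call sites wrap a concrete value, as in std::optional<Tensor>(bias),
  // or pass std::nullopt to omit the bias entirely.
  std::printf("%f\n", apply_bias(1.0, std::optional<double>(0.5)));  // 1.5
  std::printf("%f\n", apply_bias(1.0, std::nullopt));                // 1.0
}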
+using std::optional; using IntArrayRef = executorch::aten::ArrayRef; using OptIntArrayRef = executorch::aten::OptionalArrayRef; using torch::executor::testing::TensorFactory; diff --git a/kernels/test/op_convolution_test.cpp b/kernels/test/op_convolution_test.cpp index 2929d19e8cb..070268bd436 100644 --- a/kernels/test/op_convolution_test.cpp +++ b/kernels/test/op_convolution_test.cpp @@ -17,9 +17,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpConvOutTest : public OperatorTest { @@ -164,7 +164,7 @@ TEST_F(OpConvCorrectnessTest, GenericSmokeTest) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -492,7 +492,7 @@ TEST_F(OpConvCorrectnessTest, InvalidInputShape) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -506,7 +506,7 @@ TEST_F(OpConvCorrectnessTest, InvalidInputShape) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -538,7 +538,7 @@ TEST_F(OpConvCorrectnessTest, TransposedDefaultParams) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -575,7 +575,7 @@ TEST_F(OpConvCorrectnessTest, TransposedNonDefaultParams) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -643,7 +643,7 @@ TEST_F(OpConvCorrectnessTest, TransposedDefaultParamsChannelsLast) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -687,7 +687,7 @@ TEST_F(OpConvCorrectnessTest, TransposedNonDefaultParamsChannelsLast) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, @@ -719,7 +719,7 @@ TEST_F(OpConvCorrectnessTest, InvalidOutputPadding) { op_convolution_out( input, weight, - executorch::aten::optional(bias), + std::optional(bias), executorch::aten::ArrayRef{stride, 1}, executorch::aten::ArrayRef{padding, 1}, executorch::aten::ArrayRef{dilation, 1}, diff --git a/kernels/test/op_copy_test.cpp b/kernels/test/op_copy_test.cpp index 5ef58b571ba..97fd7e7e6c0 100644 --- a/kernels/test/op_copy_test.cpp +++ b/kernels/test/op_copy_test.cpp @@ -17,9 +17,9 @@ using namespace ::testing; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpCopyTest : public OperatorTest { diff --git a/kernels/test/op_cumsum_test.cpp 
b/kernels/test/op_cumsum_test.cpp index 094acb761c8..3e0ec164d04 100644 --- a/kernels/test/op_cumsum_test.cpp +++ b/kernels/test/op_cumsum_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpCumSumOutTest : public OperatorTest { diff --git a/kernels/test/op_elu_test.cpp b/kernels/test/op_elu_test.cpp index 73ee8ac31a7..d7784f4dc04 100644 --- a/kernels/test/op_elu_test.cpp +++ b/kernels/test/op_elu_test.cpp @@ -17,8 +17,8 @@ using executorch::aten::Scalar; using executorch::aten::ScalarType; -using executorch::aten::string_view; using executorch::aten::Tensor; +using std::string_view; using torch::executor::testing::TensorFactory; class OpEluTest : public OperatorTest { diff --git a/kernels/test/op_empty_test.cpp b/kernels/test/op_empty_test.cpp index 6e77b4e1f07..23173b1feae 100644 --- a/kernels/test/op_empty_test.cpp +++ b/kernels/test/op_empty_test.cpp @@ -19,9 +19,9 @@ using namespace ::testing; using executorch::aten::IntArrayRef; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpEmptyOutTest : public OperatorTest { diff --git a/kernels/test/op_expand_copy_test.cpp b/kernels/test/op_expand_copy_test.cpp index b90a19a7c0d..86d3858c830 100644 --- a/kernels/test/op_expand_copy_test.cpp +++ b/kernels/test/op_expand_copy_test.cpp @@ -196,9 +196,10 @@ TEST_F(OpExpandOutTest, ExpandOneToNPlusNewDimDifferentTwo) { EXPECT_TENSOR_EQ(out, ret); EXPECT_TENSOR_EQ( out, - tf.make(/*sizes*/ {2, 6, 2}, /*data=*/{42, 96, 42, 96, 42, 96, 42, 96, - 42, 96, 42, 96, 42, 96, 42, 96, - 42, 96, 42, 96, 42, 96, 42, 96})); + tf.make( + /*sizes*/ {2, 6, 2}, + /*data=*/{42, 96, 42, 96, 42, 96, 42, 96, 42, 96, 42, 96, + 42, 96, 42, 96, 42, 96, 42, 96, 42, 96, 42, 96})); } TEST_F(OpExpandOutTest, BadOutDataTypeGoodShapeDeath) { diff --git a/kernels/test/op_full_like_test.cpp b/kernels/test/op_full_like_test.cpp index 84f2fc554f7..c0b9dcc4107 100644 --- a/kernels/test/op_full_like_test.cpp +++ b/kernels/test/op_full_like_test.cpp @@ -18,10 +18,10 @@ using namespace ::testing; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpFullLikeTest : public OperatorTest { diff --git a/kernels/test/op_full_test.cpp b/kernels/test/op_full_test.cpp index 67a897933c3..93129679087 100644 --- a/kernels/test/op_full_test.cpp +++ b/kernels/test/op_full_test.cpp @@ -19,10 +19,10 @@ using namespace ::testing; using executorch::aten::IntArrayRef; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpFullOutTest : public OperatorTest { diff --git a/kernels/test/op_gelu_test.cpp b/kernels/test/op_gelu_test.cpp index e7a150c7a32..8fae399fb18 100644 --- a/kernels/test/op_gelu_test.cpp +++ b/kernels/test/op_gelu_test.cpp @@ -17,15 +17,15 @@ using namespace ::testing; using executorch::aten::ScalarType; -using executorch::aten::string_view; using executorch::aten::Tensor; +using 
std::string_view; using torch::executor::testing::SupportedFeatures; using torch::executor::testing::TensorFactory; class OpGeluTest : public OperatorTest { protected: Tensor& - op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) { + op_gelu_out(const Tensor& self, std::string_view approximate, Tensor& out) { return torch::executor::aten::gelu_outf(context_, self, approximate, out); } diff --git a/kernels/test/op_index_put_test.cpp b/kernels/test/op_index_put_test.cpp index b25cdb01e92..f1021d9ad61 100644 --- a/kernels/test/op_index_put_test.cpp +++ b/kernels/test/op_index_put_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; using OptTensorArrayRef = ArrayRef>; diff --git a/kernels/test/op_index_test.cpp b/kernels/test/op_index_test.cpp index ab17a92bfb5..2471d44b0a3 100644 --- a/kernels/test/op_index_test.cpp +++ b/kernels/test/op_index_test.cpp @@ -19,9 +19,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; using OptTensorArrayRef = ArrayRef>; @@ -411,11 +411,11 @@ TEST_F(OpIndexTensorOutTest, IndicesWithOnlyNullTensorsSupported) { optional indices0[] = {optional()}; run_test_cases(x, indices0, x); - optional indices1[] = {optional(), optional()}; + optional indices1[] = {optional(), std::optional()}; run_test_cases(x, indices1, x); optional indices2[] = { - optional(), optional(), optional()}; + optional(), std::optional(), std::optional()}; Tensor out = tf.ones({2, 3}); ET_EXPECT_KERNEL_FAILURE_WITH_MSG( context_, op_index_tensor_out(x, indices2, out), ""); diff --git a/kernels/test/op_lift_fresh_copy_test.cpp b/kernels/test/op_lift_fresh_copy_test.cpp index 39f88e11aa8..215ad4e05c6 100644 --- a/kernels/test/op_lift_fresh_copy_test.cpp +++ b/kernels/test/op_lift_fresh_copy_test.cpp @@ -16,9 +16,9 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpLiftFreshCopyTest : public OperatorTest { diff --git a/kernels/test/op_logical_not_test.cpp b/kernels/test/op_logical_not_test.cpp index ad453db18f8..d06a3dcefea 100644 --- a/kernels/test/op_logical_not_test.cpp +++ b/kernels/test/op_logical_not_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpLogicalNotOutTest : public OperatorTest { diff --git a/kernels/test/op_logit_test.cpp b/kernels/test/op_logit_test.cpp index a5e78e8aa6b..1bb0a43a37d 100644 --- a/kernels/test/op_logit_test.cpp +++ b/kernels/test/op_logit_test.cpp @@ -16,14 +16,15 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpLogitOutTest : public OperatorTest { protected: - Tensor& op_logit_out(const Tensor& self, optional eps, Tensor& out) { + Tensor& + op_logit_out(const Tensor& self, std::optional eps, Tensor& out) { return 
torch::executor::aten::logit_outf(context_, self, eps, out); } diff --git a/kernels/test/op_mean_test.cpp b/kernels/test/op_mean_test.cpp index 898ffe6f8e0..47702be82cb 100644 --- a/kernels/test/op_mean_test.cpp +++ b/kernels/test/op_mean_test.cpp @@ -19,10 +19,10 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; using executorch::runtime::Error; +using std::optional; using torch::executor::testing::TensorFactory; class OpMeanOutTest : public OperatorTest { diff --git a/kernels/test/op_narrow_copy_test.cpp b/kernels/test/op_narrow_copy_test.cpp index 264f3e129a9..28ddeb285cb 100644 --- a/kernels/test/op_narrow_copy_test.cpp +++ b/kernels/test/op_narrow_copy_test.cpp @@ -16,9 +16,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpNarrowCopyOutTest : public OperatorTest { diff --git a/kernels/test/op_native_batch_norm_test.cpp b/kernels/test/op_native_batch_norm_test.cpp index bf05a87312d..2a4b8de7d9d 100644 --- a/kernels/test/op_native_batch_norm_test.cpp +++ b/kernels/test/op_native_batch_norm_test.cpp @@ -25,8 +25,8 @@ class OpNativeBatchNormLegitNoTrainingOutTest : public OperatorTest { executorch::aten::Tensor&> op_native_batch_norm_legit_no_training_out( const executorch::aten::Tensor& input, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, const executorch::aten::Tensor& running_mean, const executorch::aten::Tensor& running_var, double momentum, @@ -63,8 +63,8 @@ class OpNativeBatchNormLegitNoTrainingOutTest : public OperatorTest { 9.971039772033691, 3.5423521995544434, 7.452159881591797, 9.93700122833252, 1.8560808897018433, 1.524025797843933, 7.3222975730896}); - executorch::aten::optional weight = - executorch::aten::optional(tf.make( + std::optional weight = + std::optional(tf.make( {7}, {8.287437438964844, 8.227645874023438, @@ -73,8 +73,8 @@ class OpNativeBatchNormLegitNoTrainingOutTest : public OperatorTest { 4.119281768798828, 8.593960762023926, 2.3760855197906494})); - executorch::aten::optional bias = - executorch::aten::optional(tf.make( + std::optional bias = + std::optional(tf.make( {7}, {7.824275970458984, 6.84327507019043, @@ -163,8 +163,8 @@ class OpNativeBatchNormLegitOutTest : public OperatorTest { executorch::aten::Tensor&> op_native_batch_norm_legit_out( const executorch::aten::Tensor& input, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, executorch::aten::Tensor& running_mean, executorch::aten::Tensor& running_var, bool training, @@ -198,8 +198,8 @@ class OpNativeBatchNormLegitNoStatsOutTest : public OperatorTest { executorch::aten::Tensor&> op_native_batch_norm_legit_no_stats_out( const executorch::aten::Tensor& input, - const executorch::aten::optional& weight, - const executorch::aten::optional& bias, + const std::optional& weight, + const std::optional& bias, bool training, double momentum, double eps, @@ -225,10 +225,10 @@ class OpNativeBatchNormLegitNoStatsOutTest : public OperatorTest { executorch::aten::Tensor input = tf.make({3, 4}, {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121}); - executorch::aten::optional weight = - executorch::aten::optional(); - 
executorch::aten::optional bias = - executorch::aten::optional(); + std::optional weight = + std::optional(); + std::optional bias = + std::optional(); bool training = true; double momentum = 1e-3; double eps = 1e-5; @@ -339,8 +339,8 @@ TEST_F(OpNativeBatchNormLegitNoTrainingOutTest, SampleAtomicTest3D) { 3.887125253677368, 9.278786659240723, 6.742891311645508, 5.01821756362915, 2.326876640319824, 7.939553737640381, 3.2622408866882324, 3.829448699951172}); - executorch::aten::optional weight = - executorch::aten::optional(tfFloat.make( + std::optional weight = + std::optional(tfFloat.make( {7}, {0.5193436145782471, 4.531304836273193, @@ -349,8 +349,8 @@ TEST_F(OpNativeBatchNormLegitNoTrainingOutTest, SampleAtomicTest3D) { 2.6848177909851074, 7.309220314025879, 2.2476916313171387})); - executorch::aten::optional bias = - executorch::aten::optional(tfFloat.make( + std::optional bias = + std::optional(tfFloat.make( {7}, {4.643010139465332, 0.2791440486907959, @@ -521,15 +521,15 @@ TEST_F(OpNativeBatchNormLegitNoTrainingOutTest, SampleAtomicTest4D) { 9.173870086669922, 3.781676769256592, 5.6734232902526855, 3.301741600036621, 1.3799077272415161, 8.990988731384277, 2.2520315647125244, 2.483280897140503}); - executorch::aten::optional weight = - executorch::aten::optional(tfFloat.make( + std::optional weight = + std::optional(tfFloat.make( {4}, {1.8311285972595215, 5.851841926574707, 6.108979225158691, 5.1755266189575195})); - executorch::aten::optional bias = - executorch::aten::optional(tfFloat.make( + std::optional bias = + std::optional(tfFloat.make( {4}, {5.1375732421875, 3.7950849533081055, @@ -681,15 +681,15 @@ TEST_F(OpNativeBatchNormLegitNoTrainingOutTest, SampleAtomicTestDouble) { 1.7936384677886963, 1.8733304738998413, 9.386192321777344, 2.442445755004883, 2.2374587059020996, 1.6268903017044067, 1.9272565841674805, 0.04978537559509277, 5.165012359619141}); - executorch::aten::optional weight = - executorch::aten::optional(tfDouble.make( + std::optional weight = + std::optional(tfDouble.make( {4}, {5.4100823402404785, 3.3440847396850586, 0.9714162349700928, 0.6811875104904175})); - executorch::aten::optional bias = - executorch::aten::optional(tfDouble.make( + std::optional bias = + std::optional(tfDouble.make( {4}, {6.839208126068115, 6.471728801727295, @@ -820,9 +820,9 @@ TEST_F(OpNativeBatchNormLegitNoTrainingOutTest, SampleAtomicTestNoWeight) { 6.707937240600586, 0.946076512336731, 6.623589515686035, 5.87992000579834, 2.196932315826416, 8.085456848144531, 7.774395942687988, 8.86058235168457}); - executorch::aten::optional weight; - executorch::aten::optional bias = - executorch::aten::optional(tfFloat.make( + std::optional weight; + std::optional bias = + std::optional(tfFloat.make( {7}, {3.2798612117767334, 7.070205211639404, @@ -939,8 +939,8 @@ TEST_F( 0.1542043685913086, 3.606675863265991, 2.65787410736084, 5.136600494384766, 6.950716972351074, 6.051759719848633, 7.304986953735352, 6.186429977416992}); - executorch::aten::optional weight; - executorch::aten::optional bias; + std::optional weight; + std::optional bias; executorch::aten::Tensor running_mean = tfFloat.make( {4}, {8.043643951416016, @@ -1004,8 +1004,8 @@ TEST_F(OpNativeBatchNormLegitOutTest, SampleAtomicTest2D) { 9.971039772033691, 3.5423521995544434, 7.452159881591797, 9.93700122833252, 1.8560808897018433, 1.524025797843933, 7.3222975730896}); - executorch::aten::optional weight = - executorch::aten::optional(tfFloat.make( + std::optional weight = + std::optional(tfFloat.make( {7}, {8.287437438964844, 
8.227645874023438, @@ -1014,8 +1014,8 @@ TEST_F(OpNativeBatchNormLegitOutTest, SampleAtomicTest2D) { 4.119281768798828, 8.593960762023926, 2.3760855197906494})); - executorch::aten::optional bias = - executorch::aten::optional(tfFloat.make( + std::optional bias = + std::optional(tfFloat.make( {7}, {7.824275970458984, 6.84327507019043, @@ -1092,10 +1092,10 @@ TEST_F(OpNativeBatchNormLegitNoStatsOutTest, SampleAtomicTest3D) { executorch::aten::Tensor input = tfFloat.make( {2, 3, 4}, {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529}); - executorch::aten::optional weight = - executorch::aten::optional(); - executorch::aten::optional bias = - executorch::aten::optional(); + std::optional weight = + std::optional(); + std::optional bias = + std::optional(); bool training = true; double momentum = 1e-3; double eps = 1e-5; @@ -1128,11 +1128,11 @@ TEST_F(OpNativeBatchNormLegitNoStatsOutTest, SampleAtomicTest4D) { tfFloat.make({2, 3, 2, 2}, {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529}); - executorch::aten::optional weight = - executorch::aten::optional( + std::optional weight = + std::optional( tfFloat.make({3}, {1.1, 0.7, 0.3})); - executorch::aten::optional bias = - executorch::aten::optional( + std::optional bias = + std::optional( tfFloat.make({3}, {1.7, 2.2, 3.3})); bool training = true; double momentum = 1e-3; diff --git a/kernels/test/op_native_group_norm_test.cpp b/kernels/test/op_native_group_norm_test.cpp index 7452350ad29..e196899fbca 100644 --- a/kernels/test/op_native_group_norm_test.cpp +++ b/kernels/test/op_native_group_norm_test.cpp @@ -15,9 +15,9 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; ::std::tuple op_native_group_norm_out( diff --git a/kernels/test/op_native_layer_norm_test.cpp b/kernels/test/op_native_layer_norm_test.cpp index 7916a9130ac..d8cc2d3b2e4 100644 --- a/kernels/test/op_native_layer_norm_test.cpp +++ b/kernels/test/op_native_layer_norm_test.cpp @@ -24,13 +24,13 @@ using namespace ::testing; using executorch::aten::ArrayRef; using executorch::aten::IntArrayRef; using executorch::aten::nullopt; -using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; -using OptScalar = executorch::aten::optional; +using OptScalar = std::optional; class OpNativeLayerNormTest : public OperatorTest { protected: @@ -249,7 +249,7 @@ class OpNativeLayerNormTest : public OperatorTest { SCOPED_TRACE(test_case.title); // Printed if the test fails Tensor in = tf.make(test_case.sizes, test_case.input_data); - executorch::aten::optional weight, bias; + std::optional weight, bias; if (!test_case.weight_data.empty()) { weight = tf.make(test_case.normalized_shape, test_case.weight_data); } diff --git a/kernels/test/op_prod_test.cpp b/kernels/test/op_prod_test.cpp index 11a7e3fae4f..d385fd7cb48 100644 --- a/kernels/test/op_prod_test.cpp +++ b/kernels/test/op_prod_test.cpp @@ -16,13 +16,13 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; Tensor& -op_prod_out(const Tensor& self, optional dtype, Tensor& out) { 
+op_prod_out(const Tensor& self, std::optional dtype, Tensor& out) { executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::prod_outf(context, self, dtype, out); } diff --git a/kernels/test/op_repeat_interleave_test.cpp b/kernels/test/op_repeat_interleave_test.cpp index a54f6b1c9a7..71018667ac7 100644 --- a/kernels/test/op_repeat_interleave_test.cpp +++ b/kernels/test/op_repeat_interleave_test.cpp @@ -13,9 +13,9 @@ #include using namespace ::testing; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpRepeatInterleaveTensorOutTest : public OperatorTest { diff --git a/kernels/test/op_slice_copy_test.cpp b/kernels/test/op_slice_copy_test.cpp index 8c77d6415f7..c7e8a0acf66 100644 --- a/kernels/test/op_slice_copy_test.cpp +++ b/kernels/test/op_slice_copy_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpSliceCopyTensorOutTest : public OperatorTest { diff --git a/kernels/test/op_slice_scatter_test.cpp b/kernels/test/op_slice_scatter_test.cpp index 9d406cf968a..14a5bd2679d 100644 --- a/kernels/test/op_slice_scatter_test.cpp +++ b/kernels/test/op_slice_scatter_test.cpp @@ -18,9 +18,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpSliceScatterTensorOutTest : public OperatorTest { diff --git a/kernels/test/op_sum_test.cpp b/kernels/test/op_sum_test.cpp index 2f110e06b1d..748e5427b1d 100644 --- a/kernels/test/op_sum_test.cpp +++ b/kernels/test/op_sum_test.cpp @@ -19,9 +19,9 @@ using namespace ::testing; using executorch::aten::ArrayRef; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; class OpSumOutTest : public OperatorTest { diff --git a/kernels/test/op_to_copy_test.cpp b/kernels/test/op_to_copy_test.cpp index 0641f45b468..d9798d6d573 100644 --- a/kernels/test/op_to_copy_test.cpp +++ b/kernels/test/op_to_copy_test.cpp @@ -22,9 +22,9 @@ using namespace ::testing; using executorch::aten::MemoryFormat; -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::TensorFactory; // To further emphasize the accuracy of our op_to, we test the conversion diff --git a/kernels/test/op_upsample_bilinear2d_test.cpp b/kernels/test/op_upsample_bilinear2d_test.cpp index c751e8bd80c..95fea942e39 100644 --- a/kernels/test/op_upsample_bilinear2d_test.cpp +++ b/kernels/test/op_upsample_bilinear2d_test.cpp @@ -15,9 +15,9 @@ #include -using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using std::optional; using torch::executor::testing::SupportedFeatures; using torch::executor::testing::TensorFactory; diff --git a/kernels/test/op_upsample_nearest2d_test.cpp b/kernels/test/op_upsample_nearest2d_test.cpp index 9f7e0229691..76e66e666dd 100644 --- a/kernels/test/op_upsample_nearest2d_test.cpp +++ b/kernels/test/op_upsample_nearest2d_test.cpp @@ -17,9 +17,9 @@ #include -using 
executorch::aten::optional;
 using executorch::aten::ScalarType;
 using executorch::aten::Tensor;
+using std::optional;
 using torch::executor::testing::SupportedFeatures;
 using torch::executor::testing::TensorFactory;

diff --git a/kernels/test/op_var_test.cpp b/kernels/test/op_var_test.cpp
index fbfd16f1b23..f2bd3acccf3 100644
--- a/kernels/test/op_var_test.cpp
+++ b/kernels/test/op_var_test.cpp
@@ -19,10 +19,10 @@
 using namespace ::testing;
 using executorch::aten::ArrayRef;
-using executorch::aten::optional;
 using executorch::aten::Scalar;
 using executorch::aten::ScalarType;
 using executorch::aten::Tensor;
+using std::optional;
 using torch::executor::testing::TensorFactory;

 namespace {
@@ -77,7 +77,11 @@ class OpVarOutTest : public OperatorTest {
     ET_EXPECT_KERNEL_FAILURE(
         context_,
         op_var_out(
-            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
+            self,
+            optional_dim_list,
+            /*unbiased=*/true,
+            /*keepdim=*/true,
+            out));

     // the same dim appears multiple times in list of dims
     int64_t dims_2[2] = {2, 2};
@@ -85,7 +89,11 @@
     ET_EXPECT_KERNEL_FAILURE(
         context_,
         op_var_out(
-            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
+            self,
+            optional_dim_list,
+            /*unbiased=*/true,
+            /*keepdim=*/true,
+            out));
   }

   template
@@ -115,7 +123,11 @@ class OpVarOutTest : public OperatorTest {
     ET_EXPECT_KERNEL_FAILURE(
         context_,
         op_var_out(
-            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
+            self,
+            optional_dim_list,
+            /*unbiased=*/true,
+            /*keepdim=*/true,
+            out));

     // dimension size mismatch when keepdim is false
     out = tf_out.zeros({2, 1, 4});
@@ -171,7 +183,11 @@ class OpVarOutTest : public OperatorTest {
     // keepdim=false should work
     out = tf_out.zeros({2, 3});
     op_var_out(
-        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
+        self,
+        optional_dim_list,
+        /*unbiased=*/true,
+        /*keepdim=*/false,
+        out);
     // clang-format off
     expect_tensor_close_with_increased_tol(out, tf_out.make(
       {2, 3},
@@ -192,7 +208,11 @@
     out = tf_out.zeros({4});
     op_var_out(
-        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
+        self,
+        optional_dim_list,
+        /*unbiased=*/true,
+        /*keepdim=*/false,
+        out);
     expect_tensor_close_with_increased_tol(
         out, tf_out.make({4}, {56.0, 56.0, 56.0, 56.0}));

@@ -201,7 +221,11 @@
     int64_t dims_3[1] = {-2};
     optional_dim_list = ArrayRef<int64_t>{dims_3, 1};
     op_var_out(
-        self, optional_dim_list, /*unbiased=*/false, /*keepdim=*/true, out);
+        self,
+        optional_dim_list,
+        /*unbiased=*/false,
+        /*keepdim=*/true,
+        out);
     // clang-format off
     expect_tensor_close_with_increased_tol(out, tf_out.make(
       {2, 1, 4},
@@ -324,7 +348,11 @@ TEST_F(OpVarOutTest, InvalidDTypeDies) {
     ET_EXPECT_KERNEL_FAILURE(
         context_,
         op_var_out(
-            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
+            self,
+            optional_dim_list,
+            /*unbiased=*/true,
+            /*keepdim=*/true,
+            out));
 }

 TEST_F(OpVarOutTest, AllFloatInputFloatOutputPasses) {
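Editor's note: every kernel-test hunk in this patch makes the same one-line swap, dropping `using executorch::aten::optional;` in favor of `using std::optional;` while leaving all unqualified `optional<...>` uses in the file untouched. A minimal sketch of why that swap is source-compatible follows; the `legacy` namespace is a hypothetical stand-in for the old alias, under the assumption (not stated in the patch itself) that `executorch::aten::optional` already forwards to `std::optional`.

#include <optional>

namespace legacy {
// Hypothetical stand-in for the pre-migration alias.
template <typename T>
using optional = std::optional<T>;
} // namespace legacy

// Swapping this single using-declaration is the whole migration for most
// test files: unqualified optional<T> below resolves the same either way.
using std::optional; // was: using legacy::optional;

optional<int> half_if_even(int x) {
  if (x % 2 != 0) {
    return std::nullopt; // empty optional, spelled identically under both aliases
  }
  return x / 2;
}

int main() {
  return half_if_even(4).value_or(0) == 2 ? 0 : 1; // exits 0 on success
}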
diff --git a/runtime/core/evalue.cpp b/runtime/core/evalue.cpp
index e4d156218c3..121a9a29fa2 100644
--- a/runtime/core/evalue.cpp
+++ b/runtime/core/evalue.cpp
@@ -11,24 +11,20 @@ namespace executorch {
 namespace runtime {
 template <>
-executorch::aten::ArrayRef<executorch::aten::optional<executorch::aten::Tensor>>
-BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>>::get()
-    const {
+executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>
+BoxedEvalueList<std::optional<executorch::aten::Tensor>>::get() const {
   for (typename executorch::aten::ArrayRef<
-           executorch::aten::optional<executorch::aten::Tensor>>::size_type i =
-           0;
+           std::optional<executorch::aten::Tensor>>::size_type i = 0;
        i < wrapped_vals_.size();
        i++) {
     if (wrapped_vals_[i] == nullptr) {
       unwrapped_vals_[i] = executorch::aten::nullopt;
     } else {
       unwrapped_vals_[i] =
-          wrapped_vals_[i]
-              ->to<executorch::aten::optional<executorch::aten::Tensor>>();
+          wrapped_vals_[i]->to<std::optional<executorch::aten::Tensor>>();
     }
   }
-  return executorch::aten::ArrayRef<
-      executorch::aten::optional<executorch::aten::Tensor>>{
+  return executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>{
       unwrapped_vals_, wrapped_vals_.size()};
 }
 } // namespace runtime
diff --git a/runtime/core/evalue.h b/runtime/core/evalue.h
index 500c2bc4f0e..6f1cc5f06db 100644
--- a/runtime/core/evalue.h
+++ b/runtime/core/evalue.h
@@ -77,9 +77,8 @@ class BoxedEvalueList {
 };

 template <>
-executorch::aten::ArrayRef<executorch::aten::optional<executorch::aten::Tensor>>
-BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>>::get()
-    const;
+executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>
+BoxedEvalueList<std::optional<executorch::aten::Tensor>>::get() const;

 // Aggregate typing system similar to IValue only slimmed down with less
 // functionality, no dependencies on atomic, and fewer supported types to better
@@ -102,7 +101,7 @@ struct EValue {
     executorch::aten::ArrayRef<bool> as_bool_list;
     BoxedEvalueList<int64_t> as_int_list;
     BoxedEvalueList<executorch::aten::Tensor> as_tensor_list;
-    BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>>
+    BoxedEvalueList<std::optional<executorch::aten::Tensor>>
         as_list_optional_tensor;
   } copyable_union;

@@ -290,9 +289,9 @@ struct EValue {
     return tag == Tag::String;
   }

-  executorch::aten::string_view toString() const {
+  std::string_view toString() const {
     ET_CHECK_MSG(isString(), "EValue is not a String.");
-    return executorch::aten::string_view(
+    return std::string_view(
         payload.copyable_union.as_string.data(),
         payload.copyable_union.as_string.size());
   }
@@ -357,7 +356,7 @@ struct EValue {
   /****** List Optional Tensor Type ******/
   /*implicit*/ EValue(
-      BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>> t)
+      BoxedEvalueList<std::optional<executorch::aten::Tensor>> t)
       : tag(Tag::ListOptionalTensor) {
     payload.copyable_union.as_list_optional_tensor = t;
   }
@@ -366,8 +365,7 @@
     return tag == Tag::ListOptionalTensor;
   }

-  executorch::aten::ArrayRef<
-      executorch::aten::optional<executorch::aten::Tensor>>
+  executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>
   toListOptionalTensor() const {
     return payload.copyable_union.as_list_optional_tensor.get();
   }
@@ -413,7 +411,7 @@
    * an uninitialized state.
   */
  template <typename T>
-  inline executorch::aten::optional<T> toOptional() const {
+  inline std::optional<T> toOptional() const {
    if (this->isNone()) {
      return executorch::aten::nullopt;
    }
@@ -494,33 +492,33 @@ EVALUE_DEFINE_TO(executorch::aten::Scalar, toScalar)
 EVALUE_DEFINE_TO(int64_t, toInt)
 EVALUE_DEFINE_TO(bool, toBool)
 EVALUE_DEFINE_TO(double, toDouble)
-EVALUE_DEFINE_TO(executorch::aten::string_view, toString)
+EVALUE_DEFINE_TO(std::string_view, toString)
 EVALUE_DEFINE_TO(executorch::aten::ScalarType, toScalarType)
 EVALUE_DEFINE_TO(executorch::aten::MemoryFormat, toMemoryFormat)
 EVALUE_DEFINE_TO(executorch::aten::Layout, toLayout)
 EVALUE_DEFINE_TO(executorch::aten::Device, toDevice)

 // Tensor and Optional Tensor
 EVALUE_DEFINE_TO(
-    executorch::aten::optional<executorch::aten::Tensor>,
+    std::optional<executorch::aten::Tensor>,
     toOptional<executorch::aten::Tensor>)
 EVALUE_DEFINE_TO(executorch::aten::Tensor, toTensor)

 // IntList and Optional IntList
 EVALUE_DEFINE_TO(executorch::aten::ArrayRef<int64_t>, toIntList)
 EVALUE_DEFINE_TO(
-    executorch::aten::optional<executorch::aten::ArrayRef<int64_t>>,
+    std::optional<executorch::aten::ArrayRef<int64_t>>,
     toOptional<executorch::aten::ArrayRef<int64_t>>)

 // DoubleList and Optional DoubleList
 EVALUE_DEFINE_TO(executorch::aten::ArrayRef<double>, toDoubleList)
 EVALUE_DEFINE_TO(
-    executorch::aten::optional<executorch::aten::ArrayRef<double>>,
+    std::optional<executorch::aten::ArrayRef<double>>,
     toOptional<executorch::aten::ArrayRef<double>>)

 // BoolList and Optional BoolList
 EVALUE_DEFINE_TO(executorch::aten::ArrayRef<bool>, toBoolList)
 EVALUE_DEFINE_TO(
-    executorch::aten::optional<executorch::aten::ArrayRef<bool>>,
+    std::optional<executorch::aten::ArrayRef<bool>>,
     toOptional<executorch::aten::ArrayRef<bool>>)

 // TensorList and Optional TensorList
@@ -528,14 +526,12 @@ EVALUE_DEFINE_TO(
     executorch::aten::ArrayRef<executorch::aten::Tensor>,
     toTensorList)
 EVALUE_DEFINE_TO(
-    executorch::aten::optional<
-        executorch::aten::ArrayRef<executorch::aten::Tensor>>,
+    std::optional<executorch::aten::ArrayRef<executorch::aten::Tensor>>,
     toOptional<executorch::aten::ArrayRef<executorch::aten::Tensor>>)

 // List of Optional Tensor
 EVALUE_DEFINE_TO(
-    executorch::aten::ArrayRef<
-        executorch::aten::optional<executorch::aten::Tensor>>,
+    executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>>,
     toListOptionalTensor)
 #undef EVALUE_DEFINE_TO
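Editor's note: the `toOptional<T>()` hunk above is the piece the evalue_test changes below exercise: a `None`-tagged EValue becomes an empty optional, anything else unwraps to its payload. Here is a minimal, self-contained sketch of that tagged-union pattern; `MiniValue` is a hypothetical stand-in built on `std::variant`, not the real EValue, which uses a hand-rolled union and tag.

#include <cassert>
#include <cstdint>
#include <optional>
#include <variant>

struct MiniValue {
  // monostate plays the role of the None tag.
  std::variant<std::monostate, int64_t, double> payload;

  bool isNone() const {
    return std::holds_alternative<std::monostate>(payload);
  }

  template <typename T>
  std::optional<T> toOptional() const {
    if (isNone()) {
      return std::nullopt; // mirrors the executorch::aten::nullopt branch
    }
    return std::get<T>(payload); // unwrap the stored value
  }
};

int main() {
  MiniValue none{};
  MiniValue five{int64_t{5}};
  assert(!none.toOptional<int64_t>().has_value());
  assert(five.toOptional<int64_t>().value() == 5);
}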
diff --git a/runtime/core/test/evalue_test.cpp b/runtime/core/test/evalue_test.cpp
index 996dc187dce..06cdc40ad98 100644
--- a/runtime/core/test/evalue_test.cpp
+++ b/runtime/core/test/evalue_test.cpp
@@ -102,7 +102,7 @@ TEST_F(EValueTest, ToOptionalInt) {
   EXPECT_TRUE(e.isInt());
   EXPECT_FALSE(e.isNone());

-  executorch::aten::optional<int64_t> o = e.toOptional<int64_t>();
+  std::optional<int64_t> o = e.toOptional<int64_t>();
   EXPECT_TRUE(o.has_value());
   EXPECT_EQ(o.value(), 5);
 }
@@ -111,7 +111,7 @@ TEST_F(EValueTest, NoneToOptionalInt) {
   EValue e;
   EXPECT_TRUE(e.isNone());

-  executorch::aten::optional<int64_t> o = e.toOptional<int64_t>();
+  std::optional<int64_t> o = e.toOptional<int64_t>();
   EXPECT_FALSE(o.has_value());
 }

@@ -121,7 +121,7 @@ TEST_F(EValueTest, ToOptionalScalar) {
   EXPECT_TRUE(e.isScalar());
   EXPECT_FALSE(e.isNone());

-  executorch::aten::optional<executorch::aten::Scalar> o =
+  std::optional<executorch::aten::Scalar> o =
       e.toOptional<executorch::aten::Scalar>();
   EXPECT_TRUE(o.has_value());
   EXPECT_TRUE(o.value().isFloatingPoint());
@@ -141,7 +141,7 @@ TEST_F(EValueTest, NoneToOptionalScalar) {
   EValue e;
   EXPECT_TRUE(e.isNone());

-  executorch::aten::optional<executorch::aten::Scalar> o =
+  std::optional<executorch::aten::Scalar> o =
       e.toOptional<executorch::aten::Scalar>();
   EXPECT_FALSE(o.has_value());
 }
@@ -150,7 +150,7 @@ TEST_F(EValueTest, NoneToOptionalTensor) {
   EValue e;
   EXPECT_TRUE(e.isNone());

-  executorch::aten::optional<executorch::aten::Tensor> o =
+  std::optional<executorch::aten::Tensor> o =
       e.toOptional<executorch::aten::Tensor>();
   EXPECT_FALSE(o.has_value());
 }
@@ -170,7 +170,7 @@ TEST_F(EValueTest, toString) {
   EXPECT_TRUE(e.isString());
   EXPECT_FALSE(e.isNone());

-  executorch::aten::string_view x = e.toString();
+  std::string_view x = e.toString();
   EXPECT_EQ(x, "foo");
 }

@@ -216,9 +216,9 @@ TEST_F(EValueTest, toOptionalTensorList) {
   // create list, empty evalue ctor gets tag::None
   EValue values[2] = {EValue(), EValue()};
   EValue* values_p[2] = {&values[0], &values[1]};
-  executorch::aten::optional<executorch::aten::Tensor> storage[2];
+  std::optional<executorch::aten::Tensor> storage[2];
   // wrap in array ref
-  BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>> a(
+  BoxedEvalueList<std::optional<executorch::aten::Tensor>> a(
       values_p, storage, 2);
   // create Evalue
@@ -227,9 +227,8 @@
   EXPECT_TRUE(e.isListOptionalTensor());

   // Convert back to list
-  executorch::aten::ArrayRef<
-      executorch::aten::optional<executorch::aten::Tensor>>
-      x = e.toListOptionalTensor();
+  executorch::aten::ArrayRef<std::optional<executorch::aten::Tensor>> x =
+      e.toListOptionalTensor();
   EXPECT_EQ(x.size(), 2);
   EXPECT_FALSE(x[0].has_value());
   EXPECT_FALSE(x[1].has_value());
diff --git a/runtime/executor/method.cpp b/runtime/executor/method.cpp
index 7f4836a9e76..fe44f49e7e8 100644
--- a/runtime/executor/method.cpp
+++ b/runtime/executor/method.cpp
@@ -1606,8 +1606,7 @@ EValue& Method::mutable_input(size_t i) {
   return mutable_value(get_input_index(i));
 }

-Result<executorch::aten::Tensor> Method::get_attribute(
-    executorch::aten::string_view name) {
+Result<executorch::aten::Tensor> Method::get_attribute(std::string_view name) {
   auto flatbuffer_values = serialization_plan_->values();
   size_t counter = 0;

diff --git a/runtime/executor/method.h b/runtime/executor/method.h
index 0cf7164c98e..99a6aea439f 100644
--- a/runtime/executor/method.h
+++ b/runtime/executor/method.h
@@ -202,7 +202,7 @@ class Method final {
    * failure.
    */
   ET_NODISCARD Result<executorch::aten::Tensor> get_attribute(
-      executorch::aten::string_view name);
+      std::string_view name);

   /**
    * Execute the method.
diff --git a/runtime/executor/method_meta.cpp b/runtime/executor/method_meta.cpp
index fd980c64272..c284a0d82fb 100644
--- a/runtime/executor/method_meta.cpp
+++ b/runtime/executor/method_meta.cpp
@@ -82,7 +82,7 @@ TensorInfo::TensorInfo(
     Span<const uint8_t> dim_order,
     executorch::aten::ScalarType scalar_type,
     const bool is_memory_planned,
-    executorch::aten::string_view name)
+    std::string_view name)
     : sizes_(sizes),
       dim_order_(dim_order),
       name_(name),
@@ -110,7 +110,7 @@ size_t TensorInfo::nbytes() const {
   return nbytes_;
 }

-executorch::aten::string_view TensorInfo::name() const {
+std::string_view TensorInfo::name() const {
   return name_;
 }

@@ -168,8 +168,8 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
           tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
-      executorch::aten::string_view{nullptr, 0}); // Count constant returns as
-                                                  // memory planned.
+      std::string_view{nullptr, 0}); // Count constant returns as
+                                     // memory planned.
 }

 size_t MethodMeta::num_outputs() const {
@@ -220,8 +220,8 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
       static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
       tensor_value->allocation_info() != nullptr ||
           tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
-      executorch::aten::string_view{nullptr, 0}); // Count constant returns as
-                                                  // memory planned.
+      std::string_view{nullptr, 0}); // Count constant returns as
+                                     // memory planned.
 }

 size_t MethodMeta::num_attributes() const {
@@ -265,7 +265,7 @@ Result<TensorInfo> MethodMeta::attribute_tensor_meta(size_t index) const {
           tensor_value->scalar_type()),
           tensor_value->allocation_info() != nullptr ||
               tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
-          executorch::aten::string_view{t_name->c_str(), t_name->size()});
+          std::string_view{t_name->c_str(), t_name->size()});
     }
     ++counter;
   }
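Editor's note: `TensorInfo` now hands out `std::string_view` for tensor names. The view is non-owning, and the nameless case is the empty `std::string_view{nullptr, 0}` placeholder seen above. A small sketch of the same contract follows; `MiniTensorInfo` is hypothetical, and the caller keeping the backing buffer alive stands in for the loaded program owning the real name data.

#include <cassert>
#include <string>
#include <string_view>

class MiniTensorInfo {
 public:
  explicit MiniTensorInfo(std::string_view name) : name_(name) {}

  // Nameless tensors are modeled as an empty view, mirroring the
  // std::string_view{nullptr, 0} placeholder in the diff.
  std::string_view name() const { return name_; }

 private:
  std::string_view name_; // non-owning: valid only while the buffer lives
};

int main() {
  std::string storage = "model.linear.weight"; // must outlive the view
  MiniTensorInfo named{std::string_view{storage}};
  MiniTensorInfo nameless{std::string_view{nullptr, 0}};
  assert(named.name() == "model.linear.weight");
  assert(nameless.name().empty());
}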
diff --git a/runtime/executor/method_meta.h b/runtime/executor/method_meta.h
index 3e71ebf1420..1b3be75ef17 100644
--- a/runtime/executor/method_meta.h
+++ b/runtime/executor/method_meta.h
@@ -70,7 +70,7 @@ class TensorInfo final {
   * Returns the fully qualified name of the Tensor might be empty if the tensor
   * is nameless.
   */
-  executorch::aten::string_view name() const;
+  std::string_view name() const;

  private:
   // Let MethodMeta create TensorInfo.
@@ -82,7 +82,7 @@
     Span<const uint8_t> dim_order,
     executorch::aten::ScalarType scalar_type,
     const bool is_memory_planned,
-    executorch::aten::string_view name);
+    std::string_view name);

   /**
    * The sizes of the tensor.
@@ -101,7 +101,7 @@
   Span<const uint8_t> dim_order_;

   /// The fully qualified name of the Tensor.
-  executorch::aten::string_view name_;
+  std::string_view name_;

   /// The scalar type of the tensor.
   executorch::aten::ScalarType scalar_type_;
diff --git a/runtime/executor/tensor_parser.h b/runtime/executor/tensor_parser.h
index e2b5ff8d6ea..fae183ea6e4 100644
--- a/runtime/executor/tensor_parser.h
+++ b/runtime/executor/tensor_parser.h
@@ -56,8 +56,7 @@ ET_NODISCARD Error validateTensorLayout(
 // list of optionals: list of optional Tensor, list of optional float etc, so we
 // just use a template to avoid boilerplate.
 template <typename T>
-ET_NODISCARD Result<BoxedEvalueList<executorch::aten::optional<T>>>
-parseListOptionalType(
+ET_NODISCARD Result<BoxedEvalueList<std::optional<T>>> parseListOptionalType(
     const flatbuffers::Vector<int32_t>* value_indices,
     EValue* values,
     size_t values_len,
@@ -69,8 +68,8 @@
   }

   auto* optional_tensor_list =
-      memory_manager->method_allocator()
-          ->allocateList<executorch::aten::optional<T>>(value_indices->size());
+      memory_manager->method_allocator()->allocateList<std::optional<T>>(
+          value_indices->size());
   if (optional_tensor_list == nullptr) {
     return Error::MemoryAllocationFailed;
   }
@@ -87,7 +86,7 @@
     // copy assignment is not defined if its non trivial.
     if (index == -1) {
       new (&optional_tensor_list[output_idx])
-          executorch::aten::optional<T>(executorch::aten::nullopt);
+          std::optional<T>(executorch::aten::nullopt);
       // no value to point to. BoxedEvalueList for optional tensor will convert
       // this to nullopt.
       // TODO(T161156879): do something less hacky here.
@@ -99,12 +98,12 @@
         "Invalid value index %" PRId32 " for ListOptional",
         index);
     new (&optional_tensor_list[output_idx])
-        executorch::aten::optional<T>(values[index].toOptional<T>());
+        std::optional<T>(values[index].toOptional<T>());
     evalp_list[output_idx] = &values[static_cast<size_t>(index)];
   }
   output_idx++;
 }
-  return BoxedEvalueList<executorch::aten::optional<T>>(
+  return BoxedEvalueList<std::optional<T>>(
       evalp_list, optional_tensor_list, value_indices->size());
 }
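Editor's note: the `tensor_parser.h` hunks keep the placement-new construction while retyping the slots to `std::optional<T>`. The slots come back from the method allocator as raw memory, and, as the surviving comment notes, copy assignment may not be defined for non-trivial element types, so each element is constructed in place; a serialized index of -1 maps to an explicitly empty slot. A simplified sketch of that pattern follows, using `int` instead of `Tensor` and a stack buffer instead of ExecuTorch's allocator (both assumptions).

#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <optional>

int main() {
  constexpr std::size_t kSlots = 2;
  // Uninitialized backing store, standing in for arena memory handed back by
  // a method allocator (assumed to be suitably aligned, as alignas ensures here).
  alignas(std::optional<int>) unsigned char buffer[kSlots * sizeof(std::optional<int>)];
  std::optional<int>* slots[kSlots] = {};

  // Index -1 marks a missing entry in the serialized list; mirror that here.
  const int indices[kSlots] = {-1, 7};
  for (std::size_t i = 0; i < kSlots; ++i) {
    void* raw = buffer + i * sizeof(std::optional<int>);
    if (indices[i] == -1) {
      slots[i] = new (raw) std::optional<int>(std::nullopt); // explicit empty slot
    } else {
      slots[i] = new (raw) std::optional<int>(indices[i]); // construct in place
    }
  }

  assert(!slots[0]->has_value());
  assert(slots[1]->value() == 7);

  // Placement-new'd objects are destroyed manually, never deleted.
  for (std::size_t i = 0; i < kSlots; ++i) {
    std::destroy_at(slots[i]);
  }
}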