Skip to content

Commit 248ab1d

Browse files
author
morelos
committed
Update base for Update on "[ET-VK][Ops] dequantize_per_tensor.default test setup"
Creating a dequantize_per_tensor testing framework along with a reference implementation for testing. Differential Revision: [D76267054](https://our.internmc.facebook.com/intern/diff/D76267054/) [ghstack-poisoned]
2 parents 6ea38e1 + 4a14fdd commit 248ab1d

File tree

183 files changed

+1494
-980
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

183 files changed

+1494
-980
lines changed

.ci/scripts/test_llama.sh

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -156,8 +156,7 @@ cmake_install_executorch_libraries() {
156156
-DCMAKE_INSTALL_PREFIX=cmake-out \
157157
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
158158
-DEXECUTORCH_BUILD_QNN="$QNN" \
159-
-DQNN_SDK_ROOT="$QNN_SDK_ROOT" \
160-
-Bcmake-out .
159+
-DQNN_SDK_ROOT="$QNN_SDK_ROOT"
161160
cmake --build cmake-out -j9 --target install --config "$CMAKE_BUILD_TYPE"
162161
}
163162

.ci/scripts/unittest-buck2.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,9 @@ BUILDABLE_KERNELS_PRIM_OPS_TARGETS=$(buck2 query //kernels/prim_ops/... | grep -
2525
# //runtime/kernel/... is failing because //third-party:torchgen_files's shell script can't find python on PATH.
2626
# //runtime/test/... requires Python torch, which we don't have in our OSS buck setup.
2727
for op in "build" "test"; do
28-
buck2 $op $BUILDABLE_OPTIMIZED_OPS //kernels/portable/... \
28+
buck2 $op $BUILDABLE_OPTIMIZED_OPS \
29+
//examples/selective_build:select_all_dtype_selective_lib_portable_lib \
30+
//kernels/portable/... \
2931
$BUILDABLE_KERNELS_PRIM_OPS_TARGETS //runtime/backend/... //runtime/core/... \
3032
//runtime/executor: //runtime/kernel/... //runtime/platform/...
3133
done

CMakeLists.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -554,6 +554,10 @@ if(EXECUTORCH_BUILD_PTHREADPOOL AND EXECUTORCH_BUILD_CPUINFO)
554554
endif()
555555

556556
if(EXECUTORCH_BUILD_PYBIND)
557+
558+
# Add codegen tools subdirectory for selective_build pybind module
559+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/codegen/tools)
560+
557561
if(NOT EXECUTORCH_BUILD_EXTENSION_DATA_LOADER)
558562
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/data_loader)
559563
endif()

backends/cadence/fusion_g3/operators/op_clamp.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@
2121
#include <executorch/kernels/portable/cpu/util/math_util.h>
2222
#include <executorch/runtime/kernel/kernel_includes.h>
2323

24-
using ::executorch::aten::optional;
2524
using ::executorch::aten::Scalar;
2625
using ::executorch::aten::ScalarType;
2726
using ::executorch::aten::Tensor;
2827
using ::executorch::runtime::canCast;
2928
using ::executorch::runtime::Error;
3029
using ::executorch::runtime::KernelRuntimeContext;
30+
using std::optional;
3131

3232
namespace cadence {
3333
namespace impl {

backends/cadence/fusion_g3/operators/op_dequantize.cpp

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ using ::executorch::runtime::Error;
2424
using ::executorch::runtime::KernelRuntimeContext;
2525

2626
template <typename T>
27-
using optional = ::executorch::aten::optional<T>;
27+
using optional = std::optional<T>;
2828
/* ScalarType in Executorch do not have support for below data types.
2929
* So, creating a placeholder for these data types. Once, ScalarTypes is
3030
* updated to have support for below data types, these can be removed and
@@ -51,7 +51,7 @@ void check_dequantize_per_tensor_args(
5151
int64_t quant_min,
5252
int64_t quant_max,
5353
ScalarType dtype,
54-
::executorch::aten::optional<ScalarType>& out_dtype,
54+
std::optional<ScalarType>& out_dtype,
5555
Tensor& out) {
5656
ET_CHECK_MSG(
5757
input.scalar_type() == ScalarType::Byte ||
@@ -93,7 +93,7 @@ Tensor& dequantize_impl(
9393
float* scale_data,
9494
int* zero_point_data,
9595
int* axis,
96-
::executorch::aten::optional<ScalarType> out_dtype) {
96+
std::optional<ScalarType> out_dtype) {
9797
const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size =
9898
input.sizes();
9999

@@ -260,8 +260,8 @@ Tensor& dequantize_impl(
260260
}
261261
}
262262

263-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
264-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
263+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
264+
::executorch::aten::ArrayRef<int64_t>{
265265
dims, size_t(input.dim() - 1)}};
266266

267267
// Actual dequantization logic
@@ -466,8 +466,8 @@ Tensor& dequantize_impl(
466466
}
467467
}
468468

469-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
470-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
469+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
470+
::executorch::aten::ArrayRef<int64_t>{
471471
dims, size_t(input.dim() - 1)}};
472472

473473
// Actual dequantization logic
@@ -600,7 +600,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
600600
int64_t quant_min,
601601
int64_t quant_max,
602602
ScalarType dtype,
603-
::executorch::aten::optional<ScalarType> out_dtype,
603+
std::optional<ScalarType> out_dtype,
604604
Tensor& out) {
605605
#ifdef OP_ARG_CHECK
606606
ET_CHECK_MSG(
@@ -639,12 +639,12 @@ Tensor& dequantize_per_channel_out(
639639
KernelRuntimeContext& context,
640640
const Tensor& input,
641641
const Tensor& scale,
642-
const ::executorch::aten::optional<Tensor>& opt_zero_points,
642+
const std::optional<Tensor>& opt_zero_points,
643643
int64_t axis,
644644
int64_t quant_min,
645645
int64_t quant_max,
646646
ScalarType dtype,
647-
::executorch::aten::optional<ScalarType> out_dtype,
647+
std::optional<ScalarType> out_dtype,
648648
Tensor& out) {
649649
if (axis < 0) {
650650
axis += executorch::runtime::nonzero_dim(input);

backends/cadence/fusion_g3/operators/op_div.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,14 +19,14 @@
1919
#include <executorch/runtime/kernel/kernel_includes.h>
2020
#include <executorch/runtime/platform/assert.h>
2121

22-
using ::executorch::aten::optional;
2322
using ::executorch::aten::Scalar;
2423
using ::executorch::aten::ScalarType;
25-
using ::executorch::aten::string_view;
2624
using ::executorch::aten::Tensor;
2725
using ::executorch::runtime::canCast;
2826
using ::executorch::runtime::Error;
2927
using ::executorch::runtime::KernelRuntimeContext;
28+
using std::optional;
29+
using std::string_view;
3030

3131
namespace cadence {
3232
namespace impl {
@@ -686,4 +686,4 @@ Tensor& div_scalar_mode_out(
686686
} // namespace native
687687
} // namespace G3
688688
} // namespace impl
689-
} // namespace cadence
689+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_mean.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@
1717
#include <executorch/runtime/platform/assert.h>
1818

1919
using ::executorch::aten::ArrayRef;
20-
using ::executorch::aten::optional;
2120
using ::executorch::aten::ScalarType;
2221
using ::executorch::aten::Tensor;
2322
using ::executorch::runtime::Error;
2423
using ::executorch::runtime::KernelRuntimeContext;
24+
using std::optional;
2525

2626
namespace cadence {
2727
namespace impl {

backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,11 @@
1919
#include <executorch/runtime/kernel/kernel_includes.h>
2020

2121
using ::executorch::aten::IntArrayRef;
22-
using ::executorch::aten::optional;
2322
using ::executorch::aten::ScalarType;
2423
using ::executorch::aten::Tensor;
2524
using ::executorch::runtime::Error;
2625
using ::executorch::runtime::KernelRuntimeContext;
26+
using std::optional;
2727

2828
namespace cadence {
2929
namespace impl {

backends/cadence/fusion_g3/operators/op_quantize.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -329,8 +329,8 @@ Tensor& quantize_impl(
329329
}
330330
}
331331

332-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
333-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
332+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
333+
::executorch::aten::ArrayRef<int64_t>{
334334
dims, size_t(input.dim() - 1)}};
335335

336336
// Actual quantization logic
@@ -534,8 +534,8 @@ Tensor& quantize_impl(
534534
}
535535
}
536536

537-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
538-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
537+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
538+
::executorch::aten::ArrayRef<int64_t>{
539539
dims, size_t(input.dim() - 1)}};
540540

541541
// Actual quantization logic

backends/cadence/fusion_g3/operators/op_slice_copy.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ Tensor& slice_copy_Tensor_out(
3737
KernelRuntimeContext& ctx,
3838
const Tensor& in,
3939
int64_t dim,
40-
::executorch::aten::optional<int64_t> start_val,
41-
::executorch::aten::optional<int64_t> end_val,
40+
std::optional<int64_t> start_val,
41+
std::optional<int64_t> end_val,
4242
int64_t step,
4343
Tensor& out) {
4444
(void)ctx;

0 commit comments

Comments (0)