3 changes: 3 additions & 0 deletions .gitmodules
@@ -64,6 +64,9 @@
[submodule "third-party/pybind11"]
path = third-party/pybind11
url = https://github.com/pybind/pybind11.git
[submodule "backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3"]
path = backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3
url = https://github.com/foss-xtensa/nnlib-FusionG3/
[submodule "third-party/ao"]
path = third-party/ao
url = https://github.com/pytorch/ao.git
7 changes: 6 additions & 1 deletion backends/cadence/CMakeLists.txt
@@ -76,7 +76,12 @@ endif()
if(EXECUTORCH_NNLIB_OPT)
  set(TARGET_DIR hifi)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/third-party/nnlib)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/kernels)
endif()

if(EXECUTORCH_FUSION_G3_OPT)
  set(TARGET_DIR fusion_g3)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/third-party/nnlib)
endif()

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/operators)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${TARGET_DIR}/kernels)
118 changes: 118 additions & 0 deletions backends/cadence/aot/functions_fusion_g3.yaml
@@ -0,0 +1,118 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
Contributor: Please change the kernel_name here to match cadence::impl::G3::native::OP_NAME, for example cadence::impl::G3::native::add_out.

Contributor Author: Updated the operator name to cadence::impl::G3::add_out; "native" is not explicitly mentioned.

Contributor: With the change to the operator namespace, I think this would now fail to compile. Can you please check on your end?

Contributor Author: We are able to compile and verify the operator with a toy model. Are you having any issue with this namespace?

#
# This yaml file contains operators that are also defined by the ATen library.
# For lean mode:
# - Codegen'd target `executorch_generated_lib` will be reading all the information
# from this file, including operator schema and kernel metadata.
# - Selective build target `codegen:executorch_defined_ops` now is selecting all the
# operators in this file, by dumping all the op names into `selected_operators.yaml`.
#
# See the README.md file in executorch/kernels/portable for a description of the syntax used
# by this file.


# aten ops
- op: _to_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::to_copy_out

- op: _softmax.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::softmax_out
Contributor: This needs to be _softmax to be consistent; fixed in #7061.


- op: add.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::add_out
Comment on lines +25 to +28

Contributor: @ckmadhira the name used here (kernel_name: cadence::impl::G3::add_out) does not match the actual kernel in op_add.cpp (cadence::impl::G3::native::add_out). Is this compiling on your end?

Contributor Author (@ckmadhira, Nov 21, 2024): This compiled for us and we are able to test it. We should not add "native" in the functions_fusion_g3.yaml file; the word "native" gets appended implicitly. If we try to add "native" explicitly, we get build errors saying that native is added twice.
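To make the naming convention discussed in this thread concrete, the sketch below shows the namespace layout in question: the C++ kernel is defined under cadence::impl::G3::native, while the yaml entry lists only cadence::impl::G3::add_out because, per the author's reply above, "native" is appended implicitly during code generation. This is a minimal, self-contained illustration; the Tensor struct and the add_out signature here are placeholders, not the real ExecuTorch types.

#include <iostream>

// Placeholder tensor type for illustration only (not the ExecuTorch Tensor).
struct Tensor {};

// Mirrors how op_add.cpp nests its kernel in a "native" sub-namespace.
namespace cadence {
namespace impl {
namespace G3 {
namespace native {

// Hypothetical stand-in for the real add_out kernel.
Tensor& add_out(const Tensor& /*a*/, const Tensor& /*b*/, Tensor& out) {
  std::cout << "cadence::impl::G3::native::add_out called\n";
  return out;
}

} // namespace native
} // namespace G3
} // namespace impl
} // namespace cadence

int main() {
  Tensor a, b, out;
  // functions_fusion_g3.yaml writes kernel_name: cadence::impl::G3::add_out;
  // per the thread above, the trailing ::native is supplied implicitly,
  // so the fully qualified symbol that is ultimately called looks like this:
  cadence::impl::G3::native::add_out(a, b, out);
  return 0;
}

Writing the ::native qualifier in the yaml as well would qualify the symbol twice, which matches the "native is added twice" build error described above.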


- op: add.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::add_scalar_out

- op: bmm.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bmm_out

- op: cat.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::cat_out

- op: clone.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::clone_out

- op: div.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::div_out

- op: div.out_mode
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::div_out_mode

- op: embedding.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::embedding_out

- op: full.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::full_out

- op: mul.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::mul_out

- op: mul.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::mul_scalar_out

- op: permute_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::permute_copy_out

- op: sigmoid.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sigmoid_out

- op: slice_copy.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::slice_copy_Tensor_out

- op: split_with_sizes_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::split_with_sizes_copy_out

- op: sub.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sub_out

- op: view_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::view_copy_out

- op: where.self_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::where_out

- op: native_layer_norm.out
  kernels:
    - arg_meta: null
      kernel_name: cadence::impl::G3::native_layer_norm_out
Contributor: Missing the quantize/dequantize per-tensor function definitions; fixed in #7061.

85 changes: 85 additions & 0 deletions backends/cadence/fusion_g3/operators/CMakeLists.txt
@@ -0,0 +1,85 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

cmake_minimum_required(VERSION 3.19)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 17)
endif()

include(${EXECUTORCH_ROOT}/build/Utils.cmake)
include(${EXECUTORCH_ROOT}/build/Codegen.cmake)

if(NOT PYTHON_EXECUTABLE)
  resolve_python_executable()
endif()

# ATen compliant ops that are needed to run this model.
set(_aten_ops__srcs
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/index_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/kernel_ops_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/slice_util.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_add.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_mul.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_cat.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_softmax.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_native_layer_norm.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_quantize.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/op_dequantize.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_div.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sub.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/dtype_util.cpp"
    "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/normalization_ops_util.cpp"
)
add_library(aten_ops_cadence ${_aten_ops__srcs})
target_link_libraries(aten_ops_cadence PUBLIC executorch)
target_link_libraries(aten_ops_cadence PRIVATE xa_nnlib)

# Let files say "include <executorch/path/to/header.h>".
set(_common_include_directories ${EXECUTORCH_ROOT}/..)

target_include_directories(
  aten_ops_cadence PUBLIC ${ROOT_DIR}/.. ${CMAKE_BINARY_DIR}
  ${_common_include_directories}
  ${EXECUTORCH_ROOT}/backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3/xa_nnlib/algo/common/include/
  ${EXECUTORCH_ROOT}/backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3/xa_nnlib/include/nnlib
  ${EXECUTORCH_ROOT}/backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3/xa_nnlib/include
  ${EXECUTORCH_ROOT}/backends/cadence/fusion_g3/third-party/nnlib/nnlib-FusionG3/xa_nnlib/algo/kernels/tables/include
)

# Generate C++ bindings to register kernels into both PyTorch (for AOT) and
# Executorch (for runtime). Here select all ops in functions.yaml
gen_selected_ops(
  LIB_NAME "cadence_ops_lib" OPS_SCHEMA_YAML
  "${CMAKE_CURRENT_LIST_DIR}/../../aot/functions_fusion_g3.yaml" "" ""
)
generate_bindings_for_kernels(
  LIB_NAME "cadence_ops_lib" OPS_SCHEMA_YAML FUNCTIONS_YAML
  ${CMAKE_CURRENT_SOURCE_DIR}/../../aot/functions_fusion_g3.yaml
)
message("Generated files ${gen_command_sources}")

gen_operators_lib(
  LIB_NAME "cadence_ops_lib" KERNEL_LIBS DEPS aten_ops_cadence
)