From e6c76666b2e81f768d97c08ddf4699c019737000 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Tue, 8 Jul 2025 15:14:13 -0700 Subject: [PATCH 01/26] README and CMake changes --- CMakeLists.txt | 10 ++++++ examples/wasm/README.md | 72 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 examples/wasm/README.md diff --git a/CMakeLists.txt b/CMakeLists.txt index d3c81fd1b38..3f6133af74f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -726,6 +726,16 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER) endif() target_link_libraries(executor_runner ${_executor_runner_libs}) target_compile_options(executor_runner PUBLIC ${_common_compile_options}) + + if(EMSCRIPTEN) + # Directory of model pte files to embed in the wasm binary. + if(NOT DEFINED WASM_MODEL_DIR) + set(WASM_MODEL_DIR "${CMAKE_SOURCE_DIR}/models/") + endif() + + set(CMAKE_EXECUTABLE_SUFFIX ".html") + target_link_options(executor_runner PUBLIC -sALLOW_MEMORY_GROWTH --embed-file "${WASM_MODEL_DIR}@/") + endif() endif() if(EXECUTORCH_BUILD_VULKAN) diff --git a/examples/wasm/README.md b/examples/wasm/README.md new file mode 100644 index 00000000000..ecc65896920 --- /dev/null +++ b/examples/wasm/README.md @@ -0,0 +1,72 @@ +# ExecuTorch Wasm Build + +This guide describes how to build ExecuTorch for WebAssembly (Wasm). + +## Directory Structure + +``` +examples/wasm +└── README.md # This file +``` + +## Prerequisites + +- [emscripten](https://emscripten.org/docs/getting_started/Tutorial.html) +- [Node.js](https://nodejs.org/en/) (Optional) + +## Generate Models + +JavaScript does not have access to the filesystem. To load a model, it needs to be preloaded or embedded into the virtual filesystem. In this example, models in the `./models/` directory are embedded by default. We will then build `executorch_runner` in Wasm. + +1. 
Following the setup guide in [Setting up ExecuTorch](https://pytorch.org/executorch/main/getting-started-setup)
you should be able to get the basic development environment for ExecuTorch working.

2. Using the script `portable/scripts/export.py` generate a model binary file by selecting a
model name from the list of available models in the `examples/models` dir.

```bash
cd executorch # To the top level dir

mkdir models

# To get a list of example models
python3 -m examples.portable.scripts.export -h

# To generate a specific pte model into the models/ directory
python3 -m examples.portable.scripts.export --model_name="mv2" --output_dir="models/" # for MobileNetv2

# This should generate ./models/mv2.pte file, if successful.
```

Use -h (or --help) to see all the supported models. For the browser example, make sure you have a model with the file name `model.pte` in the `./models/` directory.

3. Once we have the model binaries (.pte) in `./models/`, we can build `executor_runner` in Wasm with Emscripten. When calling `emcmake cmake`, you can pass the `-DWASM_MODEL_DIR=` option to specify the directory containing the model files instead of `./models/`.

```bash
./install_executorch.sh --clean
(mkdir cmake-out-wasm \
  && cd cmake-out-wasm \
  && emcmake cmake -DEXECUTORCH_PAL_DEFAULT=posix ..) \
  && cmake --build cmake-out-wasm -j32 --target executor_runner
```

If you need to rebuild `executor_runner` after modifying the contents of `./models/`, you can run the following command

```bash
cmake --build cmake-out-wasm -j32 --target executor_runner --clean-first
```

4. Run the model with Node.js.

```bash
# Run the tool on the generated model.
node cmake-out-wasm/executor_runner.js --model_path mv2.pte
```

5. You can also run the model in the browser. Note that you cannot pass command line arguments to the browser version of the tool. By default, the program will load the model `model.pte` and run it. 
Several browsers do not support `file://` XHR requests to load the Wasm file. To get around this, you can use a local web server. For example, with Python: + +```bash +python3 -m http.server --directory cmake-out-wasm +``` + +The page will be available at http://localhost:8000/executor_runner.html. From b322cb84e4cf45c61c83560f03d02a1180ea3c83 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Tue, 8 Jul 2025 17:21:20 -0700 Subject: [PATCH 02/26] Added build test script for wasm example --- examples/wasm/test_build_wasm.sh | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 examples/wasm/test_build_wasm.sh diff --git a/examples/wasm/test_build_wasm.sh b/examples/wasm/test_build_wasm.sh new file mode 100644 index 00000000000..87ef91ce4b2 --- /dev/null +++ b/examples/wasm/test_build_wasm.sh @@ -0,0 +1,37 @@ + +set -e + +source "$(dirname "${BASH_SOURCE[0]}")/../../.ci/scripts/utils.sh" + +test_build_wasm() { + local model_name=$1 + local model_export_name="${model_name}.pte" + local model_dir_name="./models_test/" + echo "Exporting ${model_name}" + mkdir -p "${model_dir_name}" + ${PYTHON_EXECUTABLE} -m examples.portable.scripts.export --model_name="${model_name}" --output_dir="$model_dir_name" + + local example_dir=examples/wasm + local build_dir=cmake-out/${example_dir} + rm -rf ${build_dir} + retry emcmake cmake -DWASM_MODEL_DIR="$(realpath "${model_dir_name}")" -B${build_dir} . 
+ + echo "Building ${example_dir}" + cmake --build ${build_dir} -j9 --target executor_runner + + echo "Removing ${model_dir_name}" + rm -rf "${model_dir_name}" + + echo 'Running wasm build test' + node ${build_dir}/executor_runner.js --model_path="${model_export_name}" +} + +if [[ -z $PYTHON_EXECUTABLE ]]; +then + PYTHON_EXECUTABLE=python3 +fi + +cmake_install_executorch_lib + +test_build_wasm add_mul +test_build_wasm mv2 From 43561d7a8a3b25af97eb705d35f4aae19169b20f Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Wed, 9 Jul 2025 11:15:43 -0700 Subject: [PATCH 03/26] Added CI tests --- .ci/scripts/setup-emscripten.sh | 20 ++++++++++++++++++++ .github/workflows/pull.yml | 28 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 .ci/scripts/setup-emscripten.sh diff --git a/.ci/scripts/setup-emscripten.sh b/.ci/scripts/setup-emscripten.sh new file mode 100644 index 00000000000..6ecdad37007 --- /dev/null +++ b/.ci/scripts/setup-emscripten.sh @@ -0,0 +1,20 @@ + +set -ex + +install_nodejs() { + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.3/install.sh | bash + \. 
"$HOME/.nvm/nvm.sh" + nvm install 22 +} + +install_emscripten() { + git clone https://github.com/emscripten-core/emsdk.git + pushd emsdk || return + ./emsdk install 4.0.10 + ./emsdk activate 4.0.10 + source ./emsdk_env.sh + popd || return +} + +install_nodejs +install_emscripten diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index df254b7f409..02903918fe5 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -734,3 +734,31 @@ jobs: PYTHON_EXECUTABLE=python bash .ci/scripts/setup-openvino.sh PYTHON_EXECUTABLE=python bash .ci/scripts/test_openvino.sh + + test-build-wasm-linux: + name: test-build-wasm-linux + uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + with: + runner: linux.2xlarge + docker-image: executorch-ubuntu-22.04-clang12 + submodules: 'recursive' + ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout: 90 + script: | + # The generic Linux job chooses to use base env, not the one setup by the image + CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") + conda activate "${CONDA_ENV}" + + BUILD_TOOL="cmake" + PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}" + + # Install Node.js and Emscripten + source .ci/scripts/setup-emscripten.sh + + # Test selective build + PYTHON_EXECUTABLE=python bash examples/wasm/test_build_wasm.sh From d19c6b497d0f496fcf05588af970b260a710b8c0 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Wed, 9 Jul 2025 15:29:17 -0700 Subject: [PATCH 04/26] Emscripten automatically installs node.js --- examples/wasm/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/wasm/README.md b/examples/wasm/README.md index ecc65896920..cb0395d3376 100644 --- a/examples/wasm/README.md +++ b/examples/wasm/README.md @@ -12,11 +12,10 @@ examples/wasm ## Prerequisites - 
[emscripten](https://emscripten.org/docs/getting_started/Tutorial.html) -- [Node.js](https://nodejs.org/en/) (Optional) ## Generate Models -JavaScript does not have access to the filesystem. To load a model, it needs to be preloaded or embedded into the virtual filesystem. In this example, models in the `./models/` directory are embedded by default. We will then build `executorch_runner` in Wasm. +JavaScript does not have direct access to the host file system. To load a model, it needs to be preloaded or embedded into the virtual file system. In this example, models in the `./models/` directory are embedded by default. We will then build `executorch_runner` in Wasm. 1. Following the setup guide in [Setting up ExecuTorch](https://pytorch.org/executorch/main/getting-started-setup) you should be able to get the basic development environment for ExecuTorch working. @@ -56,7 +55,7 @@ If you need to rebuild `executor_runner` after modifying the contents of `./mode cmake --build cmake-out-wasm -j32 --target executor_runner --clean-first ``` -4. Run the model with Node.js. +4. Run the model with Node.js (automatically installed with Emscripten). ```bash # Run the tool on the generated model. From 4b0e1afd2d21dd99254b430566bc3605a1492c2c Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 10 Jul 2025 13:24:50 -0700 Subject: [PATCH 05/26] Use Emscripten provided version of Node.js --- .ci/scripts/setup-emscripten.sh | 7 ------- examples/wasm/README.md | 11 ++--------- examples/wasm/test_build_wasm.sh | 2 +- 3 files changed, 3 insertions(+), 17 deletions(-) diff --git a/.ci/scripts/setup-emscripten.sh b/.ci/scripts/setup-emscripten.sh index 6ecdad37007..637f3cbda0d 100644 --- a/.ci/scripts/setup-emscripten.sh +++ b/.ci/scripts/setup-emscripten.sh @@ -1,12 +1,6 @@ set -ex -install_nodejs() { - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.3/install.sh | bash - \. 
"$HOME/.nvm/nvm.sh" - nvm install 22 -} - install_emscripten() { git clone https://github.com/emscripten-core/emsdk.git pushd emsdk || return @@ -16,5 +10,4 @@ install_emscripten() { popd || return } -install_nodejs install_emscripten diff --git a/examples/wasm/README.md b/examples/wasm/README.md index cb0395d3376..f9eec78984e 100644 --- a/examples/wasm/README.md +++ b/examples/wasm/README.md @@ -2,16 +2,9 @@ This guide describes how to build ExecuTorch for WebAssembly (Wasm). -## Directory Structure - -``` -examples/wasm -└── README.md # This file -``` - ## Prerequisites -- [emscripten](https://emscripten.org/docs/getting_started/Tutorial.html) +- [Emscripten](https://emscripten.org/docs/getting_started/Tutorial.html) ## Generate Models @@ -55,7 +48,7 @@ If you need to rebuild `executor_runner` after modifying the contents of `./mode cmake --build cmake-out-wasm -j32 --target executor_runner --clean-first ``` -4. Run the model with Node.js (automatically installed with Emscripten). +4. Run the model with Node.js. Emscripten should come preinstalled with a compatible version of Node.js. If you have an incompatible version of Node.js installed, you can use the Emscripten-provided version by running `$EMSDK_NODE` instead of `node`. ```bash # Run the tool on the generated model. 
diff --git a/examples/wasm/test_build_wasm.sh b/examples/wasm/test_build_wasm.sh index 87ef91ce4b2..8218a0ebb9b 100644 --- a/examples/wasm/test_build_wasm.sh +++ b/examples/wasm/test_build_wasm.sh @@ -23,7 +23,7 @@ test_build_wasm() { rm -rf "${model_dir_name}" echo 'Running wasm build test' - node ${build_dir}/executor_runner.js --model_path="${model_export_name}" + $EMSDK_NODE ${build_dir}/executor_runner.js --model_path="${model_export_name}" } if [[ -z $PYTHON_EXECUTABLE ]]; From a21942d31585f434ef2d3bbff6da20f243d07d7a Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 10 Jul 2025 16:49:06 -0700 Subject: [PATCH 06/26] Applied suggestions --- CMakeLists.txt | 1 + examples/wasm/README.md | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3f6133af74f..5071cca55b4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -727,6 +727,7 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER) target_link_libraries(executor_runner ${_executor_runner_libs}) target_compile_options(executor_runner PUBLIC ${_common_compile_options}) + # Automatically set when using `emcmake cmake` for Wasm build. if(EMSCRIPTEN) # Directory of model pte files to embed in the wasm binary. if(NOT DEFINED WASM_MODEL_DIR) diff --git a/examples/wasm/README.md b/examples/wasm/README.md index f9eec78984e..15ce07493d1 100644 --- a/examples/wasm/README.md +++ b/examples/wasm/README.md @@ -2,6 +2,18 @@ This guide describes how to build ExecuTorch for WebAssembly (Wasm). 
+## Quick Start + +To quickly test the build, you can run the following commands + +```bash +cd executorch # To the top level dir + +source .ci/scripts/setup-emscripten.sh # Install Emscripten and set up the environment variables + +bash examples/wasm/test_build_wasm.sh # Run the test build script +``` + ## Prerequisites - [Emscripten](https://emscripten.org/docs/getting_started/Tutorial.html) @@ -62,3 +74,24 @@ python3 -m http.server --directory cmake-out-wasm ``` The page will be available at http://localhost:8000/executor_runner.html. + +## Common Issues + +### CompileError: WebAssembly.instantiate() [...] failed: expected table index 0... + +This seems to be an issue with Node.js v16. Emscripten should come preinstalled with a compatible version of Node.js. You can use the Emscripten-provided version by running `$EMSDK_NODE` instead of `node`. + +```bash +echo $EMSDK_NODE +.../emsdk/node/22.16.0_64bit/bin/node # example output +``` + +### Failed to open [...]: No such file or directory (44) + +The file may not have been present while building the Wasm binary. You can rebuild with the following command + +```bash +cmake --build cmake-out-wasm -j32 --target executor_runner --clean-first +``` + +The path may also be incorrect. The files in the `WASM_MODEL_DIR` are placed into the root directory of the virtual file system, so you would use `--model_path mv2.pte` instead of `--model_path models/mv2.pte`, for example. 
From 5378272e83faecca17ef6589727b3c20a9a3222c Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Tue, 15 Jul 2025 18:14:42 -0700 Subject: [PATCH 07/26] Module test working --- CMakeLists.txt | 4 + extension/wasm/CMakeLists.txt | 47 ++++ extension/wasm/build_wasm.sh | 12 + extension/wasm/test_module.js | 27 +++ extension/wasm/wasm_bindings.cpp | 392 +++++++++++++++++++++++++++++++ 5 files changed, 482 insertions(+) create mode 100644 extension/wasm/CMakeLists.txt create mode 100644 extension/wasm/build_wasm.sh create mode 100644 extension/wasm/test_module.js create mode 100644 extension/wasm/wasm_bindings.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index afa745478a9..f1e73d1f3fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -673,6 +673,10 @@ if(EXECUTORCH_BUILD_PYBIND) ) endif() +if(EXECUTORCH_BUILD_WASM) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/wasm) +endif() + if(EXECUTORCH_BUILD_EXTENSION_TRAINING) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/training) endif() diff --git a/extension/wasm/CMakeLists.txt b/extension/wasm/CMakeLists.txt new file mode 100644 index 00000000000..b7f8d6920c7 --- /dev/null +++ b/extension/wasm/CMakeLists.txt @@ -0,0 +1,47 @@ + +cmake_minimum_required(VERSION 3.24) # 3.24 is required for WHOLE_ARCHIVE + +project(executorch_wasm) + +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(NOT EMSCRIPTEN) + message(FATAL_ERROR "Emscripten is required to build this target") +endif() + +# Source root directory for executorch. +if(NOT EXECUTORCH_ROOT) + set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..) +endif() + +include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake) +set(_common_compile_options -Wno-deprecated-declarations -fPIC) +set(_common_include_directories ${EXECUTORCH_ROOT}/..) 
+ +set(link_libraries) +list( + APPEND + link_libraries + embind + executorch_core + extension_data_loader + portable_ops_lib + extension_module_static + extension_tensor + extension_runner_util +) + +add_executable(executorch_wasm wasm_bindings.cpp) + +target_compile_options(executorch_wasm PUBLIC ${_common_compile_options}) +target_include_directories(executorch_wasm PUBLIC ${_common_include_directories}) +target_link_libraries(executorch_wasm PUBLIC ${link_libraries}) +target_link_options(executorch_wasm PUBLIC -sALLOW_MEMORY_GROWTH=1 --embed-file=${CMAKE_CURRENT_SOURCE_DIR}/model.pte@model.pte) + +add_custom_target(executorch_wasm_test ALL + COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/test_module.js ${CMAKE_CURRENT_BINARY_DIR}/test_module.js + DEPENDS executorch_wasm + COMMENT "Copying test_module.js to build output directory" +) diff --git a/extension/wasm/build_wasm.sh b/extension/wasm/build_wasm.sh new file mode 100644 index 00000000000..e40499530fc --- /dev/null +++ b/extension/wasm/build_wasm.sh @@ -0,0 +1,12 @@ + +cd "$(dirname "${BASH_SOURCE[0]}")/../../" +mkdir -p cmake-out-wasm +cd cmake-out-wasm +emcmake cmake -DEXECUTORCH_BUILD_WASM=ON \ + -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ + -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \ + -DEXECUTORCH_BUILD_DEVTOOLS=ON \ + -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ + -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ + -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON .. 
+make executorch_wasm_test -j32 diff --git a/extension/wasm/test_module.js b/extension/wasm/test_module.js new file mode 100644 index 00000000000..f64c09f28d4 --- /dev/null +++ b/extension/wasm/test_module.js @@ -0,0 +1,27 @@ + +const et = require("./executorch_wasm"); +et.onRuntimeInitialized = () => { + module = et.Module.load("model.pte"); + var methods = module.getMethods(); + console.log(methods); + var method = methods[0]; + var methodMeta = module.getMethodMeta(method); + var inputs = []; + for (var i = 0; i < methodMeta.numInputs; i++) { + var tensor = et.FloatTensor.ones(methodMeta.inputTensorMeta(i).sizes); + console.log("input", i, tensor.getData(), tensor.getSizes()); + inputs.push(tensor); + } + var output = module.execute(method, inputs); + + for (var i = 0; i < inputs.length; i++) { + inputs[i].delete(); + } + + for (var i = 0; i < output.length; i++) { + console.log("output", i, output[i].getData(), output[i].getSizes()); + output[i].delete(); + } + + module.delete(); +} diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp new file mode 100644 index 00000000000..f4611f921bd --- /dev/null +++ b/extension/wasm/wasm_bindings.cpp @@ -0,0 +1,392 @@ + +#include +#include +#include +#include + +#define THROW_JS_ERROR(errorType, message, ...) \ + ({ \ + char msg_buf[128]; \ + snprintf(msg_buf, sizeof(msg_buf), message, ##__VA_ARGS__); \ + EM_ASM(throw new errorType(UTF8ToString($0)), msg_buf); \ + __builtin_unreachable(); \ + }) + +/// Throws a JavaScript Error with the provided message if `error` is not `Ok`. +#define THROW_IF_ERROR(error, message, ...) \ + ({ \ + if ET_UNLIKELY ((error) != Error::Ok) { \ + THROW_JS_ERROR(Error, message, ##__VA_ARGS__); \ + } \ + }) + +/// Throws a JavaScript Error with the provided message if `error` is not `Ok`. +#define THROW_IF_FALSE(cond, message, ...) 
\ + ({ \ + if ET_UNLIKELY (!(cond)) { \ + THROW_JS_ERROR(Error, message, ##__VA_ARGS__); \ + } \ + }) + +using namespace emscripten; +using executorch::aten::Tensor; +using ::executorch::runtime::Error; +using ::executorch::runtime::EValue; +using ::executorch::runtime::Result; +using ::executorch::runtime::TensorInfo; + +namespace executorch { +namespace extension { +namespace wasm { + +namespace { + +#define JS_FORALL_SUPPORTED_TENSOR_TYPES(_) \ + _(float, Float) \ + _(int, Int) + +inline ssize_t compute_expected_numel( + const std::vector& sizes) { + return executorch::aten::compute_numel(sizes.data(), sizes.size()); +} + +template +inline void assert_valid_numel( + const std::vector& data, + const std::vector& sizes) { + auto computed_numel = compute_expected_numel(sizes); + THROW_IF_FALSE( + data.size() >= computed_numel, + "Required %ld elements, given %ld", + computed_numel, + data.size()); +} + +class JsBaseTensor { + public: + virtual ~JsBaseTensor() = default; + + virtual Tensor get_tensor() = 0; + virtual int get_scalar_type() const = 0; + val get_data() { + switch (get_scalar_type()) { +#define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ + case static_cast(aten::ScalarType::NAME): \ + return val::array( \ + get_tensor().data_ptr(), \ + get_tensor().data_ptr() + get_tensor().numel()); + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_TENSOR_TO_VAL_TYPE) + default: + THROW_JS_ERROR( + TypeError, "Unsupported Tensor type: %d", get_scalar_type()); + } + } + val get_sizes() { + return val::array(get_tensor().sizes().begin(), get_tensor().sizes().end()); + } +}; + +template +class JsTensor final : public JsBaseTensor { + public: + JsTensor(std::vector data, TensorPtr tensor) + : data_(std::move(data)), tensor_(std::move(tensor)) {} + + static std::unique_ptr fill_internal( + const std::vector&& sizes, + T fill_value) { + std::vector data_vec(compute_expected_numel(sizes), fill_value); + TensorPtr tensor = from_blob(data_vec.data(), sizes, S); + return 
std::make_unique(std::move(data_vec), std::move(tensor)); + } + + static std::unique_ptr full(val sizes, val fill_value) { + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + return fill_internal(std::move(sizes_vec), fill_value.as()); + } + + static std::unique_ptr zeros(val sizes) { + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + return fill_internal(std::move(sizes_vec), 0); + } + + static std::unique_ptr ones(val sizes) { + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + return fill_internal(std::move(sizes_vec), 1); + } + + static std::unique_ptr from_array(val data, val sizes) { + return from_array(data, sizes, val::null()); + } + + static std::unique_ptr + from_array(val data, val sizes, val strides) { + auto data_vec = convertJSArrayToNumberVector(data); + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + assert_valid_numel(data_vec, sizes_vec); + + if (strides.isNull()) { + TensorPtr tensor = from_blob(data_vec.data(), std::move(sizes_vec), S); + return std::make_unique(std::move(data_vec), std::move(tensor)); + } + auto strides_vec = + convertJSArrayToNumberVector( + strides); + TensorPtr tensor = from_blob( + data_vec.data(), std::move(sizes_vec), std::move(strides_vec), S); + return std::make_unique(std::move(data_vec), std::move(tensor)); + } + Tensor get_tensor() override { + return *tensor_; + } + int get_scalar_type() const override { + return static_cast(S); + } + + private: + std::vector data_; + TensorPtr tensor_; +}; + +#define JS_DECLARE_TENSOR_TYPE(T, NAME) \ + using Js##NAME##Tensor = JsTensor; + +JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_TYPE) + +class JsOutputTensor final : public JsBaseTensor { + public: + JsOutputTensor() = delete; + JsOutputTensor(const JsOutputTensor&) = delete; + JsOutputTensor& operator=(const JsOutputTensor&) = delete; + JsOutputTensor(JsOutputTensor&&) = default; + JsOutputTensor& operator=(JsOutputTensor&&) = default; + + explicit JsOutputTensor(std::unique_ptr tensor) 
+ : tensor_(std::move(tensor)) {} + + Tensor get_tensor() override { + return *tensor_; + } + + int get_scalar_type() const override { + return static_cast(tensor_->scalar_type()); + } + + private: + std::unique_ptr tensor_; +}; + +EValue to_evalue(val v) { + if (v.isNull()) { + return EValue(); + } else if (v.isNumber()) { + return EValue(v.as()); + } else if (v.isTrue()) { + return EValue(true); + } else if (v.isFalse()) { + return EValue(false); + } else { + const std::string& type_str = v.typeOf().as(); + if (type_str == "object") { + // If it is an object, assume it is a tensor. + return EValue(v.as().get_tensor()); + } + THROW_JS_ERROR( + TypeError, "Unsupported JavaScript type: %s", type_str.c_str()); + } +} + +val to_val(EValue v) { + if (v.isNone()) { + return val::null(); + } else if (v.isInt()) { + return val(v.toInt()); + } else if (v.isDouble()) { + return val(v.toDouble()); + } else if (v.isBool()) { + return val(v.toBool()); + } else if (v.isTensor()) { + Tensor tensor = v.toTensor(); + std::unique_ptr wrapper = std::make_unique( + std::make_unique(std::move(tensor))); + return val(std::move(wrapper)); + } else { + char tag_buf[32]; + runtime::tag_to_string(v.tag, tag_buf, 32); + THROW_JS_ERROR(TypeError, "Unsupported EValue type: %s", tag_buf); + } +} + +class JsTensorInfo final { + public: + JsTensorInfo() = delete; + JsTensorInfo(const JsTensorInfo&) = delete; + JsTensorInfo& operator=(const JsTensorInfo&) = delete; + JsTensorInfo(JsTensorInfo&&) = default; + JsTensorInfo& operator=(JsTensorInfo&&) = default; + + explicit JsTensorInfo(std::unique_ptr tensor_info) + : tensor_info_(std::move(tensor_info)) {} + + val sizes() const { + return val::array( + tensor_info_->sizes().begin(), tensor_info_->sizes().end()); + } + + private: + std::unique_ptr tensor_info_; +}; + +class JsMethodMeta final { + public: + JsMethodMeta() = delete; + JsMethodMeta(const JsMethodMeta&) = delete; + JsMethodMeta& operator=(const JsMethodMeta&) = delete; + 
JsMethodMeta(JsMethodMeta&&) = default; + JsMethodMeta& operator=(JsMethodMeta&&) = default; + + explicit JsMethodMeta(std::unique_ptr meta) + : meta_(std::move(meta)) {} + + val name() const { + return val::u8string(meta_->name()); + } + + size_t num_inputs() const { + return meta_->num_inputs(); + } + + std::unique_ptr input_tensor_meta(size_t index) { + auto res = meta_->input_tensor_meta(index); + THROW_IF_ERROR( + res.error(), + "Failed to get input tensor info for index %zu, error: 0x%" PRIx32, + index, + static_cast(res.error())); + return std::make_unique( + std::make_unique(std::move(res.get()))); + } + + private: + std::unique_ptr meta_; +}; + +class JsModule final { + public: + JsModule() = delete; + JsModule(const JsModule&) = delete; + JsModule& operator=(const JsModule&) = delete; + JsModule(JsModule&&) = default; + JsModule& operator=(JsModule&&) = default; + + explicit JsModule(std::unique_ptr module) + : module_(std::move(module)) {} + + static std::unique_ptr load(const std::string& path) { + return std::make_unique(std::make_unique(path)); + } + + val get_methods() { + auto res = module_->method_names(); + THROW_IF_ERROR( + res.error(), + "Failed to get methods, error: 0x%" PRIx32, + static_cast(res.error())); + return val::array(res.get().begin(), res.get().end()); + } + + void load_method(const std::string& method_name) { + Error res = module_->load_method(method_name); + THROW_IF_ERROR( + res, + "Failed to load method %s, error: 0x%" PRIx32, + method_name.c_str(), + static_cast(res)); + } + + std::unique_ptr get_method_meta( + const std::string& method_name) { + auto res = module_->method_meta(method_name); + THROW_IF_ERROR( + res.error(), + "Failed to get method meta for %s, error: 0x%" PRIx32, + method_name.c_str(), + static_cast(res.error())); + return std::make_unique( + std::make_unique(std::move(res.get()))); + } + + val execute(const std::string& method, val js_inputs) { + std::vector inputs; + if (js_inputs.isArray()) { + 
inputs.reserve(js_inputs["length"].as()); + for (val v : js_inputs) { + inputs.push_back(to_evalue(v)); + } + } else { + inputs.push_back(to_evalue(js_inputs)); + } + auto res = module_->execute(method, inputs); + THROW_IF_ERROR( + res.error(), + "Failed to execute method %s, error: 0x%" PRIx32, + method.c_str(), + static_cast(res.error())); + std::vector outputs = res.get(); + val js_outputs = val::array(); + for (auto& output : outputs) { + js_outputs.call("push", to_val(std::move(output))); + } + return js_outputs; + } + + val forward(val inputs) { + return execute("forward", inputs); + } + + private: + std::unique_ptr module_; +}; + +} // namespace + +EMSCRIPTEN_BINDINGS(WasmBindings) { + class_("Module") + .class_function("load", &JsModule::load) + .function("getMethods", &JsModule::get_methods) + .function("loadMethod", &JsModule::load_method) + .function("getMethodMeta", &JsModule::get_method_meta) + .function("execute", &JsModule::execute) + .function("forward", &JsModule::forward); + class_("Tensor") + .property("scalarType", &JsBaseTensor::get_scalar_type) + .function("getData", &JsBaseTensor::get_data) + .function("getSizes", &JsBaseTensor::get_sizes); + class_("MethodMeta") + .property("name", &JsMethodMeta::name) + .property("numInputs", &JsMethodMeta::num_inputs) + .function("inputTensorMeta", &JsMethodMeta::input_tensor_meta); + class_("TensorInfo").property("sizes", &JsTensorInfo::sizes); +#define JS_DECLARE_TENSOR_BINDINGS(T, NAME) \ + class_(#NAME "Tensor") \ + .class_function("zeros", &Js##NAME##Tensor::zeros) \ + .class_function("ones", &Js##NAME##Tensor::ones) \ + .class_function("full", &Js##NAME##Tensor::full) \ + .class_function( \ + "fromArray", \ + select_overload(val, val, val)>( \ + &Js##NAME##Tensor::from_array)) \ + .class_function( \ + "fromArray", \ + select_overload(val, val)>( \ + &Js##NAME##Tensor::from_array)); + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_BINDINGS) +} + +} // namespace wasm +} // namespace extension +} // 
namespace executorch From 070199628f40ac84822d4fa488283c91c0347eb1 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Wed, 16 Jul 2025 17:19:41 -0700 Subject: [PATCH 08/26] Added unit tests for wasm bindings --- extension/wasm/CMakeLists.txt | 13 +- extension/wasm/build_wasm.sh | 7 +- extension/wasm/test/CMakeLists.txt | 33 +++ extension/wasm/test/executorch_wasm.test.js | 211 ++++++++++++++++++ extension/wasm/test/package.json | 5 + extension/wasm/test_module.js | 27 --- extension/wasm/wasm_bindings.cpp | 12 +- .../portable_type/c10/c10/macros/Macros.h | 2 + 8 files changed, 271 insertions(+), 39 deletions(-) create mode 100644 extension/wasm/test/CMakeLists.txt create mode 100644 extension/wasm/test/executorch_wasm.test.js create mode 100644 extension/wasm/test/package.json delete mode 100644 extension/wasm/test_module.js diff --git a/extension/wasm/CMakeLists.txt b/extension/wasm/CMakeLists.txt index b7f8d6920c7..6db5644d6d5 100644 --- a/extension/wasm/CMakeLists.txt +++ b/extension/wasm/CMakeLists.txt @@ -1,5 +1,5 @@ -cmake_minimum_required(VERSION 3.24) # 3.24 is required for WHOLE_ARCHIVE +cmake_minimum_required(VERSION 3.24) project(executorch_wasm) @@ -33,15 +33,12 @@ list( extension_runner_util ) -add_executable(executorch_wasm wasm_bindings.cpp) +add_library(executorch_wasm OBJECT wasm_bindings.cpp) target_compile_options(executorch_wasm PUBLIC ${_common_compile_options}) target_include_directories(executorch_wasm PUBLIC ${_common_include_directories}) target_link_libraries(executorch_wasm PUBLIC ${link_libraries}) -target_link_options(executorch_wasm PUBLIC -sALLOW_MEMORY_GROWTH=1 --embed-file=${CMAKE_CURRENT_SOURCE_DIR}/model.pte@model.pte) -add_custom_target(executorch_wasm_test ALL - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/test_module.js ${CMAKE_CURRENT_BINARY_DIR}/test_module.js - DEPENDS executorch_wasm - COMMENT "Copying test_module.js to build output directory" -) +if(BUILD_TESTING) + add_subdirectory(test) +endif() diff --git 
a/extension/wasm/build_wasm.sh b/extension/wasm/build_wasm.sh index e40499530fc..e53856d2247 100644 --- a/extension/wasm/build_wasm.sh +++ b/extension/wasm/build_wasm.sh @@ -8,5 +8,8 @@ emcmake cmake -DEXECUTORCH_BUILD_WASM=ON \ -DEXECUTORCH_BUILD_DEVTOOLS=ON \ -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON .. -make executorch_wasm_test -j32 + -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \ + -DBUILD_TESTING=ON \ + -DCMAKE_BUILD_TYPE=Release \ + .. +make executorch_wasm_tests -j32 diff --git a/extension/wasm/test/CMakeLists.txt b/extension/wasm/test/CMakeLists.txt new file mode 100644 index 00000000000..224071ce4a9 --- /dev/null +++ b/extension/wasm/test/CMakeLists.txt @@ -0,0 +1,33 @@ + +set(MODELS_DIR ${CMAKE_CURRENT_BINARY_DIR}/models/) + +add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/models/add_mul.pte ${CMAKE_CURRENT_BINARY_DIR}/models/add.pte + COMMAND ${CMAKE_COMMAND} -E make_directory "${MODELS_DIR}" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../.. 
+ COMMAND python3 -m examples.portable.scripts.export --model_name="add_mul" --output_dir="${MODELS_DIR}" + COMMAND python3 -m examples.portable.scripts.export --model_name="add" --output_dir="${MODELS_DIR}" +) + +add_custom_target(executorch_wasm_test_models DEPENDS ${MODELS_DIR}/add_mul.pte ${MODELS_DIR}/add.pte) + +add_executable(executorch_wasm_test_lib) +target_link_libraries(executorch_wasm_test_lib PUBLIC executorch_wasm) +target_link_options(executorch_wasm_test_lib PUBLIC --embed-file "${MODELS_DIR}@/") +add_dependencies(executorch_wasm_test_lib executorch_wasm_test_models) + +add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js + COMMENT "Copying executorch_wasm.test.js to build output directory" +) + +add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/package.json + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/package.json ${CMAKE_CURRENT_BINARY_DIR}/package.json + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/package.json + COMMENT "Copying package.json to build output directory" +) + +add_custom_target(executorch_wasm_tests DEPENDS executorch_wasm_test_lib ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js ${CMAKE_CURRENT_BINARY_DIR}/package.json) diff --git a/extension/wasm/test/executorch_wasm.test.js b/extension/wasm/test/executorch_wasm.test.js new file mode 100644 index 00000000000..0bbe74c9691 --- /dev/null +++ b/extension/wasm/test/executorch_wasm.test.js @@ -0,0 +1,211 @@ + +let et; +beforeAll((done) => { + et = require("./executorch_wasm_test_lib"); + et.onRuntimeInitialized = () => { + done(); + } +}); + +describe("Tensor", () => { + test("ones", () => { + const tensor = et.FloatTensor.ones([2, 2]); + expect(tensor.getData()).toEqual([1, 1, 1, 1]); + expect(tensor.getSizes()).toEqual([2, 2]); + 
tensor.delete(); + }); + + test("zeros", () => { + const tensor = et.FloatTensor.zeros([2, 2]); + expect(tensor.getData()).toEqual([0, 0, 0, 0]); + expect(tensor.getSizes()).toEqual([2, 2]); + tensor.delete(); + }); + + test("fromArray", () => { + const tensor = et.FloatTensor.fromArray([1, 2, 3, 4], [2, 2]); + expect(tensor.getData()).toEqual([1, 2, 3, 4]); + expect(tensor.getSizes()).toEqual([2, 2]); + tensor.delete(); + }); + + test("fromArray wrong size", () => { + expect(() => et.FloatTensor.fromArray([1, 2, 3, 4], [3, 2])).toThrow(); + }); + + test("full", () => { + const tensor = et.FloatTensor.full([2, 2], 3); + expect(tensor.getData()).toEqual([3, 3, 3, 3]); + expect(tensor.getSizes()).toEqual([2, 2]); + tensor.delete(); + }); +}); + +describe("Module", () => { + test("getMethods has foward", () => { + const module = et.Module.load("add.pte"); + const methods = module.getMethods(); + expect(methods).toEqual(["forward"]); + module.delete(); + }); + + test("loadMethod forward", () => { + const module = et.Module.load("add.pte"); + expect(() => module.loadMethod("forward")).not.toThrow(); + module.delete(); + }); + + test("loadMethod does not exist", () => { + const module = et.Module.load("add.pte"); + expect(() => module.loadMethod("does_not_exist")).toThrow(); + module.delete(); + }); + + describe("MethodMeta", () => { + test("name is forward", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(methodMeta.name).toEqual("forward"); + methodMeta.delete(); + module.delete(); + }); + + test("numInputs is 3", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(methodMeta.numInputs).toEqual(3); + methodMeta.delete(); + module.delete(); + }); + + test("method does not exist", () => { + const module = et.Module.load("add_mul.pte"); + expect(() => module.getMethodMeta("does_not_exist")).toThrow(); + module.delete(); + }); + + 
describe("TensorInfo", () => { + test("sizes is 2x2", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + for (var i = 0; i < methodMeta.numInputs; i++) { + const tensorInfo = methodMeta.inputTensorMeta(i); + expect(tensorInfo.sizes).toEqual([2, 2]); + tensorInfo.delete(); + } + methodMeta.delete(); + module.delete(); + }); + + test("out of range", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(() => methodMeta.inputTensorMeta(3)).toThrow(); + methodMeta.delete(); + module.delete(); + }); + }); + }); + + describe("execute", () => { + test("add normally", () => { + const module = et.Module.load("add.pte"); + const inputs = [et.FloatTensor.ones([1]), et.FloatTensor.ones([1])]; + const output = module.execute("forward", inputs); + + expect(output.length).toEqual(1); + expect(output[0].getData()).toEqual([2]); + expect(output[0].getSizes()).toEqual([1]); + + inputs.forEach((input) => input.delete()); + output.forEach((output) => output.delete()); + module.delete(); + }); + + test("add_mul normally", () => { + const module = et.Module.load("add_mul.pte"); + const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const output = module.execute("forward", inputs); + + expect(output.length).toEqual(1); + expect(output[0].getData()).toEqual([3, 3, 3, 3]); + expect(output[0].getSizes()).toEqual([2, 2]); + + inputs.forEach((input) => input.delete()); + output.forEach((output) => output.delete()); + module.delete(); + }); + + test("forward directly", () => { + const module = et.Module.load("add_mul.pte"); + const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const output = module.forward(inputs); + + expect(output.length).toEqual(1); + expect(output[0].getData()).toEqual([3, 3, 3, 3]); + expect(output[0].getSizes()).toEqual([2, 2]); + + 
inputs.forEach((input) => input.delete()); + output.forEach((output) => output.delete()); + module.delete(); + }); + + test("wrong number of inputs", () => { + const module = et.Module.load("add_mul.pte"); + const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + expect(() => module.execute("forward", inputs)).toThrow(); + + inputs.forEach((input) => input.delete()); + module.delete(); + }); + + test("wrong input size", () => { + const module = et.Module.load("add.pte"); + const inputs = [et.FloatTensor.ones([2, 1]), et.FloatTensor.ones([2, 1])]; + expect(() => module.execute("forward", inputs)).toThrow(); + + inputs.forEach((input) => input.delete()); + module.delete(); + }); + + test("wrong input type", () => { + const module = et.Module.load("add.pte"); + const inputs = [et.FloatTensor.ones([1]), et.IntTensor.ones([1])]; + expect(() => module.execute("forward", inputs)).toThrow(); + + inputs.forEach((input) => input.delete()); + module.delete(); + }); + + test("method does not exist", () => { + const module = et.Module.load("add.pte"); + const inputs = [et.FloatTensor.ones([1]), et.FloatTensor.ones([1])]; + expect(() => module.execute("does_not_exist", inputs)).toThrow(); + + inputs.forEach((input) => input.delete()); + module.delete(); + }); + + test("output tensor can be reused", () => { + const module = et.Module.load("add_mul.pte"); + const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const output = module.forward(inputs); + + expect(output.length).toEqual(1); + expect(output[0].getData()).toEqual([3, 3, 3, 3]); + expect(output[0].getSizes()).toEqual([2, 2]); + + const inputs2 = [output[0], output[0], output[0]]; + const output2 = module.forward(inputs2); + + expect(output2.length).toEqual(1); + expect(output2[0].getData()).toEqual([21, 21, 21, 21]); + expect(output2[0].getSizes()).toEqual([2, 2]); + + inputs.forEach((input) => input.delete()); + output.forEach((output) => 
output.delete()); + output2.forEach((output) => output.delete()); + module.delete(); + }); + }); +}); diff --git a/extension/wasm/test/package.json b/extension/wasm/test/package.json new file mode 100644 index 00000000000..a25522fa51b --- /dev/null +++ b/extension/wasm/test/package.json @@ -0,0 +1,5 @@ +{ + "scripts": { + "test": "jest" + } +} diff --git a/extension/wasm/test_module.js b/extension/wasm/test_module.js deleted file mode 100644 index f64c09f28d4..00000000000 --- a/extension/wasm/test_module.js +++ /dev/null @@ -1,27 +0,0 @@ - -const et = require("./executorch_wasm"); -et.onRuntimeInitialized = () => { - module = et.Module.load("model.pte"); - var methods = module.getMethods(); - console.log(methods); - var method = methods[0]; - var methodMeta = module.getMethodMeta(method); - var inputs = []; - for (var i = 0; i < methodMeta.numInputs; i++) { - var tensor = et.FloatTensor.ones(methodMeta.inputTensorMeta(i).sizes); - console.log("input", i, tensor.getData(), tensor.getSizes()); - inputs.push(tensor); - } - var output = module.execute(method, inputs); - - for (var i = 0; i < inputs.length; i++) { - inputs[i].delete(); - } - - for (var i = 0; i < output.length; i++) { - console.log("output", i, output[i].getData(), output[i].getSizes()); - output[i].delete(); - } - - module.delete(); -} diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index f4611f921bd..9cf33d8c744 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -20,7 +20,7 @@ } \ }) -/// Throws a JavaScript Error with the provided message if `error` is not `Ok`. +/// Throws a JavaScript Error with the provided message if `cond` is not `true`. #define THROW_IF_FALSE(cond, message, ...) \ ({ \ if ET_UNLIKELY (!(cond)) { \ @@ -62,6 +62,7 @@ inline void assert_valid_numel( data.size()); } +// Base class for all JS Tensor types. Subclasses are not exposed to JS. 
class JsBaseTensor { public: virtual ~JsBaseTensor() = default; @@ -86,6 +87,7 @@ class JsBaseTensor { } }; +// Tensor that owns its own data. JS only has access to the static methods. template class JsTensor final : public JsBaseTensor { public: @@ -154,9 +156,10 @@ class JsTensor final : public JsBaseTensor { #define JS_DECLARE_TENSOR_TYPE(T, NAME) \ using Js##NAME##Tensor = JsTensor; - JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_TYPE) +// Tensor that does not own its own data. It is a wrapper around a C++ Tensor. +// This class is not exposed to JS. class JsOutputTensor final : public JsBaseTensor { public: JsOutputTensor() = delete; @@ -180,6 +183,7 @@ class JsOutputTensor final : public JsBaseTensor { std::unique_ptr tensor_; }; +// Converts JS value to EValue. EValue to_evalue(val v) { if (v.isNull()) { return EValue(); @@ -200,6 +204,7 @@ EValue to_evalue(val v) { } } +// Converts EValue to JS value. val to_val(EValue v) { if (v.isNone()) { return val::null(); @@ -221,6 +226,7 @@ val to_val(EValue v) { } } +// Wrapper around TensorInfo. class JsTensorInfo final { public: JsTensorInfo() = delete; @@ -241,6 +247,7 @@ class JsTensorInfo final { std::unique_ptr tensor_info_; }; +// Wrapper around MethodMeta. class JsMethodMeta final { public: JsMethodMeta() = delete; @@ -275,6 +282,7 @@ class JsMethodMeta final { std::unique_ptr meta_; }; +// Wrapper around extension/Module. 
class JsModule final { public: JsModule() = delete; diff --git a/runtime/core/portable_type/c10/c10/macros/Macros.h b/runtime/core/portable_type/c10/c10/macros/Macros.h index 6b51a39f2a9..6f2953c349a 100644 --- a/runtime/core/portable_type/c10/c10/macros/Macros.h +++ b/runtime/core/portable_type/c10/c10/macros/Macros.h @@ -396,6 +396,8 @@ extern SYCL_EXTERNAL void __assert_fail( const char* file, unsigned int line, const char* func); +#elif (defined(__EMSCRIPTEN__)) +_Noreturn void __assert_fail (const char *, const char *, int, const char *); #else // __SYCL_DEVICE_ONLY__ #if (defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__))) // CUDA supports __assert_fail function which are common for both device From 160f15f4d81019278937cc1ba40bf4e6e48d702b Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 17 Jul 2025 18:04:34 -0700 Subject: [PATCH 09/26] Applied suggestions --- extension/wasm/CMakeLists.txt | 14 +- extension/wasm/build_wasm.sh | 15 -- extension/wasm/test/CMakeLists.txt | 42 +++- extension/wasm/test/executorch_wasm.test.js | 103 ++++++-- extension/wasm/wasm_bindings.cpp | 247 ++++++++++++-------- scripts/build_wasm.sh | 28 +++ 6 files changed, 319 insertions(+), 130 deletions(-) delete mode 100644 extension/wasm/build_wasm.sh create mode 100644 scripts/build_wasm.sh diff --git a/extension/wasm/CMakeLists.txt b/extension/wasm/CMakeLists.txt index 6db5644d6d5..93c7e283b49 100644 --- a/extension/wasm/CMakeLists.txt +++ b/extension/wasm/CMakeLists.txt @@ -1,3 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Please this file formatted by running: +# ~~~ +# cmake-format -i CMakeLists.txt +# ~~~ cmake_minimum_required(VERSION 3.24) @@ -36,7 +46,9 @@ list( add_library(executorch_wasm OBJECT wasm_bindings.cpp) target_compile_options(executorch_wasm PUBLIC ${_common_compile_options}) -target_include_directories(executorch_wasm PUBLIC ${_common_include_directories}) +target_include_directories( + executorch_wasm PUBLIC ${_common_include_directories} +) target_link_libraries(executorch_wasm PUBLIC ${link_libraries}) if(BUILD_TESTING) diff --git a/extension/wasm/build_wasm.sh b/extension/wasm/build_wasm.sh deleted file mode 100644 index e53856d2247..00000000000 --- a/extension/wasm/build_wasm.sh +++ /dev/null @@ -1,15 +0,0 @@ - -cd "$(dirname "${BASH_SOURCE[0]}")/../../" -mkdir -p cmake-out-wasm -cd cmake-out-wasm -emcmake cmake -DEXECUTORCH_BUILD_WASM=ON \ - -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ - -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \ - -DEXECUTORCH_BUILD_DEVTOOLS=ON \ - -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \ - -DBUILD_TESTING=ON \ - -DCMAKE_BUILD_TYPE=Release \ - .. -make executorch_wasm_tests -j32 diff --git a/extension/wasm/test/CMakeLists.txt b/extension/wasm/test/CMakeLists.txt index 224071ce4a9..7aa3f23895a 100644 --- a/extension/wasm/test/CMakeLists.txt +++ b/extension/wasm/test/CMakeLists.txt @@ -1,33 +1,59 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# Please this file formatted by running: +# ~~~ +# cmake-format -i CMakeLists.txt +# ~~~ set(MODELS_DIR ${CMAKE_CURRENT_BINARY_DIR}/models/) add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/models/add_mul.pte ${CMAKE_CURRENT_BINARY_DIR}/models/add.pte + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/models/add_mul.pte + ${CMAKE_CURRENT_BINARY_DIR}/models/add.pte COMMAND ${CMAKE_COMMAND} -E make_directory "${MODELS_DIR}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../.. - COMMAND python3 -m examples.portable.scripts.export --model_name="add_mul" --output_dir="${MODELS_DIR}" - COMMAND python3 -m examples.portable.scripts.export --model_name="add" --output_dir="${MODELS_DIR}" + COMMAND python3 -m examples.portable.scripts.export --model_name="add_mul" + --output_dir="${MODELS_DIR}" + COMMAND python3 -m examples.portable.scripts.export --model_name="add" + --output_dir="${MODELS_DIR}" ) -add_custom_target(executorch_wasm_test_models DEPENDS ${MODELS_DIR}/add_mul.pte ${MODELS_DIR}/add.pte) +add_custom_target( + executorch_wasm_test_models DEPENDS ${MODELS_DIR}/add_mul.pte + ${MODELS_DIR}/add.pte +) add_executable(executorch_wasm_test_lib) target_link_libraries(executorch_wasm_test_lib PUBLIC executorch_wasm) -target_link_options(executorch_wasm_test_lib PUBLIC --embed-file "${MODELS_DIR}@/") +target_link_options( + executorch_wasm_test_lib PUBLIC --embed-file "${MODELS_DIR}@/" +) add_dependencies(executorch_wasm_test_lib executorch_wasm_test_models) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js + COMMAND + ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js + ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js COMMENT "Copying executorch_wasm.test.js to build output directory" ) add_custom_command( OUTPUT 
${CMAKE_CURRENT_BINARY_DIR}/package.json - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/package.json ${CMAKE_CURRENT_BINARY_DIR}/package.json + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/package.json + ${CMAKE_CURRENT_BINARY_DIR}/package.json DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/package.json COMMENT "Copying package.json to build output directory" ) -add_custom_target(executorch_wasm_tests DEPENDS executorch_wasm_test_lib ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js ${CMAKE_CURRENT_BINARY_DIR}/package.json) +add_custom_target( + executorch_wasm_tests + DEPENDS executorch_wasm_test_lib + ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js + ${CMAKE_CURRENT_BINARY_DIR}/package.json +) diff --git a/extension/wasm/test/executorch_wasm.test.js b/extension/wasm/test/executorch_wasm.test.js index 0bbe74c9691..a002faeca1e 100644 --- a/extension/wasm/test/executorch_wasm.test.js +++ b/extension/wasm/test/executorch_wasm.test.js @@ -1,3 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ let et; beforeAll((done) => { @@ -39,6 +46,23 @@ describe("Tensor", () => { expect(tensor.getSizes()).toEqual([2, 2]); tensor.delete(); }); + + test("scalar type", () => { + const tensor = et.FloatTensor.ones([2, 2]); + // ScalarType can only be checked by strict equality. + expect(tensor.scalarType).toBe(et.ScalarType.Float); + tensor.delete(); + }); + + test("long tensor", () => { + // Number cannot be converted to Long, so we use BigInt instead. + const tensor = et.LongTensor.fromArray([1n, 2n, 3n, 4n], [2, 2]); + expect(tensor.getData()).toEqual([1n, 2n, 3n, 4n]); + expect(tensor.getSizes()).toEqual([2, 2]); + // ScalarType can only be checked by strict equality. 
+ expect(tensor.scalarType).toBe(et.ScalarType.Long); + tensor.delete(); + }); }); describe("Module", () => { @@ -66,15 +90,31 @@ describe("Module", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); expect(methodMeta.name).toEqual("forward"); - methodMeta.delete(); module.delete(); }); - test("numInputs is 3", () => { + test("inputs are tensors", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); - expect(methodMeta.numInputs).toEqual(3); - methodMeta.delete(); + expect(methodMeta.inputTags.length).toEqual(3); + // Tags can only be checked by strict equality. + methodMeta.inputTags.forEach((tag) => expect(tag).toBe(et.Tag.Tensor)); + module.delete(); + }); + + test("outputs are tensors", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(methodMeta.outputTags.length).toEqual(1); + // Tags can only be checked by strict equality. 
+ expect(methodMeta.outputTags[0]).toBe(et.Tag.Tensor); + module.delete(); + }); + + test("num instructions is 2", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(methodMeta.numInstructions).toEqual(2); module.delete(); }); @@ -85,23 +125,58 @@ describe("Module", () => { }); describe("TensorInfo", () => { - test("sizes is 2x2", () => { + test("input sizes is 2x2", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); - for (var i = 0; i < methodMeta.numInputs; i++) { - const tensorInfo = methodMeta.inputTensorMeta(i); + expect(methodMeta.inputTensorMeta.length).toEqual(3); + methodMeta.inputTensorMeta.forEach((tensorInfo) => { expect(tensorInfo.sizes).toEqual([2, 2]); - tensorInfo.delete(); - } - methodMeta.delete(); + }); + module.delete(); + }); + + test("output sizes is 2x2", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + expect(methodMeta.outputTensorMeta.length).toEqual(1); + expect(methodMeta.outputTensorMeta[0].sizes).toEqual([2, 2]); + module.delete(); + }); + + test("dim order is contiguous", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + methodMeta.inputTensorMeta.forEach((tensorInfo) => { + expect(tensorInfo.dimOrder).toEqual([0, 1]); + }); + module.delete(); + }); + + test("scalar type is float", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + methodMeta.inputTensorMeta.forEach((tensorInfo) => { + // ScalarType can only be checked by strict equality. 
+ expect(tensorInfo.scalarType).toBe(et.ScalarType.Float); + }); + module.delete(); + }); + + test("memory planned", () => { + const module = et.Module.load("add_mul.pte"); + const methodMeta = module.getMethodMeta("forward"); + methodMeta.inputTensorMeta.forEach((tensorInfo) => { + expect(tensorInfo.isMemoryPlanned).toBe(true); + }); module.delete(); }); - test("out of range", () => { + test("nbytes is 16", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); - expect(() => methodMeta.inputTensorMeta(3)).toThrow(); - methodMeta.delete(); + methodMeta.inputTensorMeta.forEach((tensorInfo) => { + expect(tensorInfo.nbytes).toEqual(16); + }); module.delete(); }); }); @@ -170,7 +245,7 @@ describe("Module", () => { test("wrong input type", () => { const module = et.Module.load("add.pte"); - const inputs = [et.FloatTensor.ones([1]), et.IntTensor.ones([1])]; + const inputs = [et.FloatTensor.ones([1]), et.LongTensor.ones([1])]; expect(() => module.execute("forward", inputs)).toThrow(); inputs.forEach((input) => input.delete()); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 9cf33d8c744..67d159338b1 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -1,3 +1,10 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ #include #include @@ -29,10 +36,12 @@ }) using namespace emscripten; +using executorch::aten::ScalarType; using executorch::aten::Tensor; using ::executorch::runtime::Error; using ::executorch::runtime::EValue; using ::executorch::runtime::Result; +using ::executorch::runtime::Tag; using ::executorch::runtime::TensorInfo; namespace executorch { @@ -41,9 +50,19 @@ namespace wasm { namespace { +// val represents all JS values. 
Using val_array to specify that we specifically +// want an array. +template +using val_array = val; + +template +inline void js_array_push(val_array& array, const T& value) { + array.call("push", value); +} + #define JS_FORALL_SUPPORTED_TENSOR_TYPES(_) \ _(float, Float) \ - _(int, Int) + _(int64_t, Long) inline ssize_t compute_expected_numel( const std::vector& sizes) { @@ -67,14 +86,14 @@ class JsBaseTensor { public: virtual ~JsBaseTensor() = default; - virtual Tensor get_tensor() = 0; - virtual int get_scalar_type() const = 0; - val get_data() { + virtual const Tensor& get_tensor() = 0; + virtual ScalarType get_scalar_type() const = 0; + val_array get_data() { switch (get_scalar_type()) { -#define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ - case static_cast(aten::ScalarType::NAME): \ - return val::array( \ - get_tensor().data_ptr(), \ +#define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ + case ScalarType::NAME: \ + return val::array( \ + get_tensor().data_ptr(), \ get_tensor().data_ptr() + get_tensor().numel()); JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_TENSOR_TO_VAL_TYPE) default: @@ -82,13 +101,13 @@ class JsBaseTensor { TypeError, "Unsupported Tensor type: %d", get_scalar_type()); } } - val get_sizes() { + val_array get_sizes() { return val::array(get_tensor().sizes().begin(), get_tensor().sizes().end()); } }; // Tensor that owns its own data. JS only has access to the static methods. 
-template +template class JsTensor final : public JsBaseTensor { public: JsTensor(std::vector data, TensorPtr tensor) @@ -102,30 +121,36 @@ class JsTensor final : public JsBaseTensor { return std::make_unique(std::move(data_vec), std::move(tensor)); } - static std::unique_ptr full(val sizes, val fill_value) { + static std::unique_ptr full( + val_array sizes, + val fill_value) { auto sizes_vec = convertJSArrayToNumberVector(sizes); return fill_internal(std::move(sizes_vec), fill_value.as()); } - static std::unique_ptr zeros(val sizes) { + static std::unique_ptr zeros(val_array sizes) { auto sizes_vec = convertJSArrayToNumberVector(sizes); return fill_internal(std::move(sizes_vec), 0); } - static std::unique_ptr ones(val sizes) { + static std::unique_ptr ones(val_array sizes) { auto sizes_vec = convertJSArrayToNumberVector(sizes); return fill_internal(std::move(sizes_vec), 1); } - static std::unique_ptr from_array(val data, val sizes) { + static std::unique_ptr from_array( + val_array data, + val_array sizes) { return from_array(data, sizes, val::null()); } - static std::unique_ptr - from_array(val data, val sizes, val strides) { + static std::unique_ptr from_array( + val_array data, + val_array sizes, + val_array strides) { auto data_vec = convertJSArrayToNumberVector(data); auto sizes_vec = convertJSArrayToNumberVector(sizes); @@ -142,11 +167,11 @@ class JsTensor final : public JsBaseTensor { data_vec.data(), std::move(sizes_vec), std::move(strides_vec), S); return std::make_unique(std::move(data_vec), std::move(tensor)); } - Tensor get_tensor() override { + const Tensor& get_tensor() override { return *tensor_; } - int get_scalar_type() const override { - return static_cast(S); + ScalarType get_scalar_type() const override { + return S; } private: @@ -155,7 +180,7 @@ class JsTensor final : public JsBaseTensor { }; #define JS_DECLARE_TENSOR_TYPE(T, NAME) \ - using Js##NAME##Tensor = JsTensor; + using Js##NAME##Tensor = JsTensor; 
JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_TYPE) // Tensor that does not own its own data. It is a wrapper around a C++ Tensor. @@ -168,19 +193,18 @@ class JsOutputTensor final : public JsBaseTensor { JsOutputTensor(JsOutputTensor&&) = default; JsOutputTensor& operator=(JsOutputTensor&&) = default; - explicit JsOutputTensor(std::unique_ptr tensor) - : tensor_(std::move(tensor)) {} + explicit JsOutputTensor(Tensor tensor) : tensor_(tensor) {} - Tensor get_tensor() override { - return *tensor_; + const Tensor& get_tensor() override { + return tensor_; } - int get_scalar_type() const override { - return static_cast(tensor_->scalar_type()); + ScalarType get_scalar_type() const override { + return tensor_.scalar_type(); } private: - std::unique_ptr tensor_; + Tensor tensor_; }; // Converts JS value to EValue. @@ -195,7 +219,9 @@ EValue to_evalue(val v) { return EValue(false); } else { const std::string& type_str = v.typeOf().as(); - if (type_str == "object") { + if (type_str == "bigint") { + return EValue(v.as()); + } else if (type_str == "object") { // If it is an object, assume it is a tensor. return EValue(v.as().get_tensor()); } @@ -216,8 +242,8 @@ val to_val(EValue v) { return val(v.toBool()); } else if (v.isTensor()) { Tensor tensor = v.toTensor(); - std::unique_ptr wrapper = std::make_unique( - std::make_unique(std::move(tensor))); + std::unique_ptr wrapper = + std::make_unique(std::move(tensor)); return val(std::move(wrapper)); } else { char tag_buf[32]; @@ -226,60 +252,77 @@ val to_val(EValue v) { } } -// Wrapper around TensorInfo. 
-class JsTensorInfo final { - public: - JsTensorInfo() = delete; - JsTensorInfo(const JsTensorInfo&) = delete; - JsTensorInfo& operator=(const JsTensorInfo&) = delete; - JsTensorInfo(JsTensorInfo&&) = default; - JsTensorInfo& operator=(JsTensorInfo&&) = default; - - explicit JsTensorInfo(std::unique_ptr tensor_info) - : tensor_info_(std::move(tensor_info)) {} - - val sizes() const { - return val::array( - tensor_info_->sizes().begin(), tensor_info_->sizes().end()); +// JS object containing tensor metadata. +struct JsTensorInfo { + val_array sizes; + val_array dim_order; + ScalarType scalar_type; + bool is_memory_planned; + size_t nbytes; + std::string name; + + static JsTensorInfo from_tensor_info(const TensorInfo& info) { + return { + val::array(info.sizes().begin(), info.sizes().end()), + val::array(info.dim_order().begin(), info.dim_order().end()), + info.scalar_type(), + info.is_memory_planned(), + info.nbytes(), + std::string(info.name())}; } - - private: - std::unique_ptr tensor_info_; }; -// Wrapper around MethodMeta. -class JsMethodMeta final { - public: - JsMethodMeta() = delete; - JsMethodMeta(const JsMethodMeta&) = delete; - JsMethodMeta& operator=(const JsMethodMeta&) = delete; - JsMethodMeta(JsMethodMeta&&) = default; - JsMethodMeta& operator=(JsMethodMeta&&) = default; - - explicit JsMethodMeta(std::unique_ptr meta) - : meta_(std::move(meta)) {} - - val name() const { - return val::u8string(meta_->name()); - } - - size_t num_inputs() const { - return meta_->num_inputs(); - } - - std::unique_ptr input_tensor_meta(size_t index) { - auto res = meta_->input_tensor_meta(index); - THROW_IF_ERROR( - res.error(), - "Failed to get input tensor info for index %zu, error: 0x%" PRIx32, - index, - static_cast(res.error())); - return std::make_unique( - std::make_unique(std::move(res.get()))); +// JS object containing method metadata. 
+struct JsMethodMeta { + std::string name; + val_array input_tags; + val_array input_tensor_meta; + val_array output_tags; + val_array output_tensor_meta; + val_array attribute_tensor_meta; + val_array memory_planned_buffer_sizes; + val_array backends; + ET_DEPRECATED size_t num_instructions; + + static JsMethodMeta from_method_meta(const MethodMeta& meta) { + JsMethodMeta new_meta{ + meta.name(), + val::array(), + val::array(), + val::array(), + val::array(), + val::array(), + val::array(), + val::array(), + meta.num_instructions()}; + for (int i = 0; i < meta.num_inputs(); i++) { + js_array_push(new_meta.input_tags, meta.input_tag(i).get()); + js_array_push( + new_meta.input_tensor_meta, + JsTensorInfo::from_tensor_info(meta.input_tensor_meta(i).get())); + } + for (int i = 0; i < meta.num_outputs(); i++) { + js_array_push(new_meta.output_tags, meta.output_tag(i).get()); + js_array_push( + new_meta.output_tensor_meta, + JsTensorInfo::from_tensor_info(meta.output_tensor_meta(i).get())); + } + for (int i = 0; i < meta.num_attributes(); i++) { + js_array_push( + new_meta.attribute_tensor_meta, + JsTensorInfo::from_tensor_info(meta.attribute_tensor_meta(i).get())); + } + for (int i = 0; i < meta.num_memory_planned_buffers(); i++) { + js_array_push( + new_meta.memory_planned_buffer_sizes, + meta.memory_planned_buffer_size(i).get()); + } + for (int i = 0; i < meta.num_backends(); i++) { + js_array_push( + new_meta.backends, val::u8string(meta.get_backend_name(i).get())); + } + return new_meta; } - - private: - std::unique_ptr meta_; }; // Wrapper around extension/Module. 
@@ -316,19 +359,17 @@ class JsModule final { static_cast(res)); } - std::unique_ptr get_method_meta( - const std::string& method_name) { + JsMethodMeta get_method_meta(const std::string& method_name) { auto res = module_->method_meta(method_name); THROW_IF_ERROR( res.error(), "Failed to get method meta for %s, error: 0x%" PRIx32, method_name.c_str(), static_cast(res.error())); - return std::make_unique( - std::make_unique(std::move(res.get()))); + return JsMethodMeta::from_method_meta(res.get()); } - val execute(const std::string& method, val js_inputs) { + val_array execute(const std::string& method, val js_inputs) { std::vector inputs; if (js_inputs.isArray()) { inputs.reserve(js_inputs["length"].as()); @@ -347,12 +388,12 @@ class JsModule final { std::vector outputs = res.get(); val js_outputs = val::array(); for (auto& output : outputs) { - js_outputs.call("push", to_val(std::move(output))); + js_array_push(js_outputs, to_val(std::move(output))); } return js_outputs; } - val forward(val inputs) { + val_array forward(val inputs) { return execute("forward", inputs); } @@ -363,6 +404,13 @@ class JsModule final { } // namespace EMSCRIPTEN_BINDINGS(WasmBindings) { + enum_("ScalarType") +#define JS_DECLARE_SCALAR_TYPE(T, NAME) .value(#NAME, ScalarType::NAME) + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_SCALAR_TYPE); + enum_("Tag") +#define JS_DECLARE_TAG(NAME) .value(#NAME, Tag::NAME) + EXECUTORCH_FORALL_TAGS(JS_DECLARE_TAG); + class_("Module") .class_function("load", &JsModule::load) .function("getMethods", &JsModule::get_methods) @@ -374,11 +422,26 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { .property("scalarType", &JsBaseTensor::get_scalar_type) .function("getData", &JsBaseTensor::get_data) .function("getSizes", &JsBaseTensor::get_sizes); - class_("MethodMeta") - .property("name", &JsMethodMeta::name) - .property("numInputs", &JsMethodMeta::num_inputs) - .function("inputTensorMeta", &JsMethodMeta::input_tensor_meta); - class_("TensorInfo").property("sizes", 
&JsTensorInfo::sizes); + value_object("TensorInfo") + .field("sizes", &JsTensorInfo::sizes) + .field("dimOrder", &JsTensorInfo::dim_order) + .field("scalarType", &JsTensorInfo::scalar_type) + .field("isMemoryPlanned", &JsTensorInfo::is_memory_planned) + .field("nbytes", &JsTensorInfo::nbytes) + .field("name", &JsTensorInfo::name); + value_object("MethodMeta") + .field("name", &JsMethodMeta::name) + .field("inputTags", &JsMethodMeta::input_tags) + .field("inputTensorMeta", &JsMethodMeta::input_tensor_meta) + .field("outputTags", &JsMethodMeta::output_tags) + .field("outputTensorMeta", &JsMethodMeta::output_tensor_meta) + .field("attributeTensorMeta", &JsMethodMeta::attribute_tensor_meta) + .field( + "memoryPlannedBufferSizes", + &JsMethodMeta::memory_planned_buffer_sizes) + .field("backends", &JsMethodMeta::backends) + .field("numInstructions", &JsMethodMeta::num_instructions); + #define JS_DECLARE_TENSOR_BINDINGS(T, NAME) \ class_(#NAME "Tensor") \ .class_function("zeros", &Js##NAME##Tensor::zeros) \ diff --git a/scripts/build_wasm.sh b/scripts/build_wasm.sh new file mode 100644 index 00000000000..fca5cc0c3d5 --- /dev/null +++ b/scripts/build_wasm.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +CMAKE_OUT=cmake-out-wasm + +cd "$(dirname "${BASH_SOURCE[0]}")/../" +emcmake cmake . 
-DEXECUTORCH_BUILD_WASM=ON \ + -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ + -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \ + -DEXECUTORCH_BUILD_DEVTOOLS=ON \ + -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ + -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ + -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \ + -DBUILD_TESTING=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -B"${CMAKE_OUT}" + +if [ "$(uname)" == "Darwin" ]; then + CMAKE_JOBS=$(( $(sysctl -n hw.ncpu) - 1 )) +else + CMAKE_JOBS=$(( $(nproc) - 1 )) +fi + +cmake --build ${CMAKE_OUT} --target executorch_wasm_tests -j ${CMAKE_JOBS} From bbc46dffa0a1f4dac3a0db47c7de65994677a5b9 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Fri, 18 Jul 2025 12:05:09 -0700 Subject: [PATCH 10/26] Cleaned up tensor classes --- extension/wasm/test/executorch_wasm.test.js | 79 ++++---- extension/wasm/wasm_bindings.cpp | 213 +++++++++----------- 2 files changed, 144 insertions(+), 148 deletions(-) diff --git a/extension/wasm/test/executorch_wasm.test.js b/extension/wasm/test/executorch_wasm.test.js index a002faeca1e..e230895838b 100644 --- a/extension/wasm/test/executorch_wasm.test.js +++ b/extension/wasm/test/executorch_wasm.test.js @@ -16,49 +16,58 @@ beforeAll((done) => { describe("Tensor", () => { test("ones", () => { - const tensor = et.FloatTensor.ones([2, 2]); - expect(tensor.getData()).toEqual([1, 1, 1, 1]); - expect(tensor.getSizes()).toEqual([2, 2]); + const tensor = et.Tensor.ones([2, 2]); + expect(tensor.data).toEqual([1, 1, 1, 1]); + expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("zeros", () => { - const tensor = et.FloatTensor.zeros([2, 2]); - expect(tensor.getData()).toEqual([0, 0, 0, 0]); - expect(tensor.getSizes()).toEqual([2, 2]); + const tensor = et.Tensor.zeros([2, 2]); + expect(tensor.data).toEqual([0, 0, 0, 0]); + expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("fromArray", () => { - const tensor = et.FloatTensor.fromArray([1, 2, 3, 4], [2, 2]); - expect(tensor.getData()).toEqual([1, 2, 3, 
4]); - expect(tensor.getSizes()).toEqual([2, 2]); + const tensor = et.Tensor.fromArray([2, 2], [1, 2, 3, 4]); + expect(tensor.data).toEqual([1, 2, 3, 4]); + expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("fromArray wrong size", () => { - expect(() => et.FloatTensor.fromArray([1, 2, 3, 4], [3, 2])).toThrow(); + expect(() => et.Tensor.fromArray([3, 2], [1, 2, 3, 4])).toThrow(); }); test("full", () => { - const tensor = et.FloatTensor.full([2, 2], 3); - expect(tensor.getData()).toEqual([3, 3, 3, 3]); - expect(tensor.getSizes()).toEqual([2, 2]); + const tensor = et.Tensor.full([2, 2], 3); + expect(tensor.data).toEqual([3, 3, 3, 3]); + expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("scalar type", () => { - const tensor = et.FloatTensor.ones([2, 2]); + const tensor = et.Tensor.ones([2, 2]); // ScalarType can only be checked by strict equality. expect(tensor.scalarType).toBe(et.ScalarType.Float); tensor.delete(); }); test("long tensor", () => { + const tensor = et.Tensor.ones([2, 2], et.ScalarType.Long); + expect(tensor.data).toEqual([1n, 1n, 1n, 1n]); + expect(tensor.sizes).toEqual([2, 2]); + // ScalarType can only be checked by strict equality. + expect(tensor.scalarType).toBe(et.ScalarType.Long); + tensor.delete(); + }); + + test("infer long tensor", () => { // Number cannot be converted to Long, so we use BigInt instead. - const tensor = et.LongTensor.fromArray([1n, 2n, 3n, 4n], [2, 2]); - expect(tensor.getData()).toEqual([1n, 2n, 3n, 4n]); - expect(tensor.getSizes()).toEqual([2, 2]); + const tensor = et.Tensor.fromArray([2, 2], [1n, 2n, 3n, 4n]); + expect(tensor.data).toEqual([1n, 2n, 3n, 4n]); + expect(tensor.sizes).toEqual([2, 2]); // ScalarType can only be checked by strict equality. 
expect(tensor.scalarType).toBe(et.ScalarType.Long); tensor.delete(); @@ -185,12 +194,12 @@ describe("Module", () => { describe("execute", () => { test("add normally", () => { const module = et.Module.load("add.pte"); - const inputs = [et.FloatTensor.ones([1]), et.FloatTensor.ones([1])]; + const inputs = [et.Tensor.ones([1]), et.Tensor.ones([1])]; const output = module.execute("forward", inputs); expect(output.length).toEqual(1); - expect(output[0].getData()).toEqual([2]); - expect(output[0].getSizes()).toEqual([1]); + expect(output[0].data).toEqual([2]); + expect(output[0].sizes).toEqual([1]); inputs.forEach((input) => input.delete()); output.forEach((output) => output.delete()); @@ -199,12 +208,12 @@ describe("Module", () => { test("add_mul normally", () => { const module = et.Module.load("add_mul.pte"); - const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const inputs = [et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2])]; const output = module.execute("forward", inputs); expect(output.length).toEqual(1); - expect(output[0].getData()).toEqual([3, 3, 3, 3]); - expect(output[0].getSizes()).toEqual([2, 2]); + expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].sizes).toEqual([2, 2]); inputs.forEach((input) => input.delete()); output.forEach((output) => output.delete()); @@ -213,12 +222,12 @@ describe("Module", () => { test("forward directly", () => { const module = et.Module.load("add_mul.pte"); - const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const inputs = [et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2])]; const output = module.forward(inputs); expect(output.length).toEqual(1); - expect(output[0].getData()).toEqual([3, 3, 3, 3]); - expect(output[0].getSizes()).toEqual([2, 2]); + expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].sizes).toEqual([2, 2]); inputs.forEach((input) => 
input.delete()); output.forEach((output) => output.delete()); @@ -227,7 +236,7 @@ describe("Module", () => { test("wrong number of inputs", () => { const module = et.Module.load("add_mul.pte"); - const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const inputs = [et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2])]; expect(() => module.execute("forward", inputs)).toThrow(); inputs.forEach((input) => input.delete()); @@ -236,7 +245,7 @@ describe("Module", () => { test("wrong input size", () => { const module = et.Module.load("add.pte"); - const inputs = [et.FloatTensor.ones([2, 1]), et.FloatTensor.ones([2, 1])]; + const inputs = [et.Tensor.ones([2, 1]), et.Tensor.ones([2, 1])]; expect(() => module.execute("forward", inputs)).toThrow(); inputs.forEach((input) => input.delete()); @@ -245,7 +254,7 @@ describe("Module", () => { test("wrong input type", () => { const module = et.Module.load("add.pte"); - const inputs = [et.FloatTensor.ones([1]), et.LongTensor.ones([1])]; + const inputs = [et.Tensor.ones([1]), et.Tensor.ones([1], et.ScalarType.Long)]; expect(() => module.execute("forward", inputs)).toThrow(); inputs.forEach((input) => input.delete()); @@ -254,7 +263,7 @@ describe("Module", () => { test("method does not exist", () => { const module = et.Module.load("add.pte"); - const inputs = [et.FloatTensor.ones([1]), et.FloatTensor.ones([1])]; + const inputs = [et.Tensor.ones([1]), et.Tensor.ones([1])]; expect(() => module.execute("does_not_exist", inputs)).toThrow(); inputs.forEach((input) => input.delete()); @@ -263,19 +272,19 @@ describe("Module", () => { test("output tensor can be reused", () => { const module = et.Module.load("add_mul.pte"); - const inputs = [et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2]), et.FloatTensor.ones([2, 2])]; + const inputs = [et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2]), et.Tensor.ones([2, 2])]; const output = module.forward(inputs); expect(output.length).toEqual(1); - 
expect(output[0].getData()).toEqual([3, 3, 3, 3]); - expect(output[0].getSizes()).toEqual([2, 2]); + expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].sizes).toEqual([2, 2]); const inputs2 = [output[0], output[0], output[0]]; const output2 = module.forward(inputs2); expect(output2.length).toEqual(1); - expect(output2[0].getData()).toEqual([21, 21, 21, 21]); - expect(output2[0].getSizes()).toEqual([2, 2]); + expect(output2[0].data).toEqual([21, 21, 21, 21]); + expect(output2[0].sizes).toEqual([2, 2]); inputs.forEach((input) => input.delete()); output.forEach((output) => output.delete()); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 67d159338b1..d7373ab17f4 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -81,14 +81,26 @@ inline void assert_valid_numel( data.size()); } -// Base class for all JS Tensor types. Subclasses are not exposed to JS. -class JsBaseTensor { +class JsTensor { public: - virtual ~JsBaseTensor() = default; + JsTensor() = delete; + JsTensor(const JsTensor&) = delete; + JsTensor& operator=(const JsTensor&) = delete; + JsTensor(JsTensor&&) = default; + JsTensor& operator=(JsTensor&&) = default; - virtual const Tensor& get_tensor() = 0; - virtual ScalarType get_scalar_type() const = 0; - val_array get_data() { + explicit JsTensor(TensorPtr tensor) : tensor_(std::move(tensor)) {} + explicit JsTensor(Tensor&& tensor) + : tensor_(std::make_shared(tensor)) {} + + const Tensor& get_tensor() const { + return *tensor_; + } + + ScalarType get_scalar_type() const { + return tensor_->scalar_type(); + } + val_array get_data() const { switch (get_scalar_type()) { #define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ case ScalarType::NAME: \ @@ -101,112 +113,98 @@ class JsBaseTensor { TypeError, "Unsupported Tensor type: %d", get_scalar_type()); } } - val_array get_sizes() { + val_array get_sizes() const { return val::array(get_tensor().sizes().begin(), 
get_tensor().sizes().end()); } -}; -// Tensor that owns its own data. JS only has access to the static methods. -template -class JsTensor final : public JsBaseTensor { - public: - JsTensor(std::vector data, TensorPtr tensor) - : data_(std::move(data)), tensor_(std::move(tensor)) {} - - static std::unique_ptr fill_internal( - const std::vector&& sizes, - T fill_value) { - std::vector data_vec(compute_expected_numel(sizes), fill_value); - TensorPtr tensor = from_blob(data_vec.data(), sizes, S); - return std::make_unique(std::move(data_vec), std::move(tensor)); - } - - static std::unique_ptr full( - val_array sizes, - val fill_value) { + static std::unique_ptr + full(val_array sizes, val fill_value, val type = val::undefined()) { auto sizes_vec = - convertJSArrayToNumberVector(sizes); - return fill_internal(std::move(sizes_vec), fill_value.as()); + convertJSArrayToNumberVector(sizes); + ScalarType scalar_type = + type.isUndefined() ? ScalarType::Float : type.as(); + switch (scalar_type) { +#define JS_CASE_FULL_VECTOR_TYPE(T, NAME) \ + case ScalarType::NAME: { \ + TensorPtr tensor = \ + extension::full(sizes_vec, fill_value.as(), ScalarType::NAME); \ + return std::make_unique(std::move(tensor)); \ } - - static std::unique_ptr zeros(val_array sizes) { - auto sizes_vec = - convertJSArrayToNumberVector(sizes); - return fill_internal(std::move(sizes_vec), 0); + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FULL_VECTOR_TYPE) + default: + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + } } - static std::unique_ptr ones(val_array sizes) { + static std::unique_ptr zeros( + val_array sizes, + val type = val::undefined()) { auto sizes_vec = - convertJSArrayToNumberVector(sizes); - return fill_internal(std::move(sizes_vec), 1); + convertJSArrayToNumberVector(sizes); + ScalarType scalar_type = + type.isUndefined() ? 
ScalarType::Float : type.as(); + TensorPtr tensor = extension::zeros(sizes_vec, scalar_type); + return std::make_unique(std::move(tensor)); } - static std::unique_ptr from_array( - val_array data, - val_array sizes) { - return from_array(data, sizes, val::null()); + static std::unique_ptr ones( + val_array sizes, + val type = val::undefined()) { + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + ScalarType scalar_type = + type.isUndefined() ? ScalarType::Float : type.as(); + TensorPtr tensor = extension::ones(sizes_vec, scalar_type); + return std::make_unique(std::move(tensor)); } - static std::unique_ptr from_array( - val_array data, + static std::unique_ptr from_array( val_array sizes, - val_array strides) { - auto data_vec = convertJSArrayToNumberVector(data); + val_array data, + val type = val::undefined(), + val_array dim_order = val::undefined(), + val_array strides = val::undefined()) { auto sizes_vec = - convertJSArrayToNumberVector(sizes); - assert_valid_numel(data_vec, sizes_vec); - - if (strides.isNull()) { - TensorPtr tensor = from_blob(data_vec.data(), std::move(sizes_vec), S); - return std::make_unique(std::move(data_vec), std::move(tensor)); - } - auto strides_vec = - convertJSArrayToNumberVector( - strides); - TensorPtr tensor = from_blob( - data_vec.data(), std::move(sizes_vec), std::move(strides_vec), S); - return std::make_unique(std::move(data_vec), std::move(tensor)); - } - const Tensor& get_tensor() override { - return *tensor_; + convertJSArrayToNumberVector(sizes); + + auto dim_order_vec = dim_order.isUndefined() + ? std::vector() + : convertJSArrayToNumberVector( + dim_order); + auto strides_vec = strides.isUndefined() + ? std::vector() + : convertJSArrayToNumberVector(strides); + + // If type is undefined, infer the type from the data. + // Assume it is a Bigint if not Number. + ScalarType scalar_type = type.isUndefined() + ? (data["length"].as() == 0 || data[0].isNumber() + ? 
ScalarType::Float + : ScalarType::Long) + : type.as(); + switch (scalar_type) { +#define JS_CASE_FROM_ARRAY_VECTOR_TYPE(T, NAME) \ + case ScalarType::NAME: { \ + auto data_vec = convertJSArrayToNumberVector(data); \ + assert_valid_numel(data_vec, sizes_vec); \ + TensorPtr tensor = make_tensor_ptr( \ + std::move(sizes_vec), \ + std::move(data_vec), \ + std::move(dim_order_vec), \ + std::move(strides_vec), \ + ScalarType::NAME); \ + return std::make_unique(std::move(tensor)); \ } - ScalarType get_scalar_type() const override { - return S; + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FROM_ARRAY_VECTOR_TYPE) + default: + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + } } private: - std::vector data_; TensorPtr tensor_; }; -#define JS_DECLARE_TENSOR_TYPE(T, NAME) \ - using Js##NAME##Tensor = JsTensor; -JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_TYPE) - -// Tensor that does not own its own data. It is a wrapper around a C++ Tensor. -// This class is not exposed to JS. -class JsOutputTensor final : public JsBaseTensor { - public: - JsOutputTensor() = delete; - JsOutputTensor(const JsOutputTensor&) = delete; - JsOutputTensor& operator=(const JsOutputTensor&) = delete; - JsOutputTensor(JsOutputTensor&&) = default; - JsOutputTensor& operator=(JsOutputTensor&&) = default; - - explicit JsOutputTensor(Tensor tensor) : tensor_(tensor) {} - - const Tensor& get_tensor() override { - return tensor_; - } - - ScalarType get_scalar_type() const override { - return tensor_.scalar_type(); - } - - private: - Tensor tensor_; -}; - // Converts JS value to EValue. EValue to_evalue(val v) { if (v.isNull()) { @@ -223,7 +221,7 @@ EValue to_evalue(val v) { return EValue(v.as()); } else if (type_str == "object") { // If it is an object, assume it is a tensor. 
- return EValue(v.as().get_tensor()); + return EValue(v.as().get_tensor()); } THROW_JS_ERROR( TypeError, "Unsupported JavaScript type: %s", type_str.c_str()); @@ -242,8 +240,8 @@ val to_val(EValue v) { return val(v.toBool()); } else if (v.isTensor()) { Tensor tensor = v.toTensor(); - std::unique_ptr wrapper = - std::make_unique(std::move(tensor)); + std::unique_ptr wrapper = + std::make_unique(std::move(tensor)); return val(std::move(wrapper)); } else { char tag_buf[32]; @@ -418,10 +416,14 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { .function("getMethodMeta", &JsModule::get_method_meta) .function("execute", &JsModule::execute) .function("forward", &JsModule::forward); - class_("Tensor") - .property("scalarType", &JsBaseTensor::get_scalar_type) - .function("getData", &JsBaseTensor::get_data) - .function("getSizes", &JsBaseTensor::get_sizes); + class_("Tensor") + .class_function("zeros", &JsTensor::zeros) + .class_function("ones", &JsTensor::ones) + .class_function("full", &JsTensor::full) + .class_function("fromArray", &JsTensor::from_array) + .property("scalarType", &JsTensor::get_scalar_type) + .property("data", &JsTensor::get_data) + .property("sizes", &JsTensor::get_sizes); value_object("TensorInfo") .field("sizes", &JsTensorInfo::sizes) .field("dimOrder", &JsTensorInfo::dim_order) @@ -441,21 +443,6 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { &JsMethodMeta::memory_planned_buffer_sizes) .field("backends", &JsMethodMeta::backends) .field("numInstructions", &JsMethodMeta::num_instructions); - -#define JS_DECLARE_TENSOR_BINDINGS(T, NAME) \ - class_(#NAME "Tensor") \ - .class_function("zeros", &Js##NAME##Tensor::zeros) \ - .class_function("ones", &Js##NAME##Tensor::ones) \ - .class_function("full", &Js##NAME##Tensor::full) \ - .class_function( \ - "fromArray", \ - select_overload(val, val, val)>( \ - &Js##NAME##Tensor::from_array)) \ - .class_function( \ - "fromArray", \ - select_overload(val, val)>( \ - &Js##NAME##Tensor::from_array)); - 
JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_DECLARE_TENSOR_BINDINGS) } } // namespace wasm From 79d2769df33a549cc58abc0d1ef51f772b1d3cbb Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Fri, 18 Jul 2025 15:56:46 -0700 Subject: [PATCH 11/26] Added loading from Uint8Array --- extension/wasm/wasm_bindings.cpp | 31 ++++++++++++++++--- .../{build_wasm.sh => build_wasm_tests.sh} | 0 2 files changed, 27 insertions(+), 4 deletions(-) rename scripts/{build_wasm.sh => build_wasm_tests.sh} (100%) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index d7373ab17f4..b69689d5cb4 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -38,6 +39,7 @@ using namespace emscripten; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using ::executorch::extension::BufferDataLoader; using ::executorch::runtime::Error; using ::executorch::runtime::EValue; using ::executorch::runtime::Result; @@ -333,10 +335,29 @@ class JsModule final { JsModule& operator=(JsModule&&) = default; explicit JsModule(std::unique_ptr module) - : module_(std::move(module)) {} - - static std::unique_ptr load(const std::string& path) { - return std::make_unique(std::make_unique(path)); + : buffer_(0), module_(std::move(module)) {} + + explicit JsModule(std::vector buffer, std::unique_ptr module) + : buffer_(std::move(buffer)), module_(std::move(module)) {} + + static std::unique_ptr load(val data) { + if (data.isString()) { + return std::make_unique( + std::make_unique(data.as())); + } else if (data.instanceof (val::global("Uint8Array"))) { + size_t length = data["length"].as(); + std::vector buffer(length); + val memory_view = val(typed_memory_view(length, buffer.data())); + memory_view.call("set", data); + auto loader = std::make_unique(buffer.data(), length); + return std::make_unique( + std::move(buffer), std::make_unique(std::move(loader))); + } else { + 
THROW_JS_ERROR( + TypeError, + "Unsupported data type: %s", + data.typeOf().as().c_str()); + } } val get_methods() { @@ -396,6 +417,8 @@ class JsModule final { } private: + // If loaded from a buffer, keeps it alive for the lifetime of the module. + std::vector buffer_; std::unique_ptr module_; }; diff --git a/scripts/build_wasm.sh b/scripts/build_wasm_tests.sh similarity index 100% rename from scripts/build_wasm.sh rename to scripts/build_wasm_tests.sh From a83868587056939d185154c6895c3596097a8f4a Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Fri, 18 Jul 2025 17:34:34 -0700 Subject: [PATCH 12/26] Added CI tests --- .ci/scripts/setup-emscripten.sh | 8 +++++++ .github/workflows/pull.yml | 39 +++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/.ci/scripts/setup-emscripten.sh b/.ci/scripts/setup-emscripten.sh index 637f3cbda0d..36b9b4df14b 100644 --- a/.ci/scripts/setup-emscripten.sh +++ b/.ci/scripts/setup-emscripten.sh @@ -1,6 +1,13 @@ set -ex +# need version >= 17 +install_node() { + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.3/install.sh | bash + source "$HOME/.nvm/nvm.sh" + nvm install 22 +} + install_emscripten() { git clone https://github.com/emscripten-core/emsdk.git pushd emsdk || return @@ -10,4 +17,5 @@ install_emscripten() { popd || return } +install_node install_emscripten diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 88a82d1a4b3..ba3de824f94 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -763,6 +763,41 @@ jobs: # Test selective build PYTHON_EXECUTABLE=python bash examples/wasm/test_build_wasm.sh + unittest-wasm-bindings: + name: unittest-wasm-bindings + uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + with: + runner: linux.2xlarge + docker-image: executorch-ubuntu-22.04-clang12 + submodules: 'recursive' + ref: ${{ 
github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout: 90 + script: | + # The generic Linux job chooses to use base env, not the one setup by the image + CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") + conda activate "${CONDA_ENV}" + + BUILD_TOOL="cmake" + PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}" + + # Install Node.js and Emscripten + source .ci/scripts/setup-emscripten.sh + + # Test selective build + scripts/build_wasm_tests.sh + + # Install Jest + cd cmake-out-wasm/extension/wasm/test + npm install --save-dev jest + + # Run unit test + npm test + unittest-nxp-neutron: uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main permissions: @@ -791,8 +826,8 @@ jobs: # Run pytest PYTHON_EXECUTABLE=python bash backends/nxp/run_unittests.sh - - # Run aot example: + + # Run aot example: PYTHON_EXECUTABLE=python bash examples/nxp/run_aot_example.sh From e63961c9a8a57af11b587bc9a947085134cb97cb Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Sat, 19 Jul 2025 16:58:35 -0700 Subject: [PATCH 13/26] Fix CI --- .github/workflows/pull.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index c4bdc1d8c18..7afbf5a6348 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -789,7 +789,7 @@ jobs: source .ci/scripts/setup-emscripten.sh # Test selective build - scripts/build_wasm_tests.sh + bash scripts/build_wasm_tests.sh # Install Jest cd cmake-out-wasm/extension/wasm/test From bfbbfc556050cd4ff6b59ff0f2139a9550a4f23a Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Sat, 19 Jul 2025 18:07:02 -0700 Subject: [PATCH 14/26] Build now uses --post-js to test load from buffer --- extension/wasm/test/CMakeLists.txt | 41 ++++++++++--------- .../{executorch_wasm.test.js => unittests.js} | 10 ++++- 2 files changed, 30 insertions(+), 21 deletions(-) rename 
extension/wasm/test/{executorch_wasm.test.js => unittests.js} (97%) diff --git a/extension/wasm/test/CMakeLists.txt b/extension/wasm/test/CMakeLists.txt index 7aa3f23895a..04acd99d326 100644 --- a/extension/wasm/test/CMakeLists.txt +++ b/extension/wasm/test/CMakeLists.txt @@ -27,22 +27,6 @@ add_custom_target( ${MODELS_DIR}/add.pte ) -add_executable(executorch_wasm_test_lib) -target_link_libraries(executorch_wasm_test_lib PUBLIC executorch_wasm) -target_link_options( - executorch_wasm_test_lib PUBLIC --embed-file "${MODELS_DIR}@/" -) -add_dependencies(executorch_wasm_test_lib executorch_wasm_test_models) - -add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js - COMMAND - ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js - ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/executorch_wasm.test.js - COMMENT "Copying executorch_wasm.test.js to build output directory" -) - add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/package.json COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/package.json @@ -52,8 +36,25 @@ add_custom_command( ) add_custom_target( - executorch_wasm_tests - DEPENDS executorch_wasm_test_lib - ${CMAKE_CURRENT_BINARY_DIR}/executorch_wasm.test.js - ${CMAKE_CURRENT_BINARY_DIR}/package.json + executorch_wasm_test_package_json + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/package.json +) + +add_executable(executorch_wasm_tests) +target_link_libraries(executorch_wasm_tests PUBLIC executorch_wasm) +target_link_options( + executorch_wasm_tests PUBLIC --embed-file "${MODELS_DIR}@/" --post-js + ${CMAKE_CURRENT_SOURCE_DIR}/unittests.js +) +set_target_properties( + executorch_wasm_tests PROPERTIES OUTPUT_NAME "executorch_wasm.test" +) +set_property( + TARGET executorch_wasm_tests + APPEND + PROPERTY LINK_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/unittests.js +) +add_dependencies( + executorch_wasm_tests executorch_wasm_test_models + executorch_wasm_test_package_json 
) diff --git a/extension/wasm/test/executorch_wasm.test.js b/extension/wasm/test/unittests.js similarity index 97% rename from extension/wasm/test/executorch_wasm.test.js rename to extension/wasm/test/unittests.js index e230895838b..49e39f9bf2b 100644 --- a/extension/wasm/test/executorch_wasm.test.js +++ b/extension/wasm/test/unittests.js @@ -8,7 +8,7 @@ let et; beforeAll((done) => { - et = require("./executorch_wasm_test_lib"); + et = Module; et.onRuntimeInitialized = () => { done(); } @@ -94,6 +94,14 @@ describe("Module", () => { module.delete(); }); + test("load from buffer", () => { + const data = FS.readFile('add.pte'); + const module = et.Module.load(data); + const methods = module.getMethods(); + expect(methods).toEqual(["forward"]); + module.delete(); + }); + describe("MethodMeta", () => { test("name is forward", () => { const module = et.Module.load("add_mul.pte"); From b274daeca8da6e68a58142b5e83ef1a7993b9671 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 10:34:28 -0700 Subject: [PATCH 15/26] Changed JsTensor data to return memory view --- extension/wasm/test/unittests.js | 22 +++++++++++----------- extension/wasm/wasm_bindings.cpp | 7 +++---- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/extension/wasm/test/unittests.js b/extension/wasm/test/unittests.js index 49e39f9bf2b..619bed9666f 100644 --- a/extension/wasm/test/unittests.js +++ b/extension/wasm/test/unittests.js @@ -17,21 +17,21 @@ beforeAll((done) => { describe("Tensor", () => { test("ones", () => { const tensor = et.Tensor.ones([2, 2]); - expect(tensor.data).toEqual([1, 1, 1, 1]); + expect(tensor.data).toEqual(new Float32Array([1, 1, 1, 1])); expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("zeros", () => { const tensor = et.Tensor.zeros([2, 2]); - expect(tensor.data).toEqual([0, 0, 0, 0]); + expect(tensor.data).toEqual(new Float32Array([0, 0, 0, 0])); expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); test("fromArray", () => { 
const tensor = et.Tensor.fromArray([2, 2], [1, 2, 3, 4]); - expect(tensor.data).toEqual([1, 2, 3, 4]); + expect(tensor.data).toEqual(new Float32Array([1, 2, 3, 4])); expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); @@ -42,7 +42,7 @@ describe("Tensor", () => { test("full", () => { const tensor = et.Tensor.full([2, 2], 3); - expect(tensor.data).toEqual([3, 3, 3, 3]); + expect(tensor.data).toEqual(new Float32Array([3, 3, 3, 3])); expect(tensor.sizes).toEqual([2, 2]); tensor.delete(); }); @@ -56,7 +56,7 @@ describe("Tensor", () => { test("long tensor", () => { const tensor = et.Tensor.ones([2, 2], et.ScalarType.Long); - expect(tensor.data).toEqual([1n, 1n, 1n, 1n]); + expect(tensor.data).toEqual(new BigInt64Array([1n, 1n, 1n, 1n])); expect(tensor.sizes).toEqual([2, 2]); // ScalarType can only be checked by strict equality. expect(tensor.scalarType).toBe(et.ScalarType.Long); @@ -66,7 +66,7 @@ describe("Tensor", () => { test("infer long tensor", () => { // Number cannot be converted to Long, so we use BigInt instead. const tensor = et.Tensor.fromArray([2, 2], [1n, 2n, 3n, 4n]); - expect(tensor.data).toEqual([1n, 2n, 3n, 4n]); + expect(tensor.data).toEqual(new BigInt64Array([1n, 2n, 3n, 4n])); expect(tensor.sizes).toEqual([2, 2]); // ScalarType can only be checked by strict equality. 
expect(tensor.scalarType).toBe(et.ScalarType.Long); @@ -206,7 +206,7 @@ describe("Module", () => { const output = module.execute("forward", inputs); expect(output.length).toEqual(1); - expect(output[0].data).toEqual([2]); + expect(output[0].data).toEqual(new Float32Array([2])); expect(output[0].sizes).toEqual([1]); inputs.forEach((input) => input.delete()); @@ -220,7 +220,7 @@ describe("Module", () => { const output = module.execute("forward", inputs); expect(output.length).toEqual(1); - expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].data).toEqual(new Float32Array([3, 3, 3, 3])); expect(output[0].sizes).toEqual([2, 2]); inputs.forEach((input) => input.delete()); @@ -234,7 +234,7 @@ describe("Module", () => { const output = module.forward(inputs); expect(output.length).toEqual(1); - expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].data).toEqual(new Float32Array([3, 3, 3, 3])); expect(output[0].sizes).toEqual([2, 2]); inputs.forEach((input) => input.delete()); @@ -284,14 +284,14 @@ describe("Module", () => { const output = module.forward(inputs); expect(output.length).toEqual(1); - expect(output[0].data).toEqual([3, 3, 3, 3]); + expect(output[0].data).toEqual(new Float32Array([3, 3, 3, 3])); expect(output[0].sizes).toEqual([2, 2]); const inputs2 = [output[0], output[0], output[0]]; const output2 = module.forward(inputs2); expect(output2.length).toEqual(1); - expect(output2[0].data).toEqual([21, 21, 21, 21]); + expect(output2[0].data).toEqual(new Float32Array([21, 21, 21, 21])); expect(output2[0].sizes).toEqual([2, 2]); inputs.forEach((input) => input.delete()); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index b69689d5cb4..dd6250bc117 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -102,13 +102,12 @@ class JsTensor { ScalarType get_scalar_type() const { return tensor_->scalar_type(); } - val_array get_data() const { + val get_data() const { switch 
(get_scalar_type()) { #define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ case ScalarType::NAME: \ - return val::array( \ - get_tensor().data_ptr(), \ - get_tensor().data_ptr() + get_tensor().numel()); + return val( \ + typed_memory_view(get_tensor().numel(), get_tensor().data_ptr())); JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_TENSOR_TO_VAL_TYPE) default: THROW_JS_ERROR( From 7de34b90c31d315843f2b7e95c7e1e3ebce9b614 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 11:12:21 -0700 Subject: [PATCH 16/26] Applied suggestions --- extension/wasm/CMakeLists.txt | 4 ++-- extension/wasm/wasm_bindings.cpp | 17 +++++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/extension/wasm/CMakeLists.txt b/extension/wasm/CMakeLists.txt index 93c7e283b49..5d51e3fc859 100644 --- a/extension/wasm/CMakeLists.txt +++ b/extension/wasm/CMakeLists.txt @@ -9,7 +9,7 @@ # cmake-format -i CMakeLists.txt # ~~~ -cmake_minimum_required(VERSION 3.24) +cmake_minimum_required(VERSION 3.29) project(executorch_wasm) @@ -27,7 +27,7 @@ if(NOT EXECUTORCH_ROOT) endif() include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake) -set(_common_compile_options -Wno-deprecated-declarations -fPIC) +set(_common_compile_options -Wno-deprecated-declarations -fPIC -Wall -Werror) set(_common_include_directories ${EXECUTORCH_ROOT}/..) set(link_libraries) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index dd6250bc117..b3b519c545c 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -14,7 +14,7 @@ #define THROW_JS_ERROR(errorType, message, ...) 
\ ({ \ - char msg_buf[128]; \ + char msg_buf[256]; \ snprintf(msg_buf, sizeof(msg_buf), message, ##__VA_ARGS__); \ EM_ASM(throw new errorType(UTF8ToString($0)), msg_buf); \ __builtin_unreachable(); \ @@ -96,18 +96,20 @@ class JsTensor { : tensor_(std::make_shared(tensor)) {} const Tensor& get_tensor() const { + THROW_IF_FALSE(tensor_, "Tensor is null"); return *tensor_; } ScalarType get_scalar_type() const { + THROW_IF_FALSE(tensor_, "Tensor is null"); return tensor_->scalar_type(); } val get_data() const { switch (get_scalar_type()) { -#define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ - case ScalarType::NAME: \ - return val( \ - typed_memory_view(get_tensor().numel(), get_tensor().data_ptr())); +#define JS_CASE_TENSOR_TO_VAL_TYPE(T, NAME) \ + case ScalarType::NAME: \ + THROW_IF_FALSE(tensor_->data_ptr(), "Tensor data is null"); \ + return val(typed_memory_view(tensor_->numel(), tensor_->data_ptr())); JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_TENSOR_TO_VAL_TYPE) default: THROW_JS_ERROR( @@ -246,7 +248,7 @@ val to_val(EValue v) { return val(std::move(wrapper)); } else { char tag_buf[32]; - runtime::tag_to_string(v.tag, tag_buf, 32); + runtime::tag_to_string(v.tag, tag_buf, sizeof(tag_buf)); THROW_JS_ERROR(TypeError, "Unsupported EValue type: %s", tag_buf); } } @@ -340,6 +342,9 @@ class JsModule final { : buffer_(std::move(buffer)), module_(std::move(module)) {} static std::unique_ptr load(val data) { + if (data.isNull() || data.isUndefined()) { + THROW_JS_ERROR(TypeError, "Data cannot be null or undefined"); + } if (data.isString()) { return std::make_unique( std::make_unique(data.as())); From 4aab20fad9e90303e03a50051186aeeda5bf9bc9 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 11:56:50 -0700 Subject: [PATCH 17/26] Add making JsTensor from iterator --- extension/wasm/test/unittests.js | 10 ++++++ extension/wasm/wasm_bindings.cpp | 57 ++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git 
a/extension/wasm/test/unittests.js b/extension/wasm/test/unittests.js index 619bed9666f..71b0e943251 100644 --- a/extension/wasm/test/unittests.js +++ b/extension/wasm/test/unittests.js @@ -36,6 +36,16 @@ describe("Tensor", () => { tensor.delete(); }); + test("fromGenerator", () => { + function* generator() { + yield* [1, 2, 3, 4]; + } + const tensor = et.Tensor.fromIter([2, 2], generator()); + expect(tensor.data).toEqual(new Float32Array([1, 2, 3, 4])); + expect(tensor.sizes).toEqual([2, 2]); + tensor.delete(); + }); + test("fromArray wrong size", () => { expect(() => et.Tensor.fromArray([3, 2], [1, 2, 3, 4])).toThrow(); }); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index b3b519c545c..6de5bfb140a 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -83,6 +83,19 @@ inline void assert_valid_numel( data.size()); } +template +std::vector convertJSGeneratorToNumberVector(val generator) { + std::vector data; + while (true) { + val next = generator.call("next"); + if (next["done"].as()) { + break; + } + data.push_back(next["value"].as()); + } + return data; +} + class JsTensor { public: JsTensor() = delete; @@ -204,6 +217,49 @@ class JsTensor { } } + static std::unique_ptr from_iter( + val_array sizes, + val_array data, + val type = val::undefined(), + val_array dim_order = val::undefined(), + val_array strides = val::undefined()) { + auto sizes_vec = + convertJSArrayToNumberVector(sizes); + + auto dim_order_vec = dim_order.isUndefined() + ? std::vector() + : convertJSArrayToNumberVector( + dim_order); + auto strides_vec = strides.isUndefined() + ? std::vector() + : convertJSArrayToNumberVector(strides); + + // If type is undefined, infer the type from the data. + // Assume it is a Bigint if not Number. + ScalarType scalar_type = type.isUndefined() + ? (data["length"].as() == 0 || data[0].isNumber() + ? 
ScalarType::Float + : ScalarType::Long) + : type.as(); + switch (scalar_type) { +#define JS_CASE_FROM_ITER_VECTOR_TYPE(T, NAME) \ + case ScalarType::NAME: { \ + auto data_vec = convertJSGeneratorToNumberVector(data); \ + assert_valid_numel(data_vec, sizes_vec); \ + TensorPtr tensor = make_tensor_ptr( \ + std::move(sizes_vec), \ + std::move(data_vec), \ + std::move(dim_order_vec), \ + std::move(strides_vec), \ + ScalarType::NAME); \ + return std::make_unique(std::move(tensor)); \ + } + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FROM_ITER_VECTOR_TYPE) + default: + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + } + } + private: TensorPtr tensor_; }; @@ -448,6 +504,7 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { .class_function("ones", &JsTensor::ones) .class_function("full", &JsTensor::full) .class_function("fromArray", &JsTensor::from_array) + .class_function("fromIter", &JsTensor::from_iter) .property("scalarType", &JsTensor::get_scalar_type) .property("data", &JsTensor::get_data) .property("sizes", &JsTensor::get_sizes); From da58ab2feec24f6a2af80298eb0bf84c2eba1be1 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 12:56:47 -0700 Subject: [PATCH 18/26] to_evalue check for undefined --- extension/wasm/wasm_bindings.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 6de5bfb140a..1394c6903b3 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -266,6 +266,9 @@ class JsTensor { // Converts JS value to EValue. 
EValue to_evalue(val v) { + if (v.isUndefined()) { + THROW_JS_ERROR(TypeError, "Value cannot be undefined"); + } if (v.isNull()) { return EValue(); } else if (v.isNumber()) { From fb5ae688e766e9b8577588f4ac3889daf7585e2f Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 14:03:17 -0700 Subject: [PATCH 19/26] Renamed option to build unit tests --- extension/wasm/CMakeLists.txt | 2 +- scripts/build_wasm_tests.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/extension/wasm/CMakeLists.txt b/extension/wasm/CMakeLists.txt index 5d51e3fc859..f6095c144ec 100644 --- a/extension/wasm/CMakeLists.txt +++ b/extension/wasm/CMakeLists.txt @@ -51,6 +51,6 @@ target_include_directories( ) target_link_libraries(executorch_wasm PUBLIC ${link_libraries}) -if(BUILD_TESTING) +if(EXECUTORCH_BUILD_WASM_TESTS) add_subdirectory(test) endif() diff --git a/scripts/build_wasm_tests.sh b/scripts/build_wasm_tests.sh index fca5cc0c3d5..0a6b6f0b243 100644 --- a/scripts/build_wasm_tests.sh +++ b/scripts/build_wasm_tests.sh @@ -15,7 +15,7 @@ emcmake cmake . 
-DEXECUTORCH_BUILD_WASM=ON \ -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \ - -DBUILD_TESTING=ON \ + -DEXECUTORCH_BUILD_WASM_TESTS=ON \ -DCMAKE_BUILD_TYPE=Release \ -B"${CMAKE_OUT}" From f4e7144e12524fc1f8378ae4629c82024bb77868 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 14:25:31 -0700 Subject: [PATCH 20/26] Marked wasm bindings API as experimental --- extension/wasm/wasm_bindings.cpp | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 1394c6903b3..f9ad7911c3e 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -96,7 +96,10 @@ std::vector convertJSGeneratorToNumberVector(val generator) { return data; } -class JsTensor { +/** + * EXPERIMENTAL: JavaScript wrapper for ExecuTorch Tensor. + */ +class ET_EXPERIMENTAL JsTensor { public: JsTensor() = delete; JsTensor(const JsTensor&) = delete; @@ -312,8 +315,10 @@ val to_val(EValue v) { } } -// JS object containing tensor metadata. -struct JsTensorInfo { +/** + * EXPERIMENTAL: JavaScript object containing tensor metadata. + */ +struct ET_EXPERIMENTAL JsTensorInfo { val_array sizes; val_array dim_order; ScalarType scalar_type; @@ -332,8 +337,10 @@ struct JsTensorInfo { } }; -// JS object containing method metadata. -struct JsMethodMeta { +/** + * EXPERIMENTAL: JavaScript object containing method metadata. + */ +struct ET_EXPERIMENTAL JsMethodMeta { std::string name; val_array input_tags; val_array input_tensor_meta; @@ -385,8 +392,10 @@ struct JsMethodMeta { } }; -// Wrapper around extension/Module. -class JsModule final { +/** + * EXPERIMENTAL: Wrapper around extension/Module for JavaScript. 
+ */ +class ET_EXPERIMENTAL JsModule final { public: JsModule() = delete; JsModule(const JsModule&) = delete; From aa48efb507d65e4b979d2f314c1dd4f57fb66160 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Thu, 24 Jul 2025 17:03:37 -0700 Subject: [PATCH 21/26] Set enum names for convenience --- extension/wasm/test/unittests.js | 26 ++++++++++++++------------ extension/wasm/wasm_bindings.cpp | 9 +++++++++ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/extension/wasm/test/unittests.js b/extension/wasm/test/unittests.js index 71b0e943251..3a2c4fe7fd8 100644 --- a/extension/wasm/test/unittests.js +++ b/extension/wasm/test/unittests.js @@ -59,8 +59,7 @@ describe("Tensor", () => { test("scalar type", () => { const tensor = et.Tensor.ones([2, 2]); - // ScalarType can only be checked by strict equality. - expect(tensor.scalarType).toBe(et.ScalarType.Float); + expect(tensor.scalarType).toEqual(et.ScalarType.Float); tensor.delete(); }); @@ -68,8 +67,7 @@ describe("Tensor", () => { const tensor = et.Tensor.ones([2, 2], et.ScalarType.Long); expect(tensor.data).toEqual(new BigInt64Array([1n, 1n, 1n, 1n])); expect(tensor.sizes).toEqual([2, 2]); - // ScalarType can only be checked by strict equality. - expect(tensor.scalarType).toBe(et.ScalarType.Long); + expect(tensor.scalarType).toEqual(et.ScalarType.Long); tensor.delete(); }); @@ -78,8 +76,7 @@ describe("Tensor", () => { const tensor = et.Tensor.fromArray([2, 2], [1n, 2n, 3n, 4n]); expect(tensor.data).toEqual(new BigInt64Array([1n, 2n, 3n, 4n])); expect(tensor.sizes).toEqual([2, 2]); - // ScalarType can only be checked by strict equality. 
- expect(tensor.scalarType).toBe(et.ScalarType.Long); + expect(tensor.scalarType).toEqual(et.ScalarType.Long); tensor.delete(); }); }); @@ -124,8 +121,7 @@ describe("Module", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); expect(methodMeta.inputTags.length).toEqual(3); - // Tags can only be checked by strict equality. - methodMeta.inputTags.forEach((tag) => expect(tag).toBe(et.Tag.Tensor)); + expect(methodMeta.inputTags).toEqual([et.Tag.Tensor, et.Tag.Tensor, et.Tag.Tensor]); module.delete(); }); @@ -133,8 +129,7 @@ describe("Module", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); expect(methodMeta.outputTags.length).toEqual(1); - // Tags can only be checked by strict equality. - expect(methodMeta.outputTags[0]).toBe(et.Tag.Tensor); + expect(methodMeta.outputTags).toEqual([et.Tag.Tensor]); module.delete(); }); @@ -183,8 +178,7 @@ describe("Module", () => { const module = et.Module.load("add_mul.pte"); const methodMeta = module.getMethodMeta("forward"); methodMeta.inputTensorMeta.forEach((tensorInfo) => { - // ScalarType can only be checked by strict equality. - expect(tensorInfo.scalarType).toBe(et.ScalarType.Float); + expect(tensorInfo.scalarType).toEqual(et.ScalarType.Float); }); module.delete(); }); @@ -311,3 +305,11 @@ describe("Module", () => { }); }); }); + +describe("sanity", () => { + // Emscripten enums are equal by default for some reason. 
+ test("different enums are not equal", () => { + expect(et.ScalarType.Float).not.toEqual(et.ScalarType.Long); + expect(et.Tag.Int).not.toEqual(et.Tag.Double); + }); +}); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index f9ad7911c3e..7b03a3a1525 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -539,6 +539,15 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { &JsMethodMeta::memory_planned_buffer_sizes) .field("backends", &JsMethodMeta::backends) .field("numInstructions", &JsMethodMeta::num_instructions); + +// For some reason Embind doesn't make it easy to get the names of enums. +// Additionally, different enums of the same type are considered to be equal. +// Assigning the name field fixes both of these issues. +#define JS_ASSIGN_SCALAR_TYPE_NAME(T, NAME) \ + EM_ASM(Module.ScalarType.NAME.name = #NAME); + JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_ASSIGN_SCALAR_TYPE_NAME) +#define JS_ASSIGN_TAG_NAME(NAME) EM_ASM(Module.Tag.NAME.name = #NAME); + EXECUTORCH_FORALL_TAGS(JS_ASSIGN_TAG_NAME) } } // namespace wasm From 6f38b89a83b5489c9a40c7f773e723f0068ca153 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Fri, 25 Jul 2025 11:52:07 -0700 Subject: [PATCH 22/26] Module load from ArrayBuffer --- extension/wasm/test/unittests.js | 10 +++++++++- extension/wasm/wasm_bindings.cpp | 20 +++++++++++++------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/extension/wasm/test/unittests.js b/extension/wasm/test/unittests.js index 3a2c4fe7fd8..9abdd145f07 100644 --- a/extension/wasm/test/unittests.js +++ b/extension/wasm/test/unittests.js @@ -101,7 +101,7 @@ describe("Module", () => { module.delete(); }); - test("load from buffer", () => { + test("load from Uint8Array", () => { const data = FS.readFile('add.pte'); const module = et.Module.load(data); const methods = module.getMethods(); @@ -109,6 +109,14 @@ describe("Module", () => { module.delete(); }); + test("load from ArrayBuffer", () => { + const 
data = FS.readFile('add.pte'); + const module = et.Module.load(data.buffer); + const methods = module.getMethods(); + expect(methods).toEqual(["forward"]); + module.delete(); + }); + describe("MethodMeta", () => { test("name is forward", () => { const module = et.Module.load("add_mul.pte"); diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 7b03a3a1525..71b41e5dde4 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -409,6 +409,16 @@ class ET_EXPERIMENTAL JsModule final { explicit JsModule(std::vector buffer, std::unique_ptr module) : buffer_(std::move(buffer)), module_(std::move(module)) {} + static std::unique_ptr load_from_uint8_array(val data) { + size_t length = data["length"].as(); + std::vector buffer(length); + val memory_view = val(typed_memory_view(length, buffer.data())); + memory_view.call("set", data); + auto loader = std::make_unique(buffer.data(), length); + return std::make_unique( + std::move(buffer), std::make_unique(std::move(loader))); + } + static std::unique_ptr load(val data) { if (data.isNull() || data.isUndefined()) { THROW_JS_ERROR(TypeError, "Data cannot be null or undefined"); @@ -417,13 +427,9 @@ class ET_EXPERIMENTAL JsModule final { return std::make_unique( std::make_unique(data.as())); } else if (data.instanceof (val::global("Uint8Array"))) { - size_t length = data["length"].as(); - std::vector buffer(length); - val memory_view = val(typed_memory_view(length, buffer.data())); - memory_view.call("set", data); - auto loader = std::make_unique(buffer.data(), length); - return std::make_unique( - std::move(buffer), std::make_unique(std::move(loader))); + return load_from_uint8_array(data); + } else if (data.instanceof (val::global("ArrayBuffer"))) { + return load_from_uint8_array(val::global("Uint8Array").new_(data)); } else { THROW_JS_ERROR( TypeError, From 5cfbc1d4ffbacf6af987ddbe6e4df9d932aaf786 Mon Sep 17 00:00:00 2001 From: Conan Truong Date: Fri, 25 Jul 2025 
11:54:08 -0700 Subject: [PATCH 23/26] Bump PytorchTorch pin to 0725 Summary: Needed for pytorch/pytorch#158580 Differential Revision: D78989384 --- .ci/docker/ci_commit_pins/pytorch.txt | 2 +- install_requirements.py | 2 +- .../portable_type/c10/torch/headeronly/macros/Macros.h | 7 +++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt index 993fc34f5f5..6305196d2ad 100644 --- a/.ci/docker/ci_commit_pins/pytorch.txt +++ b/.ci/docker/ci_commit_pins/pytorch.txt @@ -1 +1 @@ -2dccff7dcf56b0d168ebfd7ca08bdeca37273c56 +6fc0ad22f0a07b6f38d138861c56a765d5a9bb02 diff --git a/install_requirements.py b/install_requirements.py index a2372a12bb9..978cc8a84b2 100644 --- a/install_requirements.py +++ b/install_requirements.py @@ -71,7 +71,7 @@ def python_is_compatible(): # # NOTE: If you're changing, make the corresponding change in .ci/docker/ci_commit_pins/pytorch.txt # by picking the hash from the same date in https://hud.pytorch.org/hud/pytorch/pytorch/nightly/ -NIGHTLY_VERSION = "dev20250723" +NIGHTLY_VERSION = "dev20250725" def install_requirements(use_pytorch_nightly): diff --git a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h index 1e07ab0446e..3a4fc393696 100644 --- a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h +++ b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h @@ -412,6 +412,13 @@ extern SYCL_EXTERNAL void __assert_fail( const char* file, unsigned int line, const char* func); +#elif (defined(__EMSCRIPTEN__)) +// As defined in assert.h in the Emscripten stdlib +_Noreturn void __assert_fail( + const char* expr, + const char* file, + int line, + const char* func); #else // __SYCL_DEVICE_ONLY__ #if (defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__))) // CUDA supports __assert_fail function which are common for both device From 
3345fe9b65f7a76bddb8fa09908ea4474bc27545 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Mon, 28 Jul 2025 13:04:32 -0700 Subject: [PATCH 24/26] Applied suggestions, turned on assertions, added dim order strides to unit tests --- extension/wasm/test/CMakeLists.txt | 7 +- extension/wasm/test/unittests.js | 16 ++ extension/wasm/wasm_bindings.cpp | 259 ++++++++++++++++++++++------- 3 files changed, 221 insertions(+), 61 deletions(-) diff --git a/extension/wasm/test/CMakeLists.txt b/extension/wasm/test/CMakeLists.txt index 04acd99d326..02e4cb444a3 100644 --- a/extension/wasm/test/CMakeLists.txt +++ b/extension/wasm/test/CMakeLists.txt @@ -43,8 +43,13 @@ add_custom_target( add_executable(executorch_wasm_tests) target_link_libraries(executorch_wasm_tests PUBLIC executorch_wasm) target_link_options( - executorch_wasm_tests PUBLIC --embed-file "${MODELS_DIR}@/" --post-js + executorch_wasm_tests + PUBLIC + --embed-file + "${MODELS_DIR}@/" + --post-js ${CMAKE_CURRENT_SOURCE_DIR}/unittests.js + -sASSERTIONS=2 ) set_target_properties( executorch_wasm_tests PROPERTIES OUTPUT_NAME "executorch_wasm.test" diff --git a/extension/wasm/test/unittests.js b/extension/wasm/test/unittests.js index 9abdd145f07..1eeadd193d8 100644 --- a/extension/wasm/test/unittests.js +++ b/extension/wasm/test/unittests.js @@ -79,6 +79,22 @@ describe("Tensor", () => { expect(tensor.scalarType).toEqual(et.ScalarType.Long); tensor.delete(); }); + + test("with dim order and strides", () => { + const tensor = et.Tensor.fromArray([2, 2], [1, 2, 3, 4], et.ScalarType.Float, [0, 1], [2, 1]); + expect(tensor.data).toEqual(new Float32Array([1, 2, 3, 4])); + expect(tensor.sizes).toEqual([2, 2]); + tensor.delete(); + }); + + test("incorrect dim order", () => { + expect(() => et.Tensor.fromArray([2, 2], [1, 2, 3, 4], et.ScalarType.Float, [1])).toThrow(); + expect(() => et.Tensor.fromArray([2, 2], [1, 2, 3, 4], et.ScalarType.Float, [1, 2])).toThrow(); + }); + + test("incorrect strides", () => { + expect(() => 
et.Tensor.fromArray([2, 2], [1, 2, 3, 4], et.ScalarType.Float, [1, 1], [2, 1])).toThrow(); + }); }); describe("Module", () => { diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 71b41e5dde4..2bc1a2b43e9 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -11,6 +11,8 @@ #include #include #include +#include +#include #define THROW_JS_ERROR(errorType, message, ...) \ ({ \ @@ -83,6 +85,8 @@ inline void assert_valid_numel( data.size()); } +constexpr size_t MAX_ELEMENTS = 8 * 1024 * 1024; + template std::vector convertJSGeneratorToNumberVector(val generator) { std::vector data; @@ -92,10 +96,61 @@ std::vector convertJSGeneratorToNumberVector(val generator) { break; } data.push_back(next["value"].as()); + if (data.size() >= MAX_ELEMENTS) { + THROW_JS_ERROR( + RangeError, + "Generator exceeded maximum element count of %zu", + MAX_ELEMENTS); + } } return data; } +// make_tensor_ptr() assertions will abort the program if they fail. +// These checks will throw a JS error instead. 
+void assert_dim_order_and_strides_valid( + const std::vector& sizes, + std::vector& dim_order, + std::vector& strides) { + THROW_IF_FALSE( + dim_order.size() == 0 || dim_order.size() == sizes.size(), + "dim_order size must match sizes or be empty."); + THROW_IF_FALSE( + strides.size() == 0 || strides.size() == sizes.size(), + "strides size must match sizes or be empty."); + + if (dim_order.empty()) { + dim_order.resize(sizes.size()); + std::iota(dim_order.begin(), dim_order.end(), 0); + if (!strides.empty()) { + std::sort(dim_order.begin(), dim_order.end(), [&](size_t a, size_t b) { + return strides[a] > strides[b]; + }); + } + } + std::vector computed_strides(sizes.size()); + + auto error = runtime::dim_order_to_stride( + sizes.data(), dim_order.data(), sizes.size(), computed_strides.data()); + THROW_IF_ERROR(error, "Failed to compute strides."); + + if (!strides.empty()) { + for (size_t i = 0; i < sizes.size(); i++) { + THROW_IF_FALSE( + strides[i] == computed_strides[i] || sizes[i] == 1, + "invalid strides for dim %zu: %" ET_PRI_SIZES_AND_STRIDES + "!= %" ET_PRI_SIZES_AND_STRIDES + " while its size is %" ET_PRI_SIZES_AND_STRIDES " != 1", + i, + strides[i], + computed_strides[i], + sizes[i]); + } + } + + strides = std::move(computed_strides); +} + /** * EXPERIMENTAL: JavaScript wrapper for ExecuTorch Tensor. */ @@ -136,13 +191,20 @@ class ET_EXPERIMENTAL JsTensor { return val::array(get_tensor().sizes().begin(), get_tensor().sizes().end()); } + static std::unique_ptr full(val_array sizes, val fill_value) { + // If type is unspecified, infer the type from the fill value. + // Assume it is a Bigint if not Number. + return full( + sizes, + fill_value, + fill_value.isNumber() ? 
ScalarType::Float : ScalarType::Long); + } + static std::unique_ptr - full(val_array sizes, val fill_value, val type = val::undefined()) { + full(val_array sizes, val fill_value, ScalarType type) { auto sizes_vec = convertJSArrayToNumberVector(sizes); - ScalarType scalar_type = - type.isUndefined() ? ScalarType::Float : type.as(); - switch (scalar_type) { + switch (type) { #define JS_CASE_FULL_VECTOR_TYPE(T, NAME) \ case ScalarType::NAME: { \ TensorPtr tensor = \ @@ -151,57 +213,76 @@ class ET_EXPERIMENTAL JsTensor { } JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FULL_VECTOR_TYPE) default: - THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", type); } } + static std::unique_ptr zeros(val_array sizes) { + return zeros(sizes, ScalarType::Float); + } + static std::unique_ptr zeros( val_array sizes, - val type = val::undefined()) { + ScalarType type) { auto sizes_vec = convertJSArrayToNumberVector(sizes); - ScalarType scalar_type = - type.isUndefined() ? ScalarType::Float : type.as(); - TensorPtr tensor = extension::zeros(sizes_vec, scalar_type); + TensorPtr tensor = extension::zeros(sizes_vec, type); return std::make_unique(std::move(tensor)); } - static std::unique_ptr ones( - val_array sizes, - val type = val::undefined()) { + static std::unique_ptr ones(val_array sizes) { + return ones(sizes, ScalarType::Float); + } + + static std::unique_ptr ones(val_array sizes, ScalarType type) { auto sizes_vec = convertJSArrayToNumberVector(sizes); - ScalarType scalar_type = - type.isUndefined() ? ScalarType::Float : type.as(); - TensorPtr tensor = extension::ones(sizes_vec, scalar_type); + TensorPtr tensor = extension::ones(sizes_vec, type); return std::make_unique(std::move(tensor)); } + static std::unique_ptr from_array( + val_array sizes, + val_array data) { + // If type is unspecified, infer the type from the data. + // Assume it is a Bigint if not Number. 
+ return from_array( + sizes, + data, + data["length"].as() == 0 || data[0].isNumber() + ? ScalarType::Float + : ScalarType::Long); + } + + static std::unique_ptr + from_array(val_array sizes, val_array data, ScalarType type) { + return from_array(sizes, data, type, val::array()); + } + + static std::unique_ptr from_array( + val_array sizes, + val_array data, + ScalarType type, + val_array dim_order) { + return from_array(sizes, data, type, dim_order, val::array()); + } + static std::unique_ptr from_array( val_array sizes, val_array data, - val type = val::undefined(), - val_array dim_order = val::undefined(), - val_array strides = val::undefined()) { + ScalarType type, + val_array dim_order, + val_array strides) { auto sizes_vec = convertJSArrayToNumberVector(sizes); - auto dim_order_vec = dim_order.isUndefined() - ? std::vector() - : convertJSArrayToNumberVector( - dim_order); - auto strides_vec = strides.isUndefined() - ? std::vector() - : convertJSArrayToNumberVector(strides); + auto dim_order_vec = + convertJSArrayToNumberVector(dim_order); + auto strides_vec = + convertJSArrayToNumberVector(strides); - // If type is undefined, infer the type from the data. - // Assume it is a Bigint if not Number. - ScalarType scalar_type = type.isUndefined() - ? (data["length"].as() == 0 || data[0].isNumber() - ? 
ScalarType::Float - : ScalarType::Long) - : type.as(); - switch (scalar_type) { + assert_dim_order_and_strides_valid(sizes_vec, dim_order_vec, strides_vec); + switch (type) { #define JS_CASE_FROM_ARRAY_VECTOR_TYPE(T, NAME) \ case ScalarType::NAME: { \ auto data_vec = convertJSArrayToNumberVector(data); \ @@ -216,35 +297,45 @@ class ET_EXPERIMENTAL JsTensor { } JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FROM_ARRAY_VECTOR_TYPE) default: - THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", type); } } + static std::unique_ptr from_iter( + val_array sizes, + val_array data) { + return from_iter(sizes, data, ScalarType::Float); + } + + static std::unique_ptr + from_iter(val_array sizes, val_array data, ScalarType type) { + return from_iter(sizes, data, type, val::array()); + } + static std::unique_ptr from_iter( val_array sizes, val_array data, - val type = val::undefined(), - val_array dim_order = val::undefined(), - val_array strides = val::undefined()) { + ScalarType type, + val_array dim_order) { + return from_iter(sizes, data, type, dim_order, val::array()); + } + + static std::unique_ptr from_iter( + val_array sizes, + val_array data, + ScalarType type, + val_array dim_order, + val_array strides) { auto sizes_vec = convertJSArrayToNumberVector(sizes); + auto dim_order_vec = + convertJSArrayToNumberVector(dim_order); + auto strides_vec = + convertJSArrayToNumberVector(strides); - auto dim_order_vec = dim_order.isUndefined() - ? std::vector() - : convertJSArrayToNumberVector( - dim_order); - auto strides_vec = strides.isUndefined() - ? std::vector() - : convertJSArrayToNumberVector(strides); + assert_dim_order_and_strides_valid(sizes_vec, dim_order_vec, strides_vec); - // If type is undefined, infer the type from the data. - // Assume it is a Bigint if not Number. - ScalarType scalar_type = type.isUndefined() - ? (data["length"].as() == 0 || data[0].isNumber() - ? 
ScalarType::Float - : ScalarType::Long) - : type.as(); - switch (scalar_type) { + switch (type) { #define JS_CASE_FROM_ITER_VECTOR_TYPE(T, NAME) \ case ScalarType::NAME: { \ auto data_vec = convertJSGeneratorToNumberVector(data); \ @@ -259,7 +350,7 @@ class ET_EXPERIMENTAL JsTensor { } JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_CASE_FROM_ITER_VECTOR_TYPE) default: - THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", scalar_type); + THROW_JS_ERROR(TypeError, "Unsupported Tensor type: %d", type); } } @@ -294,7 +385,7 @@ EValue to_evalue(val v) { } // Converts EValue to JS value. -val to_val(EValue v) { +val to_val(EValue&& v) { if (v.isNone()) { return val::null(); } else if (v.isInt()) { @@ -304,7 +395,7 @@ val to_val(EValue v) { } else if (v.isBool()) { return val(v.toBool()); } else if (v.isTensor()) { - Tensor tensor = v.toTensor(); + Tensor tensor = std::move(v).toTensor(); std::unique_ptr wrapper = std::make_unique(std::move(tensor)); return val(std::move(wrapper)); @@ -518,11 +609,59 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { .function("execute", &JsModule::execute) .function("forward", &JsModule::forward); class_("Tensor") - .class_function("zeros", &JsTensor::zeros) - .class_function("ones", &JsTensor::ones) - .class_function("full", &JsTensor::full) - .class_function("fromArray", &JsTensor::from_array) - .class_function("fromIter", &JsTensor::from_iter) + .class_function( + "zeros", + select_overload(val)>(&JsTensor::zeros)) + .class_function( + "zeros", + select_overload(val, ScalarType)>( + &JsTensor::zeros)) + .class_function( + "ones", + select_overload(val)>(&JsTensor::ones)) + .class_function( + "ones", + select_overload(val, ScalarType)>( + &JsTensor::ones)) + .class_function( + "full", + select_overload(val, val)>(&JsTensor::full)) + .class_function( + "full", + select_overload(val, val, ScalarType)>( + &JsTensor::full)) + .class_function( + "fromArray", + select_overload(val, val)>( + &JsTensor::from_array)) + .class_function( + "fromArray", + 
select_overload(val, val, ScalarType)>( + &JsTensor::from_array)) + .class_function( + "fromArray", + select_overload(val, val, ScalarType, val)>( + &JsTensor::from_array)) + .class_function( + "fromArray", + select_overload( + val, val, ScalarType, val, val)>(&JsTensor::from_array)) + .class_function( + "fromIter", + select_overload(val, val)>( + &JsTensor::from_iter)) + .class_function( + "fromIter", + select_overload(val, val, ScalarType)>( + &JsTensor::from_iter)) + .class_function( + "fromIter", + select_overload(val, val, ScalarType, val)>( + &JsTensor::from_iter)) + .class_function( + "fromIter", + select_overload( + val, val, ScalarType, val, val)>(&JsTensor::from_iter)) .property("scalarType", &JsTensor::get_scalar_type) .property("data", &JsTensor::get_data) .property("sizes", &JsTensor::get_sizes); From f3ba7491c6add007195e4248ffcf8f2c7b8a1d90 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Mon, 28 Jul 2025 14:23:46 -0700 Subject: [PATCH 25/26] Added case for error messages longer than 256 characters --- extension/wasm/wasm_bindings.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 2bc1a2b43e9..3010f4a170b 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -14,12 +14,19 @@ #include #include -#define THROW_JS_ERROR(errorType, message, ...) \ - ({ \ - char msg_buf[256]; \ - snprintf(msg_buf, sizeof(msg_buf), message, ##__VA_ARGS__); \ - EM_ASM(throw new errorType(UTF8ToString($0)), msg_buf); \ - __builtin_unreachable(); \ +#define THROW_JS_ERROR(errorType, message, ...) 
\ + ({ \ + char msg_buf[256]; \ + int len = snprintf(msg_buf, sizeof(msg_buf), message, ##__VA_ARGS__); \ + if (len < sizeof(msg_buf)) { \ + EM_ASM(throw new errorType(UTF8ToString($0)), msg_buf); \ + } else { \ + std::string msg; \ + msg.resize(len); \ + snprintf(&msg[0], len + 1, message, ##__VA_ARGS__); \ + EM_ASM(throw new errorType(UTF8ToString($0)), msg.c_str()); \ + } \ + __builtin_unreachable(); \ }) /// Throws a JavaScript Error with the provided message if `error` is not `Ok`. From f8062597bc356e1b231fece23acc19b122f41dd9 Mon Sep 17 00:00:00 2001 From: Conan Jeffrey Truong Date: Mon, 28 Jul 2025 18:26:32 -0700 Subject: [PATCH 26/26] Added assertion to verify object is tensor --- extension/wasm/wasm_bindings.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/extension/wasm/wasm_bindings.cpp b/extension/wasm/wasm_bindings.cpp index 3010f4a170b..6ba41236868 100644 --- a/extension/wasm/wasm_bindings.cpp +++ b/extension/wasm/wasm_bindings.cpp @@ -384,6 +384,11 @@ EValue to_evalue(val v) { return EValue(v.as()); } else if (type_str == "object") { // If it is an object, assume it is a tensor. + THROW_IF_FALSE( + v.instanceof + (val::module_property("Tensor")), + "Received non-tensor object: %s", + val::global("JSON").call("stringify", v).c_str()); return EValue(v.as().get_tensor()); } THROW_JS_ERROR( @@ -696,9 +701,10 @@ EMSCRIPTEN_BINDINGS(WasmBindings) { // Additionally, different enums of the same type are considered to be equal. // Assigning the name field fixes both of these issues. #define JS_ASSIGN_SCALAR_TYPE_NAME(T, NAME) \ - EM_ASM(Module.ScalarType.NAME.name = #NAME); + val::module_property("ScalarType")[#NAME].set("name", #NAME); JS_FORALL_SUPPORTED_TENSOR_TYPES(JS_ASSIGN_SCALAR_TYPE_NAME) -#define JS_ASSIGN_TAG_NAME(NAME) EM_ASM(Module.Tag.NAME.name = #NAME); +#define JS_ASSIGN_TAG_NAME(NAME) \ + val::module_property("Tag")[#NAME].set("name", #NAME); EXECUTORCH_FORALL_TAGS(JS_ASSIGN_TAG_NAME) }