diff --git a/CMakeLists.txt b/CMakeLists.txt index e4a3322c24d..c5b9ae35ce8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,5 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. +# Copyright 2024 Arm Limited and/or its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the @@ -581,6 +582,10 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER) list(APPEND _executor_runner_libs portable_ops_lib) endif() + if(EXECUTORCH_ENABLE_EVENT_TRACER) + list(APPEND _executor_runner_libs etdump flatccrt) + endif() + # Generate lib to register quantized ops if(EXECUTORCH_BUILD_KERNELS_QUANTIZED) list(APPEND _executor_runner_libs quantized_ops_lib) diff --git a/backends/xnnpack/CMakeLists.txt b/backends/xnnpack/CMakeLists.txt index 1ac7867f3c0..90c23c5aa07 100644 --- a/backends/xnnpack/CMakeLists.txt +++ b/backends/xnnpack/CMakeLists.txt @@ -1,4 +1,5 @@ # Copyright (c) Meta Platforms, Inc. and affiliates. +# Copyright 2024 Arm Limited and/or its affiliates. # All rights reserved. 
# # This source code is licensed under the BSD-style license found in the @@ -113,8 +114,14 @@ if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$") # list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/") add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs}) + + if(EXECUTORCH_ENABLE_EVENT_TRACER) + list(APPEND xnn_executor_runner_libs etdump) + endif() + list(APPEND xnn_executor_runner_libs xnnpack_backend) + target_link_libraries( - xnn_executor_runner xnnpack_backend gflags portable_ops_lib + xnn_executor_runner gflags portable_ops_lib ${xnn_executor_runner_libs} ) target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options}) endif() diff --git a/docs/source/native-delegates-executorch-xnnpack-delegate.md b/docs/source/native-delegates-executorch-xnnpack-delegate.md index 1d12daef9d8..8d9ec45c620 100644 --- a/docs/source/native-delegates-executorch-xnnpack-delegate.md +++ b/docs/source/native-delegates-executorch-xnnpack-delegate.md @@ -74,7 +74,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors. #### **Profiling** -We have enabled basic profiling for XNNPACK delegate that can be enabled with the following compiler flag `-DENABLE_XNNPACK_PROFILING`. With ExecuTorch's SDK integration, you can also now use the SDK tools to profile the model. You can follow the steps in [Using the ExecuTorch SDK to Profile a Model](./tutorials/sdk-integration-tutorial) on how to profile ExecuTorch models and use SDK's Inspector API to view XNNPACK's internal profiling information. +We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). 
With ExecuTorch's SDK integration, you can also now use the SDK tools to profile the model. You can follow the steps in [Using the ExecuTorch SDK to Profile a Model](./tutorials/sdk-integration-tutorial) on how to profile ExecuTorch models and use SDK's Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)). [comment]: <> (TODO: Refactor quantizer to a more official quantization doc) diff --git a/docs/source/tutorial-xnnpack-delegate-lowering.md b/docs/source/tutorial-xnnpack-delegate-lowering.md index 4491a6e8c80..e97de48d3b0 100644 --- a/docs/source/tutorial-xnnpack-delegate-lowering.md +++ b/docs/source/tutorial-xnnpack-delegate-lowering.md @@ -171,3 +171,6 @@ Now you should be able to find the executable built at `./cmake-out/backends/xnn ## Building and Linking with the XNNPACK Backend You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](demo-apps-android.md) next. + +## Profiling +To enable profiling in the `xnn_executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_SDK=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `xnn_executor_runner --help` for details). diff --git a/examples/portable/executor_runner/executor_runner.cpp b/examples/portable/executor_runner/executor_runner.cpp index a0644487d23..132f85feb1f 100644 --- a/examples/portable/executor_runner/executor_runner.cpp +++ b/examples/portable/executor_runner/executor_runner.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) Meta Platforms, Inc. 
and affiliates. + * Copyright 2024 Arm Limited and/or its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the @@ -29,6 +30,9 @@ #include #include #include +#ifdef ET_EVENT_TRACER_ENABLED +#include <executorch/sdk/etdump/etdump_flatcc.h> +#endif // ET_EVENT_TRACER_ENABLED static uint8_t method_allocator_pool[4 * 1024U * 1024U]; // 4 MB @@ -36,6 +40,13 @@ DEFINE_string( model_path, "model.pte", "Model serialized in flatbuffer format."); +DEFINE_uint32(num_executions, 1, "Number of times to run the model."); +#ifdef ET_EVENT_TRACER_ENABLED +DEFINE_string( + etdump_path, + "model.etdump", + "If ETDump generation is enabled an ETDump will be written out to this path."); +#endif // ET_EVENT_TRACER_ENABLED using namespace torch::executor; using torch::executor::util::FileDataLoader; @@ -142,8 +153,20 @@ int main(int argc, char** argv) { // the method can mutate the memory-planned buffers, so the method should only // be used by a single thread at at time, but it can be reused. // - - Result<Method> method = program->load_method(method_name, &memory_manager); + EventTracer* event_tracer_ptr = nullptr; +#ifdef ET_EVENT_TRACER_ENABLED + std::unique_ptr<FILE, decltype(&fclose)> etdump_file( + fopen(FLAGS_etdump_path.c_str(), "w+"), fclose); + ET_CHECK_MSG( + etdump_file, + "Failed to open ETDump file at %s.", + FLAGS_etdump_path.c_str()); + + torch::executor::ETDumpGen etdump_gen = torch::executor::ETDumpGen(); + event_tracer_ptr = &etdump_gen; +#endif // ET_EVENT_TRACER_ENABLED + Result<Method> method = + program->load_method(method_name, &memory_manager, event_tracer_ptr); ET_CHECK_MSG( method.ok(), "Loading of method %s failed with status 0x%" PRIx32, @@ -162,18 +185,20 @@ ET_LOG(Info, "Inputs prepared."); // Run the model. 
- Error status = method->execute(); - ET_CHECK_MSG( - status == Error::Ok, - "Execution of method %s failed with status 0x%" PRIx32, - method_name, - (uint32_t)status); - ET_LOG(Info, "Model executed successfully."); + for (uint32_t i = 0; i < FLAGS_num_executions; i++) { + Error status = method->execute(); + ET_CHECK_MSG( + status == Error::Ok, + "Execution of method %s failed with status 0x%" PRIx32, + method_name, + (uint32_t)status); + } + ET_LOG(Info, "Model executed successfully %" PRIu32 " time(s).", FLAGS_num_executions); // Print the outputs. std::vector<EValue> outputs(method->outputs_size()); ET_LOG(Info, "%zu outputs: ", outputs.size()); - status = method->get_outputs(outputs.data(), outputs.size()); + Error status = method->get_outputs(outputs.data(), outputs.size()); ET_CHECK(status == Error::Ok); // Print the first and last 100 elements of long lists of scalars. std::cout << torch::executor::util::evalue_edge_items(100); @@ -181,5 +206,18 @@ std::cout << "Output " << i << ": " << outputs[i] << std::endl; } +#ifdef ET_EVENT_TRACER_ENABLED + // Dump the ETDump data containing profiling/debugging data to the specified + // file. + etdump_result result = etdump_gen.get_etdump_data(); + if (result.buf != nullptr && result.size > 0) { + fwrite((uint8_t*)result.buf, 1, result.size, etdump_file.get()); + free(result.buf); + ET_LOG(Info, "ETDump written to file '%s'.", FLAGS_etdump_path.c_str()); + } else { + ET_LOG(Error, "No ETDump data available!"); + } +#endif // ET_EVENT_TRACER_ENABLED + return 0; }