Skip to content

Commit f288816

Browse files
cccclai authored and facebook-github-bot committed
Optionally add qnn backend to llama runner buck file
Summary: Include the qnn backend as part of the llama runner dependency; it is controlled by a build flag, which defaults to false. Differential Revision: D64334713
1 parent 7493aae commit f288816

File tree

1 file changed

+9
-3
lines changed

1 file changed

+9
-3
lines changed

examples/models/llama/runner/targets.bzl

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ def _get_operator_lib(aten = False):
1212
def define_common_targets():
1313
for aten in (True, False):
1414
aten_suffix = "_aten" if aten else ""
15-
1615
runtime.cxx_library(
1716
name = "runner" + aten_suffix,
1817
srcs = [
@@ -27,7 +26,6 @@ def define_common_targets():
2726
visibility = [
2827
"@EXECUTORCH_CLIENTS",
2928
],
30-
# qnn_executorch_backend can be added below //executorch/backends/qualcomm:qnn_executorch_backend
3129
exported_deps = [
3230
"//executorch/backends/xnnpack:xnnpack_backend",
3331
"//executorch/extension/llm/runner:stats",
@@ -46,7 +44,15 @@ def define_common_targets():
4644
# Vulkan API currently cannot build on some platforms (e.g. Apple, FBCODE)
4745
# Therefore enable it explicitly for now to avoid failing tests
4846
"//executorch/backends/vulkan:vulkan_backend_lib",
49-
] if native.read_config("llama", "use_vulkan", "0") == "1" else []),
47+
] if native.read_config("llama", "use_vulkan", "0") == "1" else []) + ([
48+
# //executorch/backends/qualcomm:qnn_executorch_backend doesn't work,
49+
# likely due to it's an empty library with dependency only
50+
"//executorch/backends/qualcomm/runtime:runtime",
51+
] if native.read_config(
52+
"executorch",
53+
"enable_qnn",
54+
"false",
55+
) else []),
5056
external_deps = [
5157
"libtorch",
5258
] if aten else [],

0 commit comments

Comments
 (0)