@@ -82,6 +82,8 @@ runtime.python_binary(
8282 ],
8383 deps = [
8484 ":export_library",
85+ ":export_llama_args",
86+ ":export_llama_hydra",
8587 "//caffe2:torch",
8688 "//executorch/extension/pybindings:aten_lib",
8789 ],
@@ -148,6 +150,8 @@ runtime.python_library(
148150 ":source_transformation",
149151 "//ai_codesign/gen_ai/fast_hadamard_transform:fast_hadamard_transform",
150152 "//caffe2:torch",
153+ "//executorch/backends/vulkan/_passes:vulkan_passes",
154+ "//executorch/examples/models/llama/config:llm_config",
151155 "//executorch/exir/passes:init_mutable_pass",
152156 "//executorch/examples/models:model_base",
153157 "//executorch/examples/models:models",
@@ -230,6 +234,40 @@ runtime.python_library(
230234 ],
231235)
232236
# Library wrapping export_llama_args.py; depends only on the core export
# library. NOTE(review): presumably the plain-args (argparse-style) export
# entry point, as opposed to the hydra variant below — confirm with the module.
runtime.python_library(
    name = "export_llama_args",
    srcs = [
        "export_llama_args.py",
    ],
    _is_external_target = True,
    base_module = "executorch.examples.models.llama",
    visibility = [
        "//executorch/examples/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        ":export_library",
    ],
)
252+
# Library wrapping export_llama_hydra.py. Pulls in the llm_config target and
# the third-party hydra-core package in addition to the core export library.
# NOTE(review): presumably the hydra-config-driven export entry point — confirm.
runtime.python_library(
    name = "export_llama_hydra",
    srcs = [
        "export_llama_hydra.py",
    ],
    _is_external_target = True,
    base_module = "executorch.examples.models.llama",
    visibility = [
        "//executorch/examples/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        ":export_library",
        "//executorch/examples/models/llama/config:llm_config",
        "fbsource//third-party/pypi/hydra-core:hydra-core",
    ],
)
270+
233271runtime.python_test(
234272 name = "quantized_kv_cache_test",
235273 srcs = [