@@ -82,6 +82,8 @@ runtime.python_binary(
     ],
     deps = [
         ":export_library",
+        ":export_llama_args",
+        ":export_llama_hydra",
         "//caffe2:torch",
         "//executorch/extension/pybindings:aten_lib",
     ],
@@ -148,6 +150,8 @@ runtime.python_library(
         ":source_transformation",
         "//ai_codesign/gen_ai/fast_hadamard_transform:fast_hadamard_transform",
         "//caffe2:torch",
+        "//executorch/examples/models/llama/config:llm_config",
+        "//executorch/examples/models/llama/config:llm_config_utils",
         "//executorch/backends/vulkan/_passes:vulkan_passes",
         "//executorch/exir/passes:init_mutable_pass",
         "//executorch/examples/models:model_base",
@@ -231,6 +235,40 @@ runtime.python_library(
     ],
 )
 
+runtime.python_library(
+    name = "export_llama_args",
+    srcs = [
+        "export_llama_args.py",
+    ],
+    _is_external_target = True,
+    base_module = "executorch.examples.models.llama",
+    visibility = [
+        "//executorch/examples/...",
+        "@EXECUTORCH_CLIENTS",
+    ],
+    deps = [
+        ":export_library",
+    ],
+)
+
+runtime.python_library(
+    name = "export_llama_hydra",
+    srcs = [
+        "export_llama_hydra.py",
+    ],
+    _is_external_target = True,
+    base_module = "executorch.examples.models.llama",
+    visibility = [
+        "//executorch/examples/...",
+        "@EXECUTORCH_CLIENTS",
+    ],
+    deps = [
+        ":export_library",
+        "//executorch/examples/models/llama/config:llm_config",
+        "fbsource//third-party/pypi/hydra-core:hydra-core",
+    ],
+)
+
 runtime.python_test(
     name = "quantized_kv_cache_test",
     srcs = [