@@ -82,8 +82,6 @@ runtime.python_binary(
     ],
     deps = [
         ":export_library",
-        ":export_llama_args",
-        ":export_llama_hydra",
         "//caffe2:torch",
         "//executorch/extension/pybindings:aten_lib",
     ],
@@ -134,6 +132,8 @@ runtime.python_library(
     name = "export_library",
     srcs = [
         "export_llama.py",
+        "export_llama_args.py",
+        "export_llama_hydra.py",
         "export_llama_lib.py",
         "model.py",
     ],
@@ -234,40 +234,6 @@ runtime.python_library(
     ],
 )
 
-runtime.python_library(
-    name = "export_llama_args",
-    srcs = [
-        "export_llama_args.py",
-    ],
-    _is_external_target = True,
-    base_module = "executorch.examples.models.llama",
-    visibility = [
-        "//executorch/examples/...",
-        "@EXECUTORCH_CLIENTS",
-    ],
-    deps = [
-        ":export_library",
-    ],
-)
-
-runtime.python_library(
-    name = "export_llama_hydra",
-    srcs = [
-        "export_llama_hydra.py",
-    ],
-    _is_external_target = True,
-    base_module = "executorch.examples.models.llama",
-    visibility = [
-        "//executorch/examples/...",
-        "@EXECUTORCH_CLIENTS",
-    ],
-    deps = [
-        ":export_library",
-        "//executorch/examples/models/llama/config:llm_config",
-        "fbsource//third-party/pypi/hydra-core:hydra-core",
-    ],
-)
-
 runtime.python_test(
     name = "quantized_kv_cache_test",
     srcs = [