@@ -83,8 +83,6 @@ runtime.python_binary(
     ],
     deps = [
         ":export_library",
-        ":export_llama_args",
-        ":export_llama_hydra",
         "//caffe2:torch",
         "//executorch/extension/pybindings:aten_lib",
     ],
@@ -135,6 +133,8 @@ runtime.python_library(
     name = "export_library",
     srcs = [
         "export_llama.py",
+        "export_llama_args.py",
+        "export_llama_hydra.py",
         "export_llama_lib.py",
         "model.py",
     ],
@@ -235,41 +235,6 @@ runtime.python_library(
     ],
 )
 
-runtime.python_library(
-    name = "export_llama_args",
-    srcs = [
-        "export_llama_args.py",
-    ],
-    _is_external_target = True,
-    base_module = "executorch.examples.models.llama",
-    visibility = [
-        "//executorch/examples/...",
-        "@EXECUTORCH_CLIENTS",
-    ],
-    deps = [
-        ":export_library",
-    ],
-)
-
-runtime.python_library(
-    name = "export_llama_hydra",
-    srcs = [
-        "export_llama_hydra.py",
-    ],
-    _is_external_target = True,
-    base_module = "executorch.examples.models.llama",
-    visibility = [
-        "//executorch/examples/...",
-        "@EXECUTORCH_CLIENTS",
-    ],
-    deps = [
-        ":export_library",
-        "//executorch/examples/models/llama/config:llm_config",
-        "fbsource//third-party/pypi/hydra-core:hydra-core",
-        "fbsource//third-party/pypi/omegaconf:omegaconf",
-    ],
-)
-
 runtime.python_test(
     name = "quantized_kv_cache_test",
     srcs = [