Commit 9d77d2e

Move llama_transformer to extension/llm

1 parent: 92c9b51
27 files changed (+2155, -1882 lines)

backends/arm/test/models/test_llama.py

Lines changed: 2 additions & 2 deletions

@@ -22,13 +22,13 @@
     TosaPipelineBI,
     TosaPipelineMI,
 )
-
-from executorch.extension.llm.export.config.llm_config import LlmConfig
 from executorch.examples.models.llama.export_llama_lib import (
     build_args_parser,
     get_llama_model,
 )

+from executorch.extension.llm.export.config.llm_config import LlmConfig
+
 input_t = Tuple[torch.Tensor]

 # Add project dir to sys path to workaround importlib.import_module() conditions in model_factory.py

examples/apple/mps/scripts/mps_example.py

Lines changed: 2 additions & 2 deletions

@@ -19,8 +19,6 @@
 from executorch.devtools.bundled_program.serialize import (
     serialize_from_bundled_program_to_flatbuffer,
 )
-
-from executorch.extension.llm.export.config.llm_config import LlmConfig
 from executorch.exir import (
     EdgeCompileConfig,
     EdgeProgramManager,
@@ -31,6 +29,8 @@
 from executorch.exir.capture._config import ExecutorchBackendConfig
 from executorch.extension.export_util.utils import export_to_edge, save_pte_program

+from executorch.extension.llm.export.config.llm_config import LlmConfig
+
 from ....models import MODEL_NAME_TO_MODEL
 from ....models.model_factory import EagerModelFactory

examples/models/llama/TARGETS

Lines changed: 5 additions & 11 deletions

@@ -13,10 +13,6 @@ runtime.python_library(
     name = "llama_transformer",
     srcs = [
         "llama_transformer.py",
-        "rope.py",
-        "attention.py",
-        "model_args.py",
-        "norm.py",
     ],
     _is_external_target = True,
     base_module = "executorch.examples.models.llama",
@@ -26,23 +22,21 @@ runtime.python_library(
     ],
     deps = [
         "//caffe2:torch",
+        "//executorch/extension/llm/modeling/text_decoder:text_decoder_attention",
+        "//executorch/extension/llm/modeling/text_decoder:text_decoder_model_args",
+        "//executorch/extension/llm/modeling/text_decoder:text_decoder_norm",
+        "//executorch/extension/llm/modeling/text_decoder:text_decoder_rope",
     ],
 )

 runtime.python_library(
     name = "static_attention",
-    srcs = [
-        "static_attention.py",
-    ],
-    _is_external_target = True,
-    base_module = "executorch.examples.models.llama",
     visibility = [
         "//executorch/...",
         "@EXECUTORCH_CLIENTS",
     ],
     deps = [
-        ":llama_transformer",
-        "//caffe2:torch",
+        "//executorch/extension/llm/modeling/text_decoder:text_decoder_static_attention",
     ],
 )

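The TARGETS diff above illustrates the pattern for Buck files after this move: rather than listing rope.py, attention.py, model_args.py, and norm.py as local sources under examples/models/llama, a library now depends on the relocated text_decoder targets under extension/llm. A minimal sketch of a hypothetical downstream library is shown below; the //executorch/extension/llm/modeling/text_decoder:* target paths are the ones added in the diff, while the library name example_llm_runner and its source file are invented for illustration and are not part of this commit.

# Hypothetical downstream TARGETS entry (sketch only, not part of this commit).
# The text_decoder targets are taken from the diff above; "example_llm_runner"
# and its source file are made-up names used purely for illustration.
runtime.python_library(
    name = "example_llm_runner",
    srcs = ["example_llm_runner.py"],
    visibility = ["//executorch/..."],
    deps = [
        # Attention, model args, norm, and RoPE now live under
        # extension/llm/modeling/text_decoder instead of examples/models/llama.
        "//executorch/extension/llm/modeling/text_decoder:text_decoder_attention",
        "//executorch/extension/llm/modeling/text_decoder:text_decoder_model_args",
        "//executorch/extension/llm/modeling/text_decoder:text_decoder_norm",
        "//executorch/extension/llm/modeling/text_decoder:text_decoder_rope",
    ],
)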