
Commit 3234bc9

fix llama buck build (#13169)

cccclai authored and facebook-github-bot committed
Summary:
Pull Request resolved: #13169

Some recent changes break the llama buck build.

Reviewed By: rohansjoshi

Differential Revision: D79753385
Parent commit: c52f6a0


2 files changed: +21 −0 lines

examples/qualcomm/oss_scripts/llama/TARGETS

Lines changed: 20 additions & 0 deletions
```diff
@@ -15,10 +15,30 @@ python_library(
     ],
 )
 
+python_library(
+    name = "decoder_utils",
+    srcs = [
+        "decoder_utils.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+        "//executorch/examples/models/llama:eval_library",
+    ],
+)
+
+python_library(
+    name = "decoder_constants",
+    srcs = [
+        "decoder_constants.py",
+    ],
+)
+
 python_library(
     name = "llama_lib",
     srcs = ["llama.py"],
     deps = [
+        ":decoder_constants",
+        ":decoder_utils",
         "//executorch/examples/models/llama:source_transformation",
         "//caffe2:torch",
         "//executorch/backends/qualcomm/partition:partition",
```

examples/qualcomm/oss_scripts/llama/decoder_utils.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -52,6 +52,7 @@ def __init__(
         use_i64_token: bool,
     ):
         # n seq len = n-1 cache len, so we len(inps) = n-1 during _model_call
+        # pyre-ignore
         super().__init__(
             model=model, tokenizer=tokenizer, max_seq_length=max_seq_length - 1
         )
```
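
The added `# pyre-ignore` suppresses the Pyre type error reported on the `super().__init__(...)` call that follows it; the specific error being silenced is not shown in this diff. As a rough illustration of how such suppressions behave (a generic sketch, not code from this repository):

```python
def scale(x: int) -> int:
    return x * 2

# pyre-ignore
result = scale("3")  # Pyre would flag this argument type without the suppression
print(result)
```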
