Commit 80a1204

Remove unused modelname param from LLMEdgeManager
1 parent 9021b19 commit 80a1204

File tree

3 files changed: 0 additions & 5 deletions


examples/models/llama/export_llama_lib.py

Lines changed: 0 additions & 1 deletion

@@ -858,7 +858,6 @@ def _load_llama_model(
 
     return LLMEdgeManager(
         model=model,
-        modelname=modelname,
         max_seq_len=model.params.max_seq_len,
         dtype=dtype,
         use_kv_cache=use_kv_cache,

examples/models/llava/export_llava.py

Lines changed: 0 additions & 2 deletions

@@ -83,7 +83,6 @@ def forward(self, input_pos, embeddings):
 
     text_model_em = LLMEdgeManager(
         model=llava_text_model,
-        modelname="llava_text_model",
         max_seq_len=llava.text_model_args.max_seq_len,
         dtype=DType.fp32,
         use_kv_cache=True,
@@ -140,7 +139,6 @@ def forward(self, images):
     manager = (
         LlavaEdgeManager(
             model=llava_image_encode,
-            modelname="llava_image_encoder",
             max_seq_len=llava.text_model_args.max_seq_len,  # This may not be right
             dtype=DType.fp32,
             use_kv_cache=True,

extension/llm/export/builder.py

Lines changed: 0 additions & 2 deletions

@@ -63,7 +63,6 @@ class LLMEdgeManager:
     def __init__(
         self,
         model,
-        modelname,
         max_seq_len,
         dtype,
         use_kv_cache,
@@ -84,7 +83,6 @@ def __init__(
         self.model = model
         # graph module returned from export()
         self.pre_autograd_graph_module: Optional[torch.fx.GraphModule] = None
-        self.modelname = modelname
         self.max_seq_len = max_seq_len
         self.dtype = dtype
         self.example_inputs = example_inputs
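
For illustration, here is a minimal sketch of a call site after this change: the modelname keyword is simply dropped and the remaining keyword arguments are passed as before. The import path is inferred from the extension/llm/export/builder.py location shown above, and the model, sequence length, and example inputs are hypothetical placeholders rather than code from this repository; the constructor may require additional keyword arguments not visible in this diff.

# Sketch of an updated call site (hypothetical placeholder values).
# Import path assumed from the file location extension/llm/export/builder.py.
from executorch.extension.llm.export.builder import DType, LLMEdgeManager

manager = LLMEdgeManager(
    model=my_torch_module,          # placeholder: an eager torch.nn.Module
    max_seq_len=2048,               # placeholder sequence length
    dtype=DType.fp32,
    use_kv_cache=True,
    example_inputs=example_inputs,  # placeholder inputs prepared elsewhere
    # ... plus any other keyword arguments the constructor requires ...
)
# Nothing sets manager.modelname anymore; callers that previously read that
# attribute need to track the model name themselves.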
