Skip to content

Commit bfe8b06

Browse files
committed
Remove unnecessary code
1 parent 67c0b89 commit bfe8b06

File tree

1 file changed: 0 additions, 9 deletions

examples/models/llama/model.py

Lines changed: 0 additions & 9 deletions
```diff
@@ -289,15 +289,6 @@ def __init__(self, **kwargs):
     def get_eager_model(self) -> torch.nn.Module:
         return self.model_

-        if self.checkpoint_dtype:
-            # convert to the type of the provided checkpoint
-            # input and output are torch.long, so signature unchanged
-            return self.model_.to(self.checkpoint_dtype)
-        else:
-            # int8 quantization code has some bf16,
-            # switch all to FP32
-            return self.model_.to(torch.float32)
-
     def get_example_inputs(self):
         if self.use_kv_cache:
             return self.get_example_inputs_kvcache_sdpa()
```

0 commit comments

Comments
 (0)