Skip to content

Commit c2cfcca

Browse files
haozha111 authored and copybara-github committed
Cast path to string before running the build_model method.
PiperOrigin-RevId: 728419475
1 parent 8c52eb0 commit c2cfcca

File tree

5 files changed

+5
-5
lines changed

5 files changed

+5
-5
lines changed

ai_edge_torch/generative/examples/deepseek/verify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def main(_):
4949
)
5050
reauthored_checkpoint = pathlib.Path(cached_config_file).parent
5151
logging.info("Building the reauthored model from: %s", reauthored_checkpoint)
52-
reauthored_model = deepseek.build_model(reauthored_checkpoint)
52+
reauthored_model = deepseek.build_model(str(reauthored_checkpoint))
5353

5454
logging.info("Loading the tokenizer from: %s", checkpoint)
5555
tokenizer = transformers.AutoTokenizer.from_pretrained(checkpoint)

ai_edge_torch/generative/examples/openelm/verify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ def main(_):
5050
)
5151
reauthored_checkpoint = pathlib.Path(cached_config_file).parent
5252
logging.info("Building the reauthored model from: %s", reauthored_checkpoint)
53-
reauthored_model = openelm.build_model(reauthored_checkpoint)
53+
reauthored_model = openelm.build_model(str(reauthored_checkpoint))
5454

5555
tokenizer_checkpoint = "meta-llama/Llama-2-7b-hf"
5656
logging.info("Loading the tokenizer from: %s", tokenizer_checkpoint)

ai_edge_torch/generative/examples/paligemma/verify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ def main(_):
7070
cached_config_file = transformers.utils.cached_file(
7171
checkpoint, transformers.utils.CONFIG_NAME
7272
)
73-
reauthored_checkpoint = pathlib.Path(cached_config_file).parent
73+
reauthored_checkpoint = str(pathlib.Path(cached_config_file).parent)
7474
else:
7575
checkpoint = kagglehub.model_download(_CHECKPOINT[_VERSION.value])
7676
reauthored_checkpoint = checkpoint

ai_edge_torch/generative/examples/qwen_vl/verify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def main(_):
6767
)
6868
reauthored_checkpoint = pathlib.Path(cached_config_file).parent
6969
logging.info("Building the reauthored model from: %s", reauthored_checkpoint)
70-
reauthored_model = qwen_vl.build_model(reauthored_checkpoint)
70+
reauthored_model = qwen_vl.build_model(str(reauthored_checkpoint))
7171

7272
logging.info("Loading the processor from: %s", checkpoint)
7373
processor = transformers.AutoProcessor.from_pretrained(checkpoint)

ai_edge_torch/generative/examples/tiny_llama/verify.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def main(_):
5151
)
5252
reauthored_checkpoint = pathlib.Path(cached_config_file).parent
5353
logging.info("Building the reauthored model from: %s", reauthored_checkpoint)
54-
reauthored_model = tiny_llama.build_model(reauthored_checkpoint)
54+
reauthored_model = tiny_llama.build_model(str(reauthored_checkpoint))
5555

5656
logging.info("Loading the tokenizer from: %s", checkpoint)
5757
tokenizer = transformers.AutoTokenizer.from_pretrained(checkpoint)

0 commit comments

Comments (0)