Skip to content
This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit 7ac16f9

Browse files
authored
Update generate.py
1 parent db5fd1b commit 7ac16f9

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

torchchat/generate.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1229,7 +1229,7 @@ def callback(x, *, done_generating=False):
1229 1229
aggregate_metrics["first_token_per_sec"].append(first_token_sec)
1230 1230
aggregate_metrics["next_tokens_per_sec"].append(next_tokens_sec)
1231 1231

1232-
logger.info(
1232+
logging.info(
1233 1233
f"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\
1234 1234
\nGenerated {num_tokens_generated} tokens \
1235 1235
\nTime for inference {i + 1}: {t:.04f} sec total \
@@ -1240,11 +1240,11 @@ def callback(x, *, done_generating=False):
1240 1240
\n Next token throughput: {next_tokens_sec:.04f} tokens/sec, {1 / next_tokens_sec:.04f} s/token \
1241 1241
"
1242 1242
)
1243-
logger.info(
1243+
logging.info(
1244 1244
f"\nBandwidth achieved: {model_size * tokens_sec / 1e9:.02f} GB/s"
1245 1245
)
1246 1246
if i == 0:
1247-
logger.info(
1247+
logging.info(
1248 1248
f"*** This first iteration will include cold start effects for dynamic import, hardware caches{', JIT compilation' if jit_compile else ''}. ***"
1249 1249
)
1250 1250
print("\n========================================\n")

0 commit comments

Comments (0)