This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit 9ec55fb

disable calling dist_run.py directly for now
1 parent e8f7c98 commit 9ec55fb

File tree

1 file changed: +28 -29 lines

torchchat/distributed/dist_run.py

Lines changed: 28 additions & 29 deletions
@@ -287,7 +287,7 @@ def _cleanup():
     # "Can you explain what is the purpose of back propagation in neural networks?",
     "Who is Santa Claus?",
     "Where does Santa live?",
-    # "Who is Abraham Lincoln?",
+    "Who is Abraham Lincoln?",
     # "How are models trained?",
 ]
 
@@ -600,31 +600,30 @@ def get_example_ins_outs(seqlen: int) -> Tuple[torch.Tensor, torch.Tensor]:
         f"{color.green}Success{color.white} - {color.blue}Rank {rank} has completed.{color.reset}"
     )
 
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    """parser.add_argument(
-        "model_name",
-        type=str,
-        default="llama3",
-        help="Name of the model to load",
-        # choices=NAME_TO_DISTRIBUTION_AND_DTYPE.keys(),
-    )
-    """
-    parser.add_argument("--pp", type=int, default=1, help="Pipeline parallel degree")
-    parser.add_argument(
-        "--ntokens",
-        type=int,
-        default=40,
-        help="Number of tokens to generate",
-    )
-    parser.add_argument(
-        "--chpt-from",
-        type=str,
-        default="hf", # TODO: change to torchchat once we support it well
-        help="Checkpoint format to load from",
-        choices=["hf", "torchchat"],
-    )
-    args = parser.parse_args()
-
-    main(args)
+# TODO: remove or make it work again
+# if __name__ == "__main__":
+#     parser = argparse.ArgumentParser()
+#     parser.add_argument(
+#         "model_name",
+#         type=str,
+#         default="llama3",
+#         help="Name of the model to load",
+#         choices=NAME_TO_DISTRIBUTION_AND_DTYPE.keys(),
+#     )
+#     parser.add_argument("--pp", type=int, default=1, help="Pipeline parallel degree")
+#     parser.add_argument(
+#         "--ntokens",
+#         type=int,
+#         default=40,
+#         help="Number of tokens to generate",
+#     )
+#     parser.add_argument(
+#         "--chpt-from",
+#         type=str,
+#         default="hf", # TODO: change to torchchat once we support it well
+#         help="Checkpoint format to load from",
+#         choices=["hf", "torchchat"],
+#     )
+#     args = parser.parse_args()
+
+# main()
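
With the __main__ block commented out, dist_run.py can no longer be launched as a standalone script, per the commit message ("disable calling dist_run.py directly for now"). The following is a hypothetical sketch of how the module could still be driven programmatically by reproducing the defaults of the removed CLI; the import path and the assumption that main() still accepts an argparse.Namespace (as the old `main(args)` call did) are not guaranteed by this commit.

# Hypothetical sketch only: rebuild the defaults of the removed parser
# (--pp=1, --ntokens=40, --chpt-from="hf") and pass them to dist_run.main().
# Assumes dist_run is importable as a package module and that main() still
# takes a Namespace; the commented-out entry point now calls main() with no
# arguments, so this may need adjusting.
import argparse

from torchchat.distributed import dist_run

args = argparse.Namespace(
    pp=1,            # pipeline parallel degree (old --pp default)
    ntokens=40,      # number of tokens to generate (old --ntokens default)
    chpt_from="hf",  # checkpoint format to load from (old --chpt-from default)
)
dist_run.main(args)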

0 commit comments
