4 changes: 2 additions & 2 deletions deepmd/loggers/training.py
@@ -12,7 +12,7 @@ def format_training_message(
     eta: int | None = None,
 ) -> str:
     """Format a training message."""
-    msg = f"batch {batch:7d}: total wall time = {wall_time:.2f} s"
+    msg = f"Batch {batch:7d}: total wall time = {wall_time:.2f} s"
     if isinstance(eta, int):
         msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}"
     return msg
@@ -49,7 +49,7 @@ def format_training_message_per_task(
     # sort rmse
     rmse = dict(sorted(rmse.items()))
     msg = (
-        f"batch {batch:7d}: {task_name}"
+        f"Batch {batch:7d}: {task_name}"
         f"{', '.join([f'{kk} = {vv:8.2e}' for kk, vv in rmse.items()])}"
         f"{lr}"
     )
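As a sanity check of the new wording, here is a minimal standalone sketch that reuses the updated format string from this diff; the full parameter list of format_training_message is assumed from usage, since the hunk only shows its tail.

import datetime

# Mirrors the updated f-string in deepmd/loggers/training.py;
# the batch/wall_time parameter names are assumed from usage.
def format_training_message(batch: int, wall_time: float, eta: int | None = None) -> str:
    msg = f"Batch {batch:7d}: total wall time = {wall_time:.2f} s"
    if isinstance(eta, int):
        msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}"
    return msg

print(format_training_message(1000, 12.34, eta=3600))
# Batch    1000: total wall time = 12.34 s, eta = 1:00:00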
4 changes: 2 additions & 2 deletions deepmd/pd/entrypoints/main.py
@@ -219,8 +219,8 @@ def get_backend_info(self) -> dict:
         op_info = {}
         return {
             "Backend": "Paddle",
-            "PD ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}",
-            "Enable custom OP": False,
+            "PD Ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}",
+            "Custom OP Enabled": False,
             **op_info,
         }

10 changes: 5 additions & 5 deletions deepmd/pt/entrypoints/main.py
@@ -239,16 +239,16 @@ def get_backend_info(self) -> dict:
         """Get backend information."""
         if ENABLE_CUSTOMIZED_OP:
             op_info = {
-                "build with PT ver": GLOBAL_CONFIG["pt_version"],
-                "build with PT inc": GLOBAL_CONFIG["pt_include_dir"].replace(";", "\n"),
-                "build with PT lib": GLOBAL_CONFIG["pt_libs"].replace(";", "\n"),
+                "Built with PT Ver": GLOBAL_CONFIG["pt_version"],
+                "Built with PT Inc": GLOBAL_CONFIG["pt_include_dir"].replace(";", "\n"),
+                "Built with PT Lib": GLOBAL_CONFIG["pt_libs"].replace(";", "\n"),
             }
         else:
             op_info = {}
         return {
             "Backend": "PyTorch",
-            "PT ver": f"v{torch.__version__}-g{torch.version.git_version[:11]}",
-            "Enable custom OP": ENABLE_CUSTOMIZED_OP,
+            "PT Ver": f"v{torch.__version__}-g{torch.version.git_version[:11]}",
+            "Custom OP Enabled": ENABLE_CUSTOMIZED_OP,
             **op_info,
         }

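The Paddle hunk above and the TensorFlow hunk below follow the same convention: get_backend_info() returns a plain dict whose keys are now Title Case. A hedged sketch of how such a dict might look and be printed; the version string is illustrative, not taken from a real build.

# Illustrative values only; real ones come from torch.__version__ and GLOBAL_CONFIG.
backend_info = {
    "Backend": "PyTorch",
    "PT Ver": "v2.3.0-g1234567890a",
    "Custom OP Enabled": False,
}
for key, value in backend_info.items():
    print(f"{key:>18}: {value}")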
8 changes: 4 additions & 4 deletions deepmd/tf/train/run_options.py
@@ -67,10 +67,10 @@ def get_backend_info(self) -> dict:
         """Get backend information."""
         return {
             "Backend": "TensorFlow",
-            "TF ver": tf.version.GIT_VERSION,
-            "build with TF ver": TF_VERSION,
-            "build with TF inc": GLOBAL_CONFIG["tf_include_dir"].replace(";", "\n"),
-            "build with TF lib": GLOBAL_CONFIG["tf_libs"].replace(";", "\n"),
+            "TF Ver": tf.version.GIT_VERSION,
+            "Built with TF Ver": TF_VERSION,
+            "Built with TF Inc": GLOBAL_CONFIG["tf_include_dir"].replace(";", "\n"),
+            "Built with TF Lib": GLOBAL_CONFIG["tf_libs"].replace(";", "\n"),
         }

     def get_device_name(self) -> str | None:
4 changes: 2 additions & 2 deletions deepmd/utils/data_system.py
@@ -715,9 +715,9 @@ def print_summary(
     # width 65
     sys_width = 42
     log.info(
-        f"---Summary of DataSystem: {name:13s}-----------------------------------------------"
+        f"---Summary of DataSystem: {name.capitalize():13s}-----------------------------------------------"
     )
-    log.info("found %d system(s):", nsystems)
+    log.info("Found %d System(s):", nsystems)
     log.info(
         "%s %6s %6s %6s %9s %3s",
         _format_name_length("system", sys_width),
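The effect of the print_summary change is that the data-system name is now capitalized in the banner. A small sketch of the rendered header, assuming name is, for example, "training":

name = "training"  # example value; the actual name is passed in by the caller
header = (
    f"---Summary of DataSystem: {name.capitalize():13s}"
    "-----------------------------------------------"
)
print(header)
# ---Summary of DataSystem: Training     -----------------------------------------------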
28 changes: 14 additions & 14 deletions deepmd/utils/summary.py
@@ -47,13 +47,13 @@ class SummaryPrinter(ABC):
     )

     BUILD: ClassVar = {
-        "installed to": "\n".join(deepmd.__path__),
-        "source": GLOBAL_CONFIG["git_summ"],
-        "source branch": GLOBAL_CONFIG["git_branch"],
-        "source commit": GLOBAL_CONFIG["git_hash"],
-        "source commit at": GLOBAL_CONFIG["git_date"],
-        "use float prec": global_float_prec,
-        "build variant": GLOBAL_CONFIG["dp_variant"],
+        "Installed to": "\n".join(deepmd.__path__),
+        "Source": GLOBAL_CONFIG["git_summ"],
+        "Source Branch": GLOBAL_CONFIG["git_branch"],
+        "Source Commit": GLOBAL_CONFIG["git_hash"],
+        "Source Commit at": GLOBAL_CONFIG["git_date"],
+        "Float Precision": global_float_prec.capitalize(),
+        "Build Variant": GLOBAL_CONFIG["dp_variant"].upper(),
     }

     def __call__(self) -> None:
@@ -64,14 +64,14 @@ def __call__(self) -> None:
         if len(nodelist) > 1:
             build_info.update(
                 {
-                    "world size": str(len(nodelist)),
-                    "node list": ", ".join(set(nodelist)),
+                    "World Size": str(len(nodelist)),
+                    "Node List": ", ".join(set(nodelist)),
                 }
             )
         build_info.update(
             {
-                "running on": nodename,
-                "computing device": self.get_compute_device(),
+                "Running on": nodename,
+                "Computing Device": self.get_compute_device().upper(),
             }
         )
         device_name = self.get_device_name()
@@ -84,13 +84,13 @@ def __call__(self) -> None:
             env_value = os.environ.get("HIP_VISIBLE_DEVICES", "unset")
             build_info["HIP_VISIBLE_DEVICES"] = env_value
         if self.is_built_with_cuda() or self.is_built_with_rocm():
-            build_info["Count of visible GPUs"] = str(self.get_ngpus())
+            build_info["Visible GPU Count"] = str(self.get_ngpus())

         intra, inter = get_default_nthreads()
         build_info.update(
             {
-                "num_intra_threads": str(intra),
-                "num_inter_threads": str(inter),
+                "Num Intra Threads": str(intra),
+                "Num Inter Threads": str(inter),
             }
         )
         # count the maximum characters in the keys and values
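The trailing comment refers to aligning keys and values when the summary is logged. A hedged sketch of that alignment over the new Title Case keys; the values below are placeholders, and the real dict is assembled in SummaryPrinter.__call__ and may be rendered differently.

# Placeholder entries standing in for the assembled build_info dict.
build_info = {
    "Installed to": "/path/to/site-packages/deepmd",
    "Source Branch": "devel",
    "Build Variant": "CUDA",
    "Running on": "node-01",
    "Num Intra Threads": "0",
    "Num Inter Threads": "0",
}
key_width = max(len(key) for key in build_info)
for key, value in build_info.items():
    print(f"{key:<{key_width}} : {value}")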