Skip to content

Commit 28553f9

Browse files
committed
Rename imag to haar to keep consistency with the paper.
PR: USTC-KnowledgeComputingLab/qmb#64 Signed-off-by: Hao Zhang <[email protected]>
2 parents 1b25849 + 90f7aeb commit 28553f9

File tree

3 files changed

+43
-24
lines changed

3 files changed

+43
-24
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ qmb --help
6767

6868
This command provides a collection of subcommands, such as `haar`.
6969
To access detailed help for a specific subcommand, users can append `--help` to the command.
70-
For example, use `qmb imag --help` to view the help information for the `imag` subcommand.
70+
For example, use `qmb haar --help` to view the help information for the `haar` subcommand.
7171

7272
Typically, `qmb` requires a specific descriptor for a particular physical or chemical model to execute.
7373
We have collected a set of such models [here][models-url].
@@ -77,7 +77,7 @@ Alternatively, users can specify a custom path by setting the `$QMB_MODEL_PATH`
7777

7878
After cloning or downloading the dataset, users can calculate the ground state of the $N_2$ system by running the command:
7979
```
80-
qmb imag openfermion mlp -PN2
80+
qmb haar openfermion mlp -PN2
8181
```
8282
This command utilizes the `haar` subcommand with the descriptor in OpenFermion format and the [mlp network][naqs-url].
8383
It specifies the $N_2$ model via the `-PN2` flag since the $N_2$ model is loaded from the file `N2.hdf5` in the folder `models`.

qmb/__main__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from . import hubbard as _ # type: ignore[no-redef]
1111
from . import ising as _ # type: ignore[no-redef]
1212
from . import vmc as _ # type: ignore[no-redef]
13-
from . import imag as _ # type: ignore[no-redef]
13+
from . import haar as _ # type: ignore[no-redef]
1414
from . import rldiag as _ # type: ignore[no-redef]
1515
from . import precompile as _ # type: ignore[no-redef]
1616
from . import list_loss as _ # type: ignore[no-redef]

qmb/imag.py renamed to qmb/haar.py

Lines changed: 40 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,7 @@ def _merge_pool_from_neural_network_and_pool_from_last_iteration(
247247

248248

249249
@dataclasses.dataclass
250-
class ImaginaryConfig:
250+
class HaarConfig:
251251
"""
252252
The two-step optimization process for solving quantum many-body problems based on imaginary time.
253253
"""
@@ -354,11 +354,15 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
354354
state_dict=data.get("optimizer"),
355355
)
356356

357-
if "imag" not in data:
358-
data["imag"] = {"global": 0, "local": 0, "lanczos": 0, "pool": None}
357+
if "haar" not in data and "imag" in data:
358+
logging.warning("The 'imag' subcommand is deprecated, please use 'haar' instead.")
359+
data["haar"] = data["imag"]
360+
del data["imag"]
361+
if "haar" not in data:
362+
data["haar"] = {"global": 0, "local": 0, "lanczos": 0, "pool": None}
359363
else:
360-
pool_configs, pool_psi = data["imag"]["pool"]
361-
data["imag"]["pool"] = (pool_configs.to(device=self.common.device), pool_psi.to(device=self.common.device))
364+
pool_configs, pool_psi = data["haar"]["pool"]
365+
data["haar"]["pool"] = (pool_configs.to(device=self.common.device), pool_psi.to(device=self.common.device))
362366

363367
writer = torch.utils.tensorboard.SummaryWriter(log_dir=self.common.folder()) # type: ignore[no-untyped-call]
364368

@@ -368,7 +372,7 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
368372
logging.info("Sampling configurations from neural network")
369373
configs_from_neural_network, psi_from_neural_network, _, _ = network.generate_unique(self.sampling_count_from_neural_network, self.local_batch_count_generation)
370374
logging.info("Sampling configurations from last iteration")
371-
configs_from_last_iteration, psi_from_last_iteration = _sampling_from_last_iteration(data["imag"]["pool"], self.sampling_count_from_last_iteration)
375+
configs_from_last_iteration, psi_from_last_iteration = _sampling_from_last_iteration(data["haar"]["pool"], self.sampling_count_from_last_iteration)
372376
logging.info("Merging configurations from neural network and last iteration")
373377
configs, original_psi = _merge_pool_from_neural_network_and_pool_from_last_iteration(
374378
configs_from_neural_network,
@@ -392,9 +396,9 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
392396
first_extend=self.krylov_extend_first,
393397
).run():
394398
logging.info("The current energy is %.10f where the sampling count is %d", target_energy.item(), len(configs))
395-
writer.add_scalar("imag/lanczos/energy", target_energy, data["imag"]["lanczos"]) # type: ignore[no-untyped-call]
396-
writer.add_scalar("imag/lanczos/error", target_energy - model.ref_energy, data["imag"]["lanczos"]) # type: ignore[no-untyped-call]
397-
data["imag"]["lanczos"] += 1
399+
writer.add_scalar("haar/lanczos/energy", target_energy, data["haar"]["lanczos"]) # type: ignore[no-untyped-call]
400+
writer.add_scalar("haar/lanczos/error", target_energy - model.ref_energy, data["haar"]["lanczos"]) # type: ignore[no-untyped-call]
401+
data["haar"]["lanczos"] += 1
398402
max_index = original_psi.abs().argmax()
399403
target_psi = original_psi / original_psi[max_index]
400404
logging.info("Local optimization target calculated, the target energy is %.10f, the sampling count is %d", target_energy.item(), len(configs))
@@ -449,12 +453,12 @@ def closure() -> torch.Tensor:
449453
logging.info("Starting local optimization process")
450454
success = True
451455
last_loss: float = 0.0
452-
local_step: int = data["imag"]["local"]
456+
local_step: int = data["haar"]["local"]
453457
scale_learning_rate(optimizer, 1 / (1 << try_index))
454458
for i in range(self.local_step):
455459
loss = optimizer.step(closure) # type: ignore[assignment,arg-type]
456460
logging.info("Local optimization in progress, step %d, current loss: %.10f", i, loss.item())
457-
writer.add_scalar(f"imag/loss/{self.loss_name}", loss, local_step) # type: ignore[no-untyped-call]
461+
writer.add_scalar(f"haar/loss/{self.loss_name}", loss, local_step) # type: ignore[no-untyped-call]
458462
local_step += 1
459463
if torch.isnan(loss) or torch.isinf(loss):
460464
logging.warning("Loss is NaN, restoring the previous state and exiting the optimization loop")
@@ -474,7 +478,7 @@ def closure() -> torch.Tensor:
474478
success = False
475479
if success:
476480
logging.info("Local optimization process completed")
477-
data["imag"]["local"] = local_step
481+
data["haar"]["local"] = local_step
478482
break
479483
network.load_state_dict(state_backup)
480484
optimizer.load_state_dict(optimizer_backup)
@@ -493,29 +497,44 @@ def closure() -> torch.Tensor:
493497
model.ref_energy,
494498
final_energy.item() - model.ref_energy,
495499
)
496-
writer.add_scalar("imag/energy/state", final_energy, data["imag"]["global"]) # type: ignore[no-untyped-call]
497-
writer.add_scalar("imag/energy/target", target_energy, data["imag"]["global"]) # type: ignore[no-untyped-call]
498-
writer.add_scalar("imag/error/state", final_energy - model.ref_energy, data["imag"]["global"]) # type: ignore[no-untyped-call]
499-
writer.add_scalar("imag/error/target", target_energy - model.ref_energy, data["imag"]["global"]) # type: ignore[no-untyped-call]
500+
writer.add_scalar("haar/energy/state", final_energy, data["haar"]["global"]) # type: ignore[no-untyped-call]
501+
writer.add_scalar("haar/energy/target", target_energy, data["haar"]["global"]) # type: ignore[no-untyped-call]
502+
writer.add_scalar("haar/error/state", final_energy - model.ref_energy, data["haar"]["global"]) # type: ignore[no-untyped-call]
503+
writer.add_scalar("haar/error/target", target_energy - model.ref_energy, data["haar"]["global"]) # type: ignore[no-untyped-call]
500504
logging.info("Displaying the largest amplitudes")
501505
indices = target_psi.abs().argsort(descending=True)
502506
text = []
503507
for index in indices[:self.logging_psi]:
504508
this_config = model.show_config(configs[index])
505509
logging.info("Configuration: %s, Target amplitude: %s, Final amplitude: %s", this_config, f"{target_psi[index].item():.8f}", f"{psi[index].item():.8f}")
506510
text.append(f"Configuration: {this_config}, Target amplitude: {target_psi[index].item():.8f}, Final amplitude: {psi[index].item():.8f}")
507-
writer.add_text("config", "\n".join(text), data["imag"]["global"]) # type: ignore[no-untyped-call]
511+
writer.add_text("config", "\n".join(text), data["haar"]["global"]) # type: ignore[no-untyped-call]
508512
writer.flush() # type: ignore[no-untyped-call]
509513

510514
logging.info("Saving model checkpoint")
511-
data["imag"]["pool"] = (configs, original_psi)
512-
data["imag"]["global"] += 1
515+
data["haar"]["pool"] = (configs, original_psi)
516+
data["haar"]["global"] += 1
513517
data["network"] = network.state_dict()
514518
data["optimizer"] = optimizer.state_dict()
515-
self.common.save(data, data["imag"]["global"])
519+
self.common.save(data, data["haar"]["global"])
516520
logging.info("Checkpoint successfully saved")
517521

518522
logging.info("Current optimization cycle completed")
519523

520524

521-
subcommand_dict["imag"] = ImaginaryConfig
525+
subcommand_dict["haar"] = HaarConfig
526+
527+
528+
class ImagConfig(HaarConfig):
529+
"""
530+
Deprecated, use "haar" instead.
531+
"""
532+
533+
# pylint: disable=too-few-public-methods
534+
535+
def __post_init__(self) -> None:
536+
logging.warning("The 'imag' subcommand is deprecated, please use 'haar' instead.")
537+
super().__post_init__()
538+
539+
540+
subcommand_dict["imag"] = ImagConfig

0 commit comments

Comments
 (0)