@@ -247,7 +247,7 @@ def _merge_pool_from_neural_network_and_pool_from_last_iteration(
247
247
248
248
249
249
@dataclasses .dataclass
250
- class ImaginaryConfig :
250
+ class HaarConfig :
251
251
"""
252
252
The two-step optimization process for solving quantum many-body problems based on imaginary time.
253
253
"""
@@ -354,11 +354,15 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
354
354
state_dict = data .get ("optimizer" ),
355
355
)
356
356
357
- if "imag" not in data :
358
- data ["imag" ] = {"global" : 0 , "local" : 0 , "lanczos" : 0 , "pool" : None }
357
+ if "haar" not in data and "imag" in data :
358
+ logging .warning ("The 'imag' subcommand is deprecated, please use 'haar' instead." )
359
+ data ["haar" ] = data ["imag" ]
360
+ del data ["imag" ]
361
+ if "haar" not in data :
362
+ data ["haar" ] = {"global" : 0 , "local" : 0 , "lanczos" : 0 , "pool" : None }
359
363
else :
360
- pool_configs , pool_psi = data ["imag " ]["pool" ]
361
- data ["imag " ]["pool" ] = (pool_configs .to (device = self .common .device ), pool_psi .to (device = self .common .device ))
364
+ pool_configs , pool_psi = data ["haar " ]["pool" ]
365
+ data ["haar " ]["pool" ] = (pool_configs .to (device = self .common .device ), pool_psi .to (device = self .common .device ))
362
366
363
367
writer = torch .utils .tensorboard .SummaryWriter (log_dir = self .common .folder ()) # type: ignore[no-untyped-call]
364
368
@@ -368,7 +372,7 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
368
372
logging .info ("Sampling configurations from neural network" )
369
373
configs_from_neural_network , psi_from_neural_network , _ , _ = network .generate_unique (self .sampling_count_from_neural_network , self .local_batch_count_generation )
370
374
logging .info ("Sampling configurations from last iteration" )
371
- configs_from_last_iteration , psi_from_last_iteration = _sampling_from_last_iteration (data ["imag " ]["pool" ], self .sampling_count_from_last_iteration )
375
+ configs_from_last_iteration , psi_from_last_iteration = _sampling_from_last_iteration (data ["haar " ]["pool" ], self .sampling_count_from_last_iteration )
372
376
logging .info ("Merging configurations from neural network and last iteration" )
373
377
configs , original_psi = _merge_pool_from_neural_network_and_pool_from_last_iteration (
374
378
configs_from_neural_network ,
@@ -392,9 +396,9 @@ def main(self, *, model_param: typing.Any = None, network_param: typing.Any = No
392
396
first_extend = self .krylov_extend_first ,
393
397
).run ():
394
398
logging .info ("The current energy is %.10f where the sampling count is %d" , target_energy .item (), len (configs ))
395
- writer .add_scalar ("imag /lanczos/energy" , target_energy , data ["imag " ]["lanczos" ]) # type: ignore[no-untyped-call]
396
- writer .add_scalar ("imag /lanczos/error" , target_energy - model .ref_energy , data ["imag " ]["lanczos" ]) # type: ignore[no-untyped-call]
397
- data ["imag " ]["lanczos" ] += 1
399
+ writer .add_scalar ("haar /lanczos/energy" , target_energy , data ["haar " ]["lanczos" ]) # type: ignore[no-untyped-call]
400
+ writer .add_scalar ("haar /lanczos/error" , target_energy - model .ref_energy , data ["haar " ]["lanczos" ]) # type: ignore[no-untyped-call]
401
+ data ["haar " ]["lanczos" ] += 1
398
402
max_index = original_psi .abs ().argmax ()
399
403
target_psi = original_psi / original_psi [max_index ]
400
404
logging .info ("Local optimization target calculated, the target energy is %.10f, the sampling count is %d" , target_energy .item (), len (configs ))
@@ -449,12 +453,12 @@ def closure() -> torch.Tensor:
449
453
logging .info ("Starting local optimization process" )
450
454
success = True
451
455
last_loss : float = 0.0
452
- local_step : int = data ["imag " ]["local" ]
456
+ local_step : int = data ["haar " ]["local" ]
453
457
scale_learning_rate (optimizer , 1 / (1 << try_index ))
454
458
for i in range (self .local_step ):
455
459
loss = optimizer .step (closure ) # type: ignore[assignment,arg-type]
456
460
logging .info ("Local optimization in progress, step %d, current loss: %.10f" , i , loss .item ())
457
- writer .add_scalar (f"imag /loss/{ self .loss_name } " , loss , local_step ) # type: ignore[no-untyped-call]
461
+ writer .add_scalar (f"haar /loss/{ self .loss_name } " , loss , local_step ) # type: ignore[no-untyped-call]
458
462
local_step += 1
459
463
if torch .isnan (loss ) or torch .isinf (loss ):
460
464
logging .warning ("Loss is NaN, restoring the previous state and exiting the optimization loop" )
@@ -474,7 +478,7 @@ def closure() -> torch.Tensor:
474
478
success = False
475
479
if success :
476
480
logging .info ("Local optimization process completed" )
477
- data ["imag " ]["local" ] = local_step
481
+ data ["haar " ]["local" ] = local_step
478
482
break
479
483
network .load_state_dict (state_backup )
480
484
optimizer .load_state_dict (optimizer_backup )
@@ -493,29 +497,44 @@ def closure() -> torch.Tensor:
493
497
model .ref_energy ,
494
498
final_energy .item () - model .ref_energy ,
495
499
)
496
- writer .add_scalar ("imag /energy/state" , final_energy , data ["imag " ]["global" ]) # type: ignore[no-untyped-call]
497
- writer .add_scalar ("imag /energy/target" , target_energy , data ["imag " ]["global" ]) # type: ignore[no-untyped-call]
498
- writer .add_scalar ("imag /error/state" , final_energy - model .ref_energy , data ["imag " ]["global" ]) # type: ignore[no-untyped-call]
499
- writer .add_scalar ("imag /error/target" , target_energy - model .ref_energy , data ["imag " ]["global" ]) # type: ignore[no-untyped-call]
500
+ writer .add_scalar ("haar /energy/state" , final_energy , data ["haar " ]["global" ]) # type: ignore[no-untyped-call]
501
+ writer .add_scalar ("haar /energy/target" , target_energy , data ["haar " ]["global" ]) # type: ignore[no-untyped-call]
502
+ writer .add_scalar ("haar /error/state" , final_energy - model .ref_energy , data ["haar " ]["global" ]) # type: ignore[no-untyped-call]
503
+ writer .add_scalar ("haar /error/target" , target_energy - model .ref_energy , data ["haar " ]["global" ]) # type: ignore[no-untyped-call]
500
504
logging .info ("Displaying the largest amplitudes" )
501
505
indices = target_psi .abs ().argsort (descending = True )
502
506
text = []
503
507
for index in indices [:self .logging_psi ]:
504
508
this_config = model .show_config (configs [index ])
505
509
logging .info ("Configuration: %s, Target amplitude: %s, Final amplitude: %s" , this_config , f"{ target_psi [index ].item ():.8f} " , f"{ psi [index ].item ():.8f} " )
506
510
text .append (f"Configuration: { this_config } , Target amplitude: { target_psi [index ].item ():.8f} , Final amplitude: { psi [index ].item ():.8f} " )
507
- writer .add_text ("config" , "\n " .join (text ), data ["imag " ]["global" ]) # type: ignore[no-untyped-call]
511
+ writer .add_text ("config" , "\n " .join (text ), data ["haar " ]["global" ]) # type: ignore[no-untyped-call]
508
512
writer .flush () # type: ignore[no-untyped-call]
509
513
510
514
logging .info ("Saving model checkpoint" )
511
- data ["imag " ]["pool" ] = (configs , original_psi )
512
- data ["imag " ]["global" ] += 1
515
+ data ["haar " ]["pool" ] = (configs , original_psi )
516
+ data ["haar " ]["global" ] += 1
513
517
data ["network" ] = network .state_dict ()
514
518
data ["optimizer" ] = optimizer .state_dict ()
515
- self .common .save (data , data ["imag " ]["global" ])
519
+ self .common .save (data , data ["haar " ]["global" ])
516
520
logging .info ("Checkpoint successfully saved" )
517
521
518
522
logging .info ("Current optimization cycle completed" )
519
523
520
524
521
# Register the two-step imaginary-time optimization config under its new
# subcommand name "haar" (renamed from the former "imag" subcommand).
subcommand_dict["haar"] = HaarConfig
526
+
527
+
528
class ImagConfig(HaarConfig):
    """
    Deprecated, use "haar" instead.

    Backward-compatibility alias: behaves exactly like HaarConfig but emits a
    deprecation warning when an instance is constructed, steering users of the
    old "imag" subcommand toward "haar".
    """

    # pylint: disable=too-few-public-methods

    def __post_init__(self) -> None:
        # Warn on every construction, then defer to the parent's post-init so
        # the actual configuration behavior is unchanged.
        # NOTE(review): assumes HaarConfig defines __post_init__ (its body is
        # not visible in this diff hunk) — otherwise the super() call would
        # raise AttributeError; confirm against the full class definition.
        logging.warning("The 'imag' subcommand is deprecated, please use 'haar' instead.")
        super().__post_init__()
538
+
539
+
540
# Keep the old "imag" subcommand name functional by registering the
# deprecating subclass under it (warns, then behaves identically to "haar").
subcommand_dict["imag"] = ImagConfig
0 commit comments