
Commit 2028bcd

Merge branch 'develop' into becke
2 parents 5aa14e1 + 6d4e946 commit 2028bcd

366 files changed (+6470 −4899 lines)

.github/workflows/coverage.yml

Lines changed: 2 additions & 2 deletions
@@ -15,10 +15,10 @@ jobs:
       uses: actions/checkout@v4
     - name: Install Requirements for Coverage Testing
       run: |
-        apt update && apt install -y lcov
+        apt update && apt install -y lcov gpg
     - name: Building
       run: |
-        cmake -B build -DENABLE_DEEPKS=ON -DENABLE_LIBXC=ON -DBUILD_TESTING=ON -DENABLE_COVERAGE=ON
+        cmake -B build -DENABLE_COVERAGE=ON -DBUILD_TESTING=ON -DENABLE_DEEPKS=ON -DENABLE_LIBXC=ON -DENABLE_LIBRI=ON -DENABLE_PAW=ON -DENABLE_GOOGLEBENCH=ON -DENABLE_RAPIDJSON=ON
         cmake --build build -j`nproc`
         cmake --install build
     - name: Testing
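The coverage workflow now enables nearly every optional feature in a single build. A minimal sketch of reproducing that configure–build cycle locally, assuming a GCC toolchain with `lcov` installed; the report-generation commands at the end are standard lcov usage and are not part of this commit:

```bash
# Sketch: reproduce the CI coverage build locally (assumes gcc + lcov are available).
cmake -B build -DENABLE_COVERAGE=ON -DBUILD_TESTING=ON \
      -DENABLE_DEEPKS=ON -DENABLE_LIBXC=ON -DENABLE_LIBRI=ON -DENABLE_PAW=ON \
      -DENABLE_GOOGLEBENCH=ON -DENABLE_RAPIDJSON=ON
cmake --build build -j"$(nproc)"
cmake --install build

# Standard lcov post-processing (not from this commit): collect and render coverage data.
lcov --capture --directory build --output-file coverage.info
genhtml coverage.info --output-directory coverage_html
```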

CMakeLists.txt

Lines changed: 27 additions & 23 deletions
@@ -436,22 +436,6 @@ if(ENABLE_FLOAT_FFTW)
 endif()

 if(ENABLE_DEEPKS)
-  # Torch uses outdated components to detect CUDA arch, causing failure on
-  # latest CUDA kits. Set CMake variable TORCH_CUDA_ARCH_LIST in the form of
-  # "major.minor" if required.
-  find_package(Torch REQUIRED)
-  if(NOT Torch_VERSION VERSION_LESS "2.1.0")
-    set_if_higher(CMAKE_CXX_STANDARD 17)
-  elseif(NOT Torch_VERSION VERSION_LESS "1.5.0")
-    set_if_higher(CMAKE_CXX_STANDARD 14)
-  endif()
-  include_directories(${TORCH_INCLUDE_DIRS})
-  if(MKL_FOUND)
-    list(PREPEND math_libs ${TORCH_LIBRARIES})
-  else()
-    list(APPEND math_libs ${TORCH_LIBRARIES})
-  endif()
-  add_compile_options(${TORCH_CXX_FLAGS})
   target_link_libraries(${ABACUS_BIN_NAME} deepks)

   find_path(libnpy_SOURCE_DIR npy.hpp HINTS ${libnpy_INCLUDE_DIR})
@@ -470,6 +454,25 @@ if(ENABLE_DEEPKS)
   add_compile_definitions(__DEEPKS)
 endif()

+# Torch uses outdated components to detect CUDA arch, causing failure on
+# latest CUDA kits. Set CMake variable TORCH_CUDA_ARCH_LIST in the form of
+# "major.minor" if required.
+if(ENABLE_DEEPKS OR DEFINED Torch_DIR)
+  find_package(Torch REQUIRED)
+  if(NOT Torch_VERSION VERSION_LESS "2.1.0")
+    set_if_higher(CMAKE_CXX_STANDARD 17)
+  elseif(NOT Torch_VERSION VERSION_LESS "1.5.0")
+    set_if_higher(CMAKE_CXX_STANDARD 14)
+  endif()
+  include_directories(${TORCH_INCLUDE_DIRS})
+  if(MKL_FOUND)
+    list(PREPEND math_libs ${TORCH_LIBRARIES})
+  else()
+    list(APPEND math_libs ${TORCH_LIBRARIES})
+  endif()
+  add_compile_options(${TORCH_CXX_FLAGS})
+endif()
+
 if (ENABLE_CNPY)
   find_path(cnpy_SOURCE_DIR
     cnpy.h
@@ -590,13 +593,14 @@ if(DEFINED DeePMD_DIR)
     add_compile_definitions(__DPMDC)
   else()
     target_link_libraries(${ABACUS_BIN_NAME} DeePMD::deepmd_cc)
-    if(NOT DEFINED TensorFlow_DIR)
-      set(TensorFlow_DIR ${DeePMD_DIR})
-    endif()
-    find_package(TensorFlow REQUIRED)
-    if(TensorFlow_FOUND)
-      target_link_libraries(${ABACUS_BIN_NAME} TensorFlow::tensorflow_cc)
-    endif()
+  endif()
+endif()
+
+if(DEFINED TensorFlow_DIR)
+  find_package(TensorFlow REQUIRED)
+  include_directories(${TensorFlow_DIR}/include)
+  if(TensorFlow_FOUND)
+    target_link_libraries(${ABACUS_BIN_NAME} TensorFlow::tensorflow_cc)
   endif()
 endif()

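With this restructuring, Torch and TensorFlow detection no longer hinges on the DeePKS/DeePMD branches: defining `Torch_DIR` or `TensorFlow_DIR` on its own is enough to trigger the corresponding `find_package` call. A hedged configure sketch; the paths below are placeholders, not defaults shipped with ABACUS:

```bash
# Torch is now located whenever Torch_DIR is defined, even with DeePKS disabled.
cmake -B build -DTorch_DIR=/path/to/libtorch/share/cmake/Torch

# TensorFlow linking is likewise driven by TensorFlow_DIR alone.
cmake -B build -DDeePMD_DIR=/path/to/deepmd-kit -DTensorFlow_DIR=/path/to/tensorflow
```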

docs/advanced/acceleration/cuda.md

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ The ABACUS program will automatically determine whether the current ELPA support
 ## Run with the GPU support by editing the INPUT script:

 In `INPUT` file we need to set the input parameter [device](../input_files/input-main.md#device) to `gpu`. If this parameter is not set, ABACUS will try to determine if there are available GPUs.
-- Set `ks_solver`: For the PW basis, CG, BPCG and Davidson methods are supported on GPU; set the input parameter [ks_solver](../input_files/input-main.md#ks_solver) to `cg`, `bpcg` or `dav`. For the LCAO basis, `cusolver` and `elpa` is supported on GPU.
+- Set `ks_solver`: For the PW basis, CG, BPCG and Davidson methods are supported on GPU; set the input parameter [ks_solver](../input_files/input-main.md#ks_solver) to `cg`, `bpcg` or `dav`. For the LCAO basis, `cusolver`, `cusolvermp` and `elpa` is supported on GPU.
 - **multi-card**: ABACUS allows for multi-GPU acceleration. If you have multiple GPU cards, you can run ABACUS with several MPI processes, and each process will utilize one GPU card. For example, the command `mpirun -n 2 abacus` will by default launch two GPUs for computation. If you only have one card, this command will only start one GPU.

 ## Examples
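A minimal sketch of the GPU LCAO setup the updated page describes. The INPUT keywords come from the documentation above; the two-process launch simply mirrors the `mpirun -n 2 abacus` example, and the solver choice assumes ABACUS was built with CUDA and cuSOLVERMp support:

```bash
# Sketch: run the LCAO solver on two GPUs, one MPI process per card.
cat > INPUT_gpu_fragment << 'EOF'
device        gpu
basis_type    lcao
ks_solver     cusolvermp
EOF
# Merge these keywords into your INPUT, then launch with as many processes as GPUs.
mpirun -n 2 abacus
```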

docs/advanced/input_files/input-main.md

Lines changed: 29 additions & 10 deletions
@@ -39,7 +39,6 @@
 - [pw\_diag\_thr](#pw_diag_thr)
 - [pw\_diag\_nmax](#pw_diag_nmax)
 - [pw\_diag\_ndim](#pw_diag_ndim)
-- [diago\_full\_acc](#diago_full_acc)
 - [erf\_ecut](#erf_ecut)
 - [fft\_mode](#fft_mode)
 - [erf\_height](#erf_height)
@@ -161,6 +160,7 @@
 - [nbands\_istate](#nbands_istate)
 - [bands\_to\_print](#bands_to_print)
 - [if\_separate\_k](#if_separate_k)
+- [out\_elf](#out_elf)
 - [Density of states](#density-of-states)
 - [dos\_edelta\_ev](#dos_edelta_ev)
 - [dos\_sigma](#dos_sigma)
@@ -778,12 +778,6 @@ These variables are used to control the plane wave related parameters.
 - **Description**: Only useful when you use `ks_solver = dav` or `ks_solver = dav_subspace`. It indicates dimension of workspace(number of wavefunction packets, at least 2 needed) for the Davidson method. A larger value may yield a smaller number of iterations in the algorithm but uses more memory and more CPU time in subspace diagonalization.
 - **Default**: 4

-### diago_full_acc
-
-- **Type**: bool
-- **Description**: Only useful when you use `ks_solver = dav_subspace`. If `TRUE`, all the empty states are diagonalized at the same level of accuracy of the occupied ones. Otherwise the empty states are diagonalized using a larger threshold (10-5) (this should not affect total energy, forces, and other ground-state properties).
-- **Default**: false
-
 ### erf_ecut

 - **Type**: Real
@@ -924,14 +918,16 @@ calculations.
   - **cg**: cg method.
   - **bpcg**: bpcg method, which is a block-parallel Conjugate Gradient (CG) method, typically exhibits higher acceleration in a GPU environment.
   - **dav**: the Davidson algorithm.
-  - **dav_subspace**: subspace Davidson algorithm
+  - **dav_subspace**: Davidson algorithm without orthogonalization operation, this method is the most recommended for efficiency. `pw_diag_ndim` can be set to 2 for this method.

   For atomic orbitals basis,

   - **lapack**: This method is only avaliable for serial version. For parallel version please use **scalapack_gvx**.
   - **genelpa**: This method should be used if you choose localized orbitals.
   - **scalapack_gvx**: Scalapack can also be used for localized orbitals.
   - **cusolver**: This method needs building with CUDA and at least one gpu is available.
+  - **cusolvermp**: This method supports multi-GPU acceleration and needs building with CUDA. Note that when using cusolvermp, you should set the number of MPI processes to be equal to the number of GPUs.
+  - **elpa**: The ELPA solver supports both CPU and GPU. By setting the `device` to GPU, you can launch the ELPA solver with GPU acceleration (provided that you have installed a GPU-supported version of ELPA, which requires you to manually compile and install ELPA, and the ABACUS should be compiled with -DUSE_ELPA=ON and -DUSE_CUDA=ON). The ELPA solver also supports multi-GPU acceleration.

   If you set ks_solver=`genelpa` for basis_type=`pw`, the program will be stopped with an error message:

@@ -940,7 +936,13 @@ calculations.
   ```

   Then the user has to correct the input file and restart the calculation.
-- **Default**: cg (plane-wave basis), or genelpa (localized atomic orbital basis, if compiling option `USE_ELPA` has been set),lapack (localized atomic orbital basis, if compiling option `ENABLE_MPI` has not been set), scalapack_gvx, (localized atomic orbital basis, if compiling option `USE_ELPA` has not been set and if compiling option `ENABLE_MPI` has been set)
+- **Default**:
+  - **PW basis**: cg.
+  - **LCAO basis**:
+    - genelpa (if compiling option `USE_ELPA` has been set)
+    - lapack (if compiling option `ENABLE_MPI` has not been set)
+    - scalapack_gvx (if compiling option `USE_ELPA` has not been set and compiling option `ENABLE_MPI` has been set)
+    - cusolver (if compiling option `USE_CUDA` has been set)

 ### nbands

@@ -1521,7 +1523,7 @@ These variables are used to control the output of properties.
 - **Type**: Integer \[Integer\](optional)
 - **Description**:
   The first integer controls whether to output the charge density on real space grids:
-  - 1. Output the charge density (in Bohr^-3) on real space grids into the density files in the folder `OUT.${suffix}`. The files are named as:
+  - 1: Output the charge density (in Bohr^-3) on real space grids into the density files in the folder `OUT.${suffix}`. The files are named as:
     - nspin = 1: SPIN1_CHG.cube;
     - nspin = 2: SPIN1_CHG.cube, and SPIN2_CHG.cube;
     - nspin = 4: SPIN1_CHG.cube, SPIN2_CHG.cube, SPIN3_CHG.cube, and SPIN4_CHG.cube.
@@ -1801,6 +1803,23 @@ The band (KS orbital) energy for each (k-point, spin, band) will be printed in t
 - **Description**: Specifies whether to write the partial charge densities for all k-points to individual files or merge them. **Warning**: Enabling symmetry may produce incorrect results due to incorrect k-point weights. Therefore, when calculating partial charge densities, it is strongly recommended to set `symmetry = -1`.
 - **Default**: false

+### out_elf
+
+- **Type**: Integer \[Integer\](optional)
+- **Availability**: Only for Kohn-Sham DFT and Orbital Free DFT.
+- **Description**: Whether to output the electron localization function (ELF) in the folder `OUT.${suffix}`. The files are named as
+  - nspin = 1:
+    - ELF.cube: ${\rm{ELF}} = \frac{1}{1+\chi^2}$, $\chi = \frac{\frac{1}{2}\sum_{i}{f_i |\nabla\psi_{i}|^2} - \frac{|\nabla\rho|^2}{8\rho}}{\frac{3}{10}(3\pi^2)^{2/3}\rho^{5/3}}$;
+  - nspin = 2:
+    - ELF_SPIN1.cube, ELF_SPIN2.cube: ${\rm{ELF}}_\sigma = \frac{1}{1+\chi_\sigma^2}$, $\chi_\sigma = \frac{\frac{1}{2}\sum_{i}{f_i |\nabla\psi_{i,\sigma}|^2} - \frac{|\nabla\rho_\sigma|^2}{8\rho_\sigma}}{\frac{3}{10}(6\pi^2)^{2/3}\rho_\sigma^{5/3}}$;
+    - ELF.cube: ${\rm{ELF}} = \frac{1}{1+\chi^2}$, $\chi = \frac{\frac{1}{2}\sum_{i,\sigma}{f_i |\nabla\psi_{i,\sigma}|^2} - \sum_{\sigma}{\frac{|\nabla\rho_\sigma|^2}{8\rho_\sigma}}}{\sum_{\sigma}{\frac{3}{10}(6\pi^2)^{2/3}\rho_\sigma^{5/3}}}$;
+
+  The second integer controls the precision of the kinetic energy density output, if not given, will use `3` as default. For purpose restarting from this file and other high-precision involved calculation, recommend to use `10`.
+
+  ---
+  In molecular dynamics calculations, the output frequency is controlled by [out_interval](#out_interval).
+- **Default**: 0 3
+
 [back to top](#full-list-of-input-keywords)

 ## Density of states
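For the new `out_elf` keyword, the description implies the same two-integer pattern as the other output switches: the first integer turns the ELF output on and the second sets the output precision. A hedged INPUT fragment; the value `1` enabling the output is an assumption drawn by analogy with `out_chg`, and `10` follows the high-precision recommendation above:

```bash
# Sketch: request ELF output with high precision (assumes 1 = "write the file").
cat >> INPUT << 'EOF'
out_elf    1 10
EOF
```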

docs/advanced/install.md

Lines changed: 25 additions & 10 deletions
@@ -38,13 +38,22 @@ cmake -B build -DENABLE_DEEPKS=1 -DTorch_DIR=~/libtorch/share/cmake/Torch/ -Dlib
 If the Deep Potential model is employed in Molecule Dynamics calculations, the following prerequisites and steps are needed:

 - [DeePMD-kit](https://github.com/deepmodeling/deepmd-kit)
-- [TensorFlow](https://www.tensorflow.org/)
+- [TensorFlow](https://www.tensorflow.org/) (optional)
+- [LibTorch](https://pytorch.org/) (optional)

+In the simplest case, the `tensorflow_cc` and `torch` libraries are in the same directory as the `deepmd_c`/`deepmd_cc` libraries, then
 ```bash
-cmake -B build -DDeePMD_DIR=~/deepmd-kit -DTensorFlow_DIR=~/tensorflow
+cmake -B build -DDeePMD_DIR=/dir_to_deepmd-kit
 ```
+DeePMD-kit supports TensorFlow backend but its libraries are placed at another directory, then

-> `deepmd_c`/`deepmd_cc` and `tensorflow_cc` libraries would be called according to `DeePMD_DIR` and `TensorFlow_DIR`, which is showed in detail in [this page](https://github.com/deepmodeling/deepmd-kit/blob/master/doc/inference/cxx.md). If `TensorFlow_DIR` is not defined, it will be the same as `DeePMD_DIR`. Note that `tensorflow_cc` is not required if `deepmd_c` is found.
+```bash
+cmake -B build -DDeePMD_DIR=/dir_to_deepmd-kit -DTensorFlow_DIR=/dir_to_tensorflow
+```
+Similarly, DeePMD-kit supports PyTorch backend but its libraries are placed at another directory, then
+```bash
+cmake -B build -DDeePMD_DIR=/dir_to_deepmd-kit -DTorch_DIR=/dir_to_pytorch
+```

 ## Build with LibRI and LibComm

@@ -93,9 +102,9 @@ cmake -B build -DUSE_CUDA=1 -DCMAKE_CUDA_COMPILER=${path to cuda toolkit}/bin/nv

 ## Build math library from source

-> Note: This flag is **enabled by default**. It will get better performance than the standard implementation on `gcc` and `clang`. But it **will be disabled** when using `Intel Compiler` since the math functions will get wrong results and the performance is also unexpectly poor.
+> Note: We recommend using the latest available compiler sets, since they offer faster implementations of math functions.

-To build math functions from source code, instead of using c++ standard implementation, define `USE_ABACUS_LIBM` flag.
+This flag is disabled by default. To build math functions from source code, define `USE_ABACUS_LIBM` flag. It is expected to get a better performance on legacy versions of `gcc` and `clang`.

 Currently supported math functions:
 `sin`, `cos`, `sincos`, `exp`, `cexp`
@@ -282,15 +291,21 @@ directly.

 > Note: This part is only required if you want to load a trained DeeP Potential and run molecular dynamics with that. To train the DeeP Potential with DP-GEN, no extra prerequisite is needed and please refer to [this page](http://abacus.deepmodeling.com/en/latest/advanced/interface/dpgen.html) for ABACUS interface with DP-GEN.

-To compile ABACUS with DeePMD-kit, you need to define `DeePMD_DIR` and `TensorFlow_DIR` in the file `Makefile.vars` or use
+To compile ABACUS with DeePMD-kit, you need to define `DeePMD_DIR` and `TensorFlow_DIR` (TensorFlow Backend, optional) and/or `LIBTORCH_DIR` (PyTorch Backend, optional) in the file `Makefile.vars`.

+Or the `tensorflow_cc` and `torch` libraries are in the same directory as the `deepmd_c`/`deepmd_cc` libraries, then
 ```makefile
-make DeePMD_DIR=~/deepmd-kit TensorFlow_DIR=~/tensorflow
+make DeePMD_DIR=/dir_to_deepmd-kit
 ```
+DeePMD-kit supports TensorFlow backend but its libraries are placed at another directory, then

-directly.
-
-> `deepmd_c`/`deepmd_cc` and `tensorflow_cc` libraries would be called according to `DeePMD_DIR` and `TensorFlow_DIR`, which is showed in detail in [this page](https://github.com/deepmodeling/deepmd-kit/blob/master/doc/inference/cxx.md). If `TensorFlow_DIR` is not defined, it will be the same as `DeePMD_DIR`. Note that `tensorflow_cc` is not required if `deepmd_c` is found.
+```makefile
+make DeePMD_DIR=/dir_to_deepmd-kit TensorFlow_DIR=/dir_to_tensorflow
+```
+Similarly, DeePMD-kit supports PyTorch backend but its libraries are placed at another directory, then
+```makefile
+make DeePMD_DIR=/dir_to_deepmd-kit Torch_DIR=/dir_to_pytorch
+```

 ### Add LibRI Support
 To use new EXX, you need two libraries: [LibRI](https://github.com/abacusmodeling/LibRI) and [LibComm](https://github.com/abacusmodeling/LibComm) and need to define `LIBRI_DIR` and `LIBCOMM_DIR` in the file `Makefile.vars` or use
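When the DeePMD-kit install was built with both backends and each runtime lives in its own prefix, the two hints can be combined in one configure line. A sketch under that assumption; all paths are placeholders following the documentation's naming:

```bash
# Sketch: point ABACUS at DeePMD-kit plus both backend runtimes at once.
cmake -B build \
      -DDeePMD_DIR=/dir_to_deepmd-kit \
      -DTensorFlow_DIR=/dir_to_tensorflow \
      -DTorch_DIR=/dir_to_pytorch
cmake --build build -j"$(nproc)"
```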

examples/lr-tddft/lcao_Si2/INPUT

Lines changed: 0 additions & 1 deletion
@@ -37,4 +37,3 @@ out_alllog 1

 nvirt 19
 abs_wavelen_range 100 175
-#diago_full_acc 1

python/pyabacus/src/py_diago_dav_subspace.hpp

Lines changed: 7 additions & 9 deletions
@@ -106,30 +106,28 @@ class PyDiagoDavSubspace
         double tol,
         int max_iter,
         bool need_subspace,
-        std::vector<bool> is_occupied,
+        std::vector<double> diag_ethr,
         bool scf_type,
         hsolver::diag_comm_info comm_info
     ) {
         auto hpsi_func = [mm_op] (
             std::complex<double> *psi_in,
             std::complex<double> *hpsi_out,
-            const int nband_in,
-            const int nbasis_in,
-            const int band_index1,
-            const int band_index2
+            const int ld_psi,
+            const int nvec
         ) {
             // Note: numpy's py::array_t is row-major, but
             // our raw pointer-array is column-major
-            py::array_t<std::complex<double>, py::array::f_style> psi({nbasis_in, band_index2 - band_index1 + 1});
+            py::array_t<std::complex<double>, py::array::f_style> psi({ld_psi, nvec});
             py::buffer_info psi_buf = psi.request();
             std::complex<double>* psi_ptr = static_cast<std::complex<double>*>(psi_buf.ptr);
-            std::copy(psi_in + band_index1 * nbasis_in, psi_in + (band_index2 + 1) * nbasis_in, psi_ptr);
+            std::copy(psi_in, psi_in + nvec * ld_psi, psi_ptr);

             py::array_t<std::complex<double>, py::array::f_style> hpsi = mm_op(psi);

             py::buffer_info hpsi_buf = hpsi.request();
             std::complex<double>* hpsi_ptr = static_cast<std::complex<double>*>(hpsi_buf.ptr);
-            std::copy(hpsi_ptr, hpsi_ptr + (band_index2 - band_index1 + 1) * nbasis_in, hpsi_out);
+            std::copy(hpsi_ptr, hpsi_ptr + nvec * ld_psi, hpsi_out);
         };

         obj = std::make_unique<hsolver::Diago_DavSubspace<std::complex<double>, base_device::DEVICE_CPU>>(
@@ -143,7 +141,7 @@ class PyDiagoDavSubspace
             comm_info
         );

-        return obj->diag(hpsi_func, psi, nbasis, eigenvalue, is_occupied, scf_type);
+        return obj->diag(hpsi_func, psi, nbasis, eigenvalue, diag_ethr.data(), scf_type);
     }

     private:

python/pyabacus/src/py_diago_david.hpp

Lines changed: 5 additions & 8 deletions
@@ -111,30 +111,27 @@ class PyDiagoDavid
         auto hpsi_func = [mm_op] (
             std::complex<double> *psi_in,
             std::complex<double> *hpsi_out,
-            const int nband_in,
-            const int nbasis_in,
-            const int band_index1,
-            const int band_index2
+            const int ld_psi,
+            const int nvec
         ) {
             // Note: numpy's py::array_t is row-major, but
             // our raw pointer-array is column-major
-            py::array_t<std::complex<double>, py::array::f_style> psi({nbasis_in, band_index2 - band_index1 + 1});
+            py::array_t<std::complex<double>, py::array::f_style> psi({ld_psi, nvec});
             py::buffer_info psi_buf = psi.request();
             std::complex<double>* psi_ptr = static_cast<std::complex<double>*>(psi_buf.ptr);
-            std::copy(psi_in + band_index1 * nbasis_in, psi_in + (band_index2 + 1) * nbasis_in, psi_ptr);
+            std::copy(psi_in, psi_in + nvec * ld_psi, psi_ptr);

             py::array_t<std::complex<double>, py::array::f_style> hpsi = mm_op(psi);

             py::buffer_info hpsi_buf = hpsi.request();
             std::complex<double>* hpsi_ptr = static_cast<std::complex<double>*>(hpsi_buf.ptr);
-            std::copy(hpsi_ptr, hpsi_ptr + (band_index2 - band_index1 + 1) * nbasis_in, hpsi_out);
+            std::copy(hpsi_ptr, hpsi_ptr + nvec * ld_psi, hpsi_out);
         };

         auto spsi_func = [this] (
             const std::complex<double> *psi_in,
             std::complex<double> *spsi_out,
             const int nrow,
-            const int npw,
             const int nbands
         ) {
             syncmem_op()(this->ctx, this->ctx, spsi_out, psi_in, static_cast<size_t>(nbands * nrow));

python/pyabacus/src/py_hsolver.cpp

Lines changed: 3 additions & 3 deletions
@@ -59,8 +59,8 @@ void bind_hsolver(py::module& m)
             The maximum number of iterations.
         need_subspace : bool
             Whether to use the subspace function.
-        is_occupied : list[bool]
-            A list of boolean values indicating whether the band is occupied,
+        diag_ethr : list[float]
+            A list of float values indicating the thresholds of each band for the diagonalization,
             meaning that the corresponding eigenvalue is to be calculated.
         scf_type : bool
             Whether to use the SCF type, which is used to determine the
@@ -76,7 +76,7 @@ void bind_hsolver(py::module& m)
         "tol"_a,
         "max_iter"_a,
         "need_subspace"_a,
-        "is_occupied"_a,
+        "diag_ethr"_a,
         "scf_type"_a,
         "comm_info"_a)
         .def("set_psi", &py_hsolver::PyDiagoDavSubspace::set_psi, R"pbdoc(
