
Commit fa4596e

Merge pull request #453 from Cytnx-dev/dev-master
Dev master to master
2 parents: ba1eb2f + 5d37d81 · commit fa4596e


67 files changed: +2874 −615 lines changed

Install.sh

Lines changed: 3 additions & 4 deletions
@@ -3,8 +3,7 @@
 #=========================================================
 # [Note] Set the destination path for installation in Ins_dest
 #----------------------------------------------
-# Ins_dest="/usr/local/cytnx"
-Ins_dest="~/Cytnx_lib"
+Ins_dest="/usr/local/cytnx"
 FLAG="${FLAG} -DCMAKE_INSTALL_PREFIX=${Ins_dest}"
 #-----------------------------------------------
 
@@ -19,7 +18,7 @@ FLAG="${FLAG} -DCMAKE_INSTALL_PREFIX=${Ins_dest}"
 # Please follow the guide from official mkl/oneMKL "post-installation" part
 # to source the proper setvars.sh and/or vars.sh
 #---------------------------
-FLAG="${FLAG} -DUSE_MKL=ON"
+FLAG="${FLAG} -DUSE_MKL=OFF"
 #---------------------------
 # 2-b) use OpenBLAS (DEFAULT = on (by settung DUSE_MKL=OFF above))
 # [Note] By default it will automatically find openblas installed
@@ -155,7 +154,7 @@ FLAG="${FLAG} -DUSE_OMP=OFF"
 #=========================================================
 # [Note] Wheather to run cytnx tests (DEFAULT = OFF)
 #-----------------------------------
-DRUN_TESTS=ON
+DRUN_TESTS=OFF
 FLAG="${FLAG} -DRUN_TESTS=${DRUN_TESTS}"
 #-----------------------------------
 
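For reference, a rough sketch (not part of this diff) of the configure step these defaults amount to, assuming Install.sh eventually hands the accumulated FLAG string to cmake; only the options touched above are shown, and the source path is a placeholder:

# hypothetical effective configure call implied by the new defaults
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/cytnx -DUSE_MKL=OFF -DRUN_TESTS=OFF <cytnx-source-dir>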

bm_tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@ add_executable(
   linalg/Directsum_bm.cpp
   linalg/Svd_bm.cpp
   linalg/Svd_truncate_bm.cpp
+  linalg/Lanczos_bm.cpp
 
 )
 if(USE_CUDA)

bm_tests/linalg/Lanczos_bm.cpp

Lines changed: 138 additions & 0 deletions
@@ -0,0 +1,138 @@
+#include <benchmark/benchmark.h>
+#include <cytnx.hpp>
+using namespace cytnx;
+
+namespace BMTest_Lanczos {
+  UniTensor CreateOneSiteEffHam(const int d, const int D, const unsigned int dtype = Type.Double,
+                                const int device = Device.cpu);
+  UniTensor CreateA(const int d, const int D, const unsigned int dtype = Type.Double,
+                    const int device = Device.cpu);
+  class OneSiteOp : public LinOp {
+   public:
+    OneSiteOp(const int d, const int D, const unsigned int dtype = Type.Double,
+              const int& device = Device.cpu)
+        : LinOp("mv", D * D, dtype, device) {
+      EffH = CreateOneSiteEffHam(d, D, dtype, device);
+    }
+    UniTensor EffH;
+
+    /*
+     * |-|--"vil" "pi" "vir"--|-|                          |-|--"vil" "pi" "vir"--|-|
+     * | |        +           | |          "po"            | |        +           | |
+     * |L|- ------O-----------|R|  dot      |           =  |L|- ------O-----------|R|
+     * | |        +           | |    "vol"--A--"vor"       | |        +           | |
+     * |_|--"vol" "po" "vor"--|_|                          |_|---------A----------|_|
+     *
+     * Then relabels ["vil", "pi", "vir"] -> ["vol", "po", "vor"]
+     *
+     * "vil": virtual in bond left
+     * "po": physical out bond
+     */
+    UniTensor matvec(const UniTensor& A) override {
+      auto tmp = Contract(EffH, A);
+      tmp.permute_({"vil", "pi", "vir"}, 1);
+      tmp.relabels_(A.labels());
+      return tmp;
+    }
+  };
+
+  // describe: test not-supported UniTensor Type
+
+  /*
+   *    -1
+   *     |
+   *  0--A--2
+   */
+  UniTensor CreateA(const int d, const int D, const unsigned int dtype, const int device) {
+    double low = -1.0, high = 1.0;
+    UniTensor A = UniTensor({Bond(D), Bond(d), Bond(D)}, {}, -1, dtype, device)
+                    .set_name("A")
+                    .relabels_({"vol", "po", "vor"})
+                    .set_rowrank_(1);
+    if (Type.is_float(A.dtype())) {
+      random::uniform_(A, low, high, 0);
+    }
+    return A;
+  }
+
+  /*
+   * |-|--"vil" "pi" "vir"--|-|
+   * | |        +           | |
+   * |L|- ------O-----------|R|
+   * | |        +           | |
+   * |_|--"vol" "po" "vor"--|_|
+   */
+  UniTensor CreateOneSiteEffHam(const int d, const int D, const unsigned int dtype,
+                                const int device) {
+    double low = -1.0, high = 1.0;
+    std::vector<Bond> bonds = {Bond(D), Bond(d), Bond(D), Bond(D), Bond(d), Bond(D)};
+    std::vector<std::string> heff_labels = {"vil", "pi", "vir", "vol", "po", "vor"};
+    UniTensor HEff = UniTensor(bonds, {}, -1, dtype, device)
+                       .set_name("HEff")
+                       .relabels_(heff_labels)
+                       .set_rowrank(bonds.size() / 2);
+    auto HEff_shape = HEff.shape();
+    auto in_dim = 1;
+    for (int i = 0; i < HEff.rowrank(); ++i) {
+      in_dim *= HEff_shape[i];
+    }
+    auto out_dim = in_dim;
+    if (Type.is_float(HEff.dtype())) {
+      random::uniform_(HEff, low, high, 0);
+    }
+    auto HEff_mat = HEff.get_block();
+    HEff_mat.reshape_({in_dim, out_dim});
+    HEff_mat = HEff_mat + HEff_mat.permute({1, 0});  // symmetrize
+
+    // Rescale the spectrum so that H converges in ExpM
+    auto eigs = HEff_mat.Eigh();
+    auto e = UniTensor(eigs[0], true) * 0.01;
+    e.set_labels({"a", "b"});
+    auto v = UniTensor(eigs[1]);
+    v.set_labels({"i", "a"});
+    auto vt = UniTensor(linalg::InvM(v.get_block()));
+    vt.set_labels({"b", "j"});
+    HEff_mat = Contract(Contract(e, v), vt).get_block();
+
+    // HEff_mat = linalg::Matmul(HEff_mat, HEff_mat.permute({1, 0}).Conj());  // positive definite
+    HEff_mat.reshape_(HEff_shape);
+    HEff.put_block(HEff_mat);
+    return HEff;
+  }
+
+  static void BM_Lanczos_Gnd_F64(benchmark::State& state) {
+    // prepare data
+    int d = 2;
+    auto D = state.range(0);
+    auto op = OneSiteOp(d, D);
+    auto Tin = CreateA(d, D);
+    const double crit = 1.0e+8;
+    const int maxiter = 2;
+    bool is_V = true;
+    int k = 1;
+    bool is_row = false;
+    int max_krydim = 0;
+    // start test here
+    for (auto _ : state) {
+      auto x = linalg::Lanczos(&op, Tin, "Gnd", crit, maxiter, k, is_V, is_row, max_krydim);
+    }
+  }
+  BENCHMARK(BM_Lanczos_Gnd_F64)->Args({10})->Args({30})->Unit(benchmark::kMillisecond);
+
+  static void BM_Lanczos_Exp_F64(benchmark::State& state) {
+    // prepare data
+    int d = 2;
+    auto D = state.range(0);
+    auto op = OneSiteOp(d, D);
+    auto Tin = CreateA(d, D);
+    const double crit = 1.0e+8;
+    double tau = 0.1;
+    const int maxiter = 2;
+    // start test here
+    for (auto _ : state) {
+      auto x = linalg::Lanczos_Exp(&op, Tin, tau, crit, maxiter);
+    }
+  }
+  BENCHMARK(BM_Lanczos_Exp_F64)->Args({10})->Args({30})->Unit(benchmark::kMillisecond);
+
+}  // namespace BMTest_Lanczos
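Each BENCHMARK(...)->Args({10})->Args({30}) registration above runs its benchmark twice, with state.range(0), i.e. the bond dimension D, set to 10 and then 30, reporting times in milliseconds. If a denser sweep were ever wanted, Google Benchmark's range helpers could be used instead; a hypothetical variant, not part of this commit:

// sketch only: sweep D = 8, 16, 32, 64 rather than the two fixed sizes above
BENCHMARK(BM_Lanczos_Gnd_F64)->RangeMultiplier(2)->Range(8, 64)->Unit(benchmark::kMillisecond);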

cmake/Modules/FindCUQUANTUM.cmake

Lines changed: 1 addition & 13 deletions
@@ -27,19 +27,7 @@ else()
 endif()
 
 message(STATUS " cudaver: ${CUDAToolkit_VERSION_MAJOR}" )
-if((${CUDAToolkit_VERSION_MAJOR} LESS_EQUAL 10))
-  message(FATAL_ERROR "cuquantum requires CUDA ver.11+")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 11) AND (${CUDAToolkit_VERSION_MAJOR} LESS 12) AND (${CUDAToolkit_VERSION_MINOR} LESS_EQUAL 0))
-  set(CUTNLIB_DIR "lib/11.0")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 11) AND (${CUDAToolkit_VERSION_MAJOR} LESS 12) AND (${CUDAToolkit_VERSION_MINOR} GREATER_EQUAL 1))
-  set(CUTNLIB_DIR "lib/11")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 12))
-  if(EXISTS "${CUQUANTUM_ROOT}/lib/12")
-    set(CUTNLIB_DIR "lib/12")
-  else()
-    set(CUTNLIB_DIR "lib")
-  endif()
-endif()
+set(CUTNLIB_DIR "lib")
 
 set(CUQUANTUM_LIBRARY_DIRS ${CUQUANTUM_ROOT}/${CUTNLIB_DIR})
 set(CUQUANTUM_INCLUDE_DIRS ${CUQUANTUM_ROOT}/include)

cytnx/UniTensor_conti.py

Lines changed: 10 additions & 0 deletions
@@ -211,6 +211,16 @@ def relabel_(self, idx:int, new_label:str):
 
 
 
+@add_ovld_method(UniTensor)
+def relabel_(self, old_labels:List[str],new_labels:List[str]):
+    self.c_relabel_(old_labels,new_labels);
+    return self
+
+@add_ovld_method(UniTensor)
+def relabel_(self, new_labels:List[str]):
+    self.c_relabel_(new_labels);
+    return self
+
 @add_ovld_method(UniTensor)
 def relabels_(self, old_labels:List[str],new_labels:List[str]):
     self.c_relabels_(old_labels,new_labels);
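The two new relabel_ overloads mirror the existing relabels_ bindings below them, forwarding to c_relabel_ and returning self so calls can be chained. A minimal Python usage sketch, assuming a UniTensor wrapped around a plain Tensor (whose labels default to "0", "1", ...):

import cytnx

uT = cytnx.UniTensor(cytnx.zeros([2, 3]))  # labels default to "0", "1"
uT.relabel_(["a", "b"])                    # replace all labels in place
uT.relabel_(["a"], ["x"])                  # rename label "a" -> "x" in place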
