Skip to content

Commit 86b5d07

Browse files
committed
some cleanup in the Lanczos files, avoids function names starting with _ followed by upper case
1 parent c5dd9c4 commit 86b5d07

File tree

2 files changed

+38
-36
lines changed

2 files changed

+38
-36
lines changed

src/linalg/Lanczos.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -499,7 +499,7 @@ namespace cytnx {
499499
cytnx_error_msg(true, "[ERROR][Lanczos] 'which' should be 'LM', 'LA', 'SA'", "\n");
500500
}
501501

502-
// If the operator is complex hermaitian, just call Arnoldi algorthm since there is no
502+
// If the operator is complex Hermitian, just call Arnoldi algorithm since there is no
503503
// specific routine for complex Hermitian operation in arpack.
504504
if (Type.is_complex(Hop->dtype())) {
505505
try {

src/linalg/Lanczos_Exp.cpp

Lines changed: 37 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -20,29 +20,30 @@ namespace cytnx {
2020
using namespace std;
2121

2222
// <A|B>
23-
static Scalar _Dot(const UniTensor &A, const UniTensor &B) {
23+
static Scalar Dot_internal(const UniTensor &A, const UniTensor &B) {
2424
return Contract(A.Dagger(), B).item();
2525
}
2626

2727
// project v to u
28-
static UniTensor _Gram_Schimidt_proj(const UniTensor &v, const UniTensor &u) {
29-
auto nu = _Dot(u, v);
30-
auto de = _Dot(u, u);
28+
static UniTensor Gram_Schmidt_proj_internal(const UniTensor &v, const UniTensor &u) {
29+
auto nu = Dot_internal(u, v);
30+
auto de = Dot_internal(u, u);
3131
auto coe = nu / de;
3232
return coe * u;
3333
}
3434

35-
static UniTensor _Gram_Schimidt(const std::vector<UniTensor> &vs) {
35+
static UniTensor Gram_Schimidt_internal(const std::vector<UniTensor> &vs) {
3636
auto u = vs.at(0).clone();
3737
double low = -1.0, high = 1.0;
3838
random::uniform_(u, low, high);
3939
for (auto &v : vs) {
40-
u -= _Gram_Schimidt_proj(u, v);
40+
u -= Gram_Schmidt_proj_internal(u, v);
4141
}
4242
return u;
4343
}
4444

45-
static Tensor _resize_mat(const Tensor &src, const cytnx_uint64 r, const cytnx_uint64 c) {
45+
static Tensor resize_mat_internal(const Tensor &src, const cytnx_uint64 r,
46+
const cytnx_uint64 c) {
4647
const auto min_r = std::min(r, src.shape()[0]);
4748
const auto min_c = std::min(c, src.shape()[1]);
4849
// Tensor dst = src[{ac::range(0,min_r),ac::range(0,min_c)}];
@@ -61,9 +62,9 @@ namespace cytnx {
6162

6263
// BiCGSTAB method to solve the linear equation
6364
// ref: https://en.wikipedia.org/wiki/Biconjugate_gradient_stabilized_method
64-
UniTensor _invert_biCGSTAB(LinOp *Hop, const UniTensor &b, const UniTensor &Tin, const int &k,
65-
const double &CvgCrit = 1.0e-12,
66-
const unsigned int &Maxiter = 10000) {
65+
static UniTensor invert_biCGSTAB_internal(LinOp *Hop, const UniTensor &b, const UniTensor &Tin,
66+
const int &k, const double &CvgCrit = 1.0e-12,
67+
const unsigned int &Maxiter = 10000) {
6768
// the operation (I + Hop/k) on A
6869
auto I_plus_A_Op = [&](UniTensor A) {
6970
return ((Hop->matvec(A)) / k + A).relabels_(b.labels());
@@ -74,7 +75,7 @@ namespace cytnx {
7475
auto r0 = r;
7576
auto x = Tin;
7677
// choose p = (r0_hat, r)
77-
auto p = _Dot(r0, r);
78+
auto p = Dot_internal(r0, r);
7879
// choose pv = r
7980
auto pv = r;
8081

@@ -88,21 +89,21 @@ namespace cytnx {
8889
// as:https://en.wikipedia.org/wiki/Biconjugate_gradient_stabilized_method
8990
for (int i = 1; i < Maxiter; ++i) {
9091
auto v = I_plus_A_Op(pv_old);
91-
auto a = p_old / _Dot(r0, v);
92+
auto a = p_old / Dot_internal(r0, v);
9293
auto h = (x_old + a * pv_old).relabels_(b.labels());
9394
auto s = (r_old - a * v).relabels_(b.labels());
94-
if (abs(_Dot(s, s)) < CvgCrit) {
95+
if (abs(Dot_internal(s, s)) < CvgCrit) {
9596
x = h;
9697
break;
9798
}
9899
auto t = I_plus_A_Op(s);
99-
auto w = _Dot(t, s) / _Dot(t, t);
100+
auto w = Dot_internal(t, s) / Dot_internal(t, t);
100101
x = (h + w * s).relabels_(b.labels());
101102
r = (s - w * t).relabels_(b.labels());
102-
if (abs(_Dot(r, r)) < CvgCrit) {
103+
if (abs(Dot_internal(r, r)) < CvgCrit) {
103104
break;
104105
}
105-
auto p = _Dot(r0, r);
106+
auto p = Dot_internal(r0, r);
106107
auto beta = (p / p_old) * (a / w);
107108
pv = (r + beta * (pv_old - w * v)).relabels_(b.labels());
108109

@@ -116,9 +117,9 @@ namespace cytnx {
116117
}
117118

118119
// ref: https://doi.org/10.48550/arXiv.1111.1491
119-
void _Lanczos_Exp_Ut_positive(UniTensor &out, LinOp *Hop, const UniTensor &Tin,
120-
const double &CvgCrit, const unsigned int &Maxiter,
121-
const bool &verbose) {
120+
static void Lanczos_Exp_Ut_internal_positive(UniTensor &out, LinOp *Hop, const UniTensor &Tin,
121+
const double &CvgCrit, const unsigned int &Maxiter,
122+
const bool &verbose) {
122123
double delta = CvgCrit;
123124
int k = static_cast<int>(std::log(1.0 / delta));
124125
k = k < Maxiter ? k : Maxiter;
@@ -143,21 +144,21 @@ namespace cytnx {
143144
// Call the procedure Invert_A (v[i], k, eps1). The procedure returns a vector w[i], such
144145
// that,
145146
// |(I + A / k )^(−1) v[i] − w[i]| ≤ eps1 |v[i]| .
146-
auto w = _invert_biCGSTAB(Hop, v, v, k, eps1);
147+
auto w = invert_biCGSTAB_internal(Hop, v, v, k, eps1);
147148
// auto resi = ((Hop->matvec(w))/k + w).relabels_(v.labels()) - v;
148149

149150
// For j = 0 to i
150151
for (int j = 0; j <= i; ++j) {
151152
// Let a[j,i] = <v[j], w[i]>
152-
as.at({(cytnx_uint64)j, (cytnx_uint64)i}) = _Dot(Vs.at(j), w);
153+
as.at({(cytnx_uint64)j, (cytnx_uint64)i}) = Dot_internal(Vs.at(j), w);
153154
}
154155
// Define wp[i] = w[i] - \sum_{j=0}^{j=i} {a[j,i]v[j]}
155156
auto wp = w;
156157
for (int j = 0; j <= i; j++) {
157158
wp -= as.at({(cytnx_uint64)j, (cytnx_uint64)i}) * Vs.at(j);
158159
}
159160
// Let a[i+1, i] = |wp[i]|, v[i+1]=wp[i] / a[i+1, i]
160-
auto b = std::sqrt(double(_Dot(wp, wp).real()));
161+
auto b = std::sqrt(double(Dot_internal(wp, wp).real()));
161162
if (i < k) {
162163
as.at({(cytnx_uint64)i + 1, (cytnx_uint64)i}) = b;
163164
v = wp / b;
@@ -171,7 +172,7 @@ namespace cytnx {
171172
// Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively.
172173
UniTensor Vk_ut(Vk);
173174
Vk_ut.set_rowrank_(1);
174-
auto VkDag_ut = Vk_ut.Dagger();
175+
auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here!
175176
// Let T_k be the (k + 1) × (k + 1) matrix a[i,j] i,j is {0,...,k} and Tk_hat = 1 / 2
176177
// (Tk^Dagger + Tk).
177178
auto asT = as.permute({1, 0}).Conj().contiguous();
@@ -226,8 +227,9 @@ namespace cytnx {
226227
out.set_rowrank_(v0.rowrank());
227228
}
228229

229-
void _Lanczos_Exp_Ut(UniTensor &out, LinOp *Hop, const UniTensor &T, Scalar tau,
230-
const double &CvgCrit, const unsigned int &Maxiter, const bool &verbose) {
230+
static void Lanczos_Exp_Ut_internal(UniTensor &out, LinOp *Hop, const UniTensor &T, Scalar tau,
231+
const double &CvgCrit, const unsigned int &Maxiter,
232+
const bool &verbose) {
231233
const double beta_tol = 1.0e-6;
232234
std::vector<UniTensor> vs;
233235
cytnx_uint32 vec_len = Hop->nx();
@@ -237,12 +239,12 @@ namespace cytnx {
237239
Tensor B_mat;
238240
// prepare initial tensor and normalize
239241
auto v = T.clone();
240-
auto v_nrm = std::sqrt(double(_Dot(v, v).real()));
242+
auto v_nrm = std::sqrt(double(Dot_internal(v, v).real()));
241243
v = v / v_nrm;
242244

243245
// first iteration
244246
auto wp = (Hop->matvec(v)).relabels_(v.labels());
245-
auto alpha = _Dot(wp, v);
247+
auto alpha = Dot_internal(wp, v);
246248
Hp.at({0, 0}) = alpha;
247249
auto w = (wp - alpha * v).relabels_(v.labels());
248250

@@ -259,7 +261,7 @@ namespace cytnx {
259261
if (verbose) {
260262
std::cout << "Lancos iteration:" << i << std::endl;
261263
}
262-
auto beta = std::sqrt(double(_Dot(w, w).real()));
264+
auto beta = std::sqrt(double(Dot_internal(w, w).real()));
263265
v_old = v.clone();
264266
if (beta > beta_tol) {
265267
v = (w / beta).relabels_(v.labels());
@@ -269,8 +271,8 @@ namespace cytnx {
269271
std::cout << "beta too small, pick another vector." << i << std::endl;
270272
}
271273
// pick a new vector perpendicular to all vector in Vs
272-
v = _Gram_Schimidt(Vs).relabels_(v.labels());
273-
auto v_norm = _Dot(v, v);
274+
v = Gram_Schimidt_internal(Vs).relabels_(v.labels());
275+
auto v_norm = Dot_internal(v, v);
274276
// if the picked vector also too small, break and construct expH
275277
if (abs(v_norm) <= beta_tol) {
276278
if (verbose) {
@@ -285,12 +287,12 @@ namespace cytnx {
285287
Hp.at({(cytnx_uint64)i, (cytnx_uint64)i - 1}) =
286288
Hp.at({(cytnx_uint64)i - 1, (cytnx_uint64)i}) = beta;
287289
wp = (Hop->matvec(v)).relabels_(v.labels());
288-
alpha = _Dot(wp, v);
290+
alpha = Dot_internal(wp, v);
289291
Hp.at({(cytnx_uint64)i, (cytnx_uint64)i}) = alpha;
290292
w = (wp - alpha * v - beta * v_old).relabels_(v.labels());
291293

292294
// Converge check
293-
Hp_sub = _resize_mat(Hp, i + 1, i + 1);
295+
Hp_sub = resize_mat_internal(Hp, i + 1, i + 1);
294296
// We use ExpM since H*tau may not be Hermitian if tau is complex.
295297
B_mat = linalg::ExpM(Hp_sub * tau);
296298
// Set the error as the element of bottom left of the exp(H_sub*tau)
@@ -311,7 +313,7 @@ namespace cytnx {
311313
// Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively.
312314
UniTensor Vk_ut(Vk);
313315
Vk_ut.set_rowrank_(1);
314-
auto VkDag_ut = Vk_ut.Dagger();
316+
auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here!
315317
/*
316318
* |||
317319
* |-----|
@@ -395,8 +397,8 @@ namespace cytnx {
395397
}
396398
}
397399

398-
//_Lanczos_Exp_Ut_positive(out, Hop, v0, _cvgcrit, Maxiter, verbose);
399-
_Lanczos_Exp_Ut(out, Hop, v0, tau, _cvgcrit, Maxiter, verbose);
400+
// Lanczos_Exp_Ut_internal_positive(out, Hop, v0, _cvgcrit, Maxiter, verbose);
401+
Lanczos_Exp_Ut_internal(out, Hop, v0, tau, _cvgcrit, Maxiter, verbose);
400402

401403
return out;
402404

0 commit comments

Comments
 (0)