diff --git a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index 2a4e5fbc1..c801d1116 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -41,7 +41,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): return psivec, energy[0].item() - ## Initialiaze MPO + ## Initialize MPO ##>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> d = 2 s = 0.5 @@ -49,6 +49,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): bd_phys = cytnx.Bond(cytnx.BD_KET,[[1],[-1]],[1,1]) M = cytnx.UniTensor([bd_inner,bd_inner.redirect(),bd_phys, bd_phys.redirect()],rowrank=2) + M.set_name("MPO") # I M.set_elem([0,0,0,0],1); @@ -69,7 +70,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): VbdL = cytnx.Bond(cytnx.BD_KET,[[0]],[1]) VbdR = cytnx.Bond(cytnx.BD_KET,[[q]],[1]) L0 = cytnx.UniTensor([bd_inner.redirect(),VbdL.redirect(),VbdL],rowrank=1) #Left boundary + L0.set_name("L0") R0 = cytnx.UniTensor([bd_inner,VbdR,VbdR.redirect()],rowrank=1) #Right boundary + R0.set_name("R0") L0.set_elem([0,0,0],1) R0.set_elem([3,0,0],1) @@ -83,6 +86,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[0] = cytnx.UniTensor([VbdL,bd_phys.redirect(),cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1])],rowrank=2) A[0].get_block_()[0] = 1 + A[0].set_name("A0") lbls = [] lbls.append(["0","1","2"]) # store the labels for later convinience. 
@@ -96,6 +100,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): B3 = cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1]) A[k] = cytnx.UniTensor([B1,B2,B3],rowrank=2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] A[k].set_labels(lbl) @@ -114,8 +119,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) for p in range(Nsites - 1): - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -138,6 +145,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from right to left: anet = cytnx.Network() anet.FromString(["R: -2,-1,-3",\ @@ -145,8 +155,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "M: 0,-2,-4,-5",\ "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Dagger()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) @@ -172,6 +184,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. 
A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from left to right: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ @@ -179,14 +194,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "M: -2,0,-4,-5",\ "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 72a6775e9..9dd8e5abb 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -53,25 +53,32 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): M[0,1] = M[2,3] = 2**0.5*sp.real() M[0,2] = M[1,3] = 2**0.5*sm.real() M = cytnx.UniTensor(M,0) + M.set_name("MPO") L0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Left boundary R0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Right boundary - L0[0,0,0] = 1.; R0[3,0,0] = 1. + L0.set_name("L0") + R0.set_name("R0") + L0[0,0,0] = 1. + R0[3,0,0] = 1. - lbls = [] # List for storing the MPS labels A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1.), rowrank = 2) A[0].relabels_(["0","1","2"]) - lbls.append(["0","1","2"]) # store the labels for later convinience. + A[0].set_name("A0") + + lbls = [] # List for storing the MPS labels + lbls.append(["0","1","2"]) # store the labels for later convenience. 
for k in range(1,Nsites): dim1 = A[k-1].shape()[2]; dim2 = d dim3 = min(min(chi, A[k-1].shape()[2] * d), d ** (Nsites - k - 1)) A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.), rowrank = 2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] A[k].relabels_(lbl) - lbls.append(lbl) # store the labels for later convinience. + lbls.append(lbl) # store the labels for later convenience. LR = [None for i in range(Nsites+1)] LR[0] = L0 @@ -84,7 +91,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): s, A[p] ,vt = cytnx.linalg.Gesvd(A[p]) A[p+1] = cytnx.Contract(cytnx.Contract(s,vt),A[p+1]) - ## Calculate enviroments: + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + + ## Calculate environments: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ @@ -92,8 +102,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") # Recover the original MPS labels A[p].relabels_(lbls[p]) @@ -101,6 +113,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
A[-1].relabels_(lbls[-1]) # Recover the original MPS labels + A[-1].set_name(f"A{Nsites-1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -123,6 +136,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from right to left: anet = cytnx.Network() anet.FromString(["R: -2,-1,-3",\ @@ -131,14 +147,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].set_name("A0") for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -158,6 +177,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. 
A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from left to right: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ @@ -167,14 +189,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep if __name__ == '__main__': diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 7a55e3968..846ce9ee3 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -80,7 +80,8 @@ def get_energy(A, M): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") for p in range(0, N): - anet.PutUniTensors(["L","A","A_Conj","M"],[L,A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [L,A[p],A[p].Dagger().permute_(A[p].labels()),M]) L = anet.Launch() E = cytnx.Contract(L, R0).item() print('energy:', E) @@ -138,7 +139,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() # Recover the original MPS labels @@ -175,7 +177,8 @@ def get_energy(A, M): "B_Conj: 2,-5,-3",\ "TOUT: ;0,1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - 
anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+1],A[p],M,A[p].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+1],A[p],M,A[p].Dagger().permute_(A[p].labels())]) old_LR = LR[p].clone() if p != 0: LR[p] = anet.Launch() @@ -215,7 +218,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) old_LR = LR[p+1].clone() @@ -246,7 +250,8 @@ def Local_meas(A, B, Op, site): "TOUT: 2;4"]) for i in range(0, N): if i != site: - anet.PutUniTensors(["l","A","B"],[l,A[i],B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,A[i],B[i].Dagger().permute_(B[i].labels())]) l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") @@ -254,7 +259,8 @@ def Local_meas(A, B, Op, site): tmp = cytnx.Contract(tmp, Op) tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) - anet.PutUniTensors(["l","A","B"],[l,tmp,B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,tmp,B[i].Dagger().permute_(B[i].labels())]) l = anet.Launch() return l.reshape(1).item() diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index f67b97cd2..5c178700e 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -4974,16 +4974,20 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor. - @details This function will take the transpose of the UniTensor. If the UniTensor is - tagged (i.e. the Bonds are directional), it will swap the direction of the Bonds but - the rowrank will not change. If the UniTensor is untagged (i.e. the Bonds are - BondType::BD_REG), it will change the rowrank to the opposite side. - For fermionic UniTensors, the index order will be reversed without sign flips, and the - direction of all Bonds will swapped. 
- @return UniTensor + @details This function takes the transpose of a UniTensor: + 1) Swaps the roles of left and right indices: index numbers k < rowrank become k + rowrank, + indices k' >= rowrank become k' - rowrank. For fermions, the order of the indices is + inverted instead. + 2) Incoming legs become outgoing ones, and vice versa + 3) The rowrank is set to rank - old rowrank, such that left indices become right indices and + vice versa. + @return UniTensor @note Compared to Transpose_(), this function will return new UniTensor object. - @see Transpose_() - */ + @warning For fermionic UniTensors, the order of the indices is inverted, while for bosonic + UniTensors the role of the left- and right indices is exchanged without inverting the orders in + these two groups. + @see Transpose_() + */ UniTensor Transpose() const { UniTensor out; out._impl = this->_impl->Transpose(); diff --git a/src/BlockFermionicUniTensor.cpp b/src/BlockFermionicUniTensor.cpp index 095c1bea0..2b84b1cd4 100644 --- a/src/BlockFermionicUniTensor.cpp +++ b/src/BlockFermionicUniTensor.cpp @@ -1895,17 +1895,15 @@ namespace cytnx { }; void BlockFermionicUniTensor::Transpose_() { - //[21 Aug 2024] This is a copy from BlockUniTensor; - // modify tag // The index order is reversed without any sign flips! 
std::vector idxorder(this->_bonds.size()); - std::size_t idxnum = this->bonds().size() - 1; - for (int i = 0; i <= idxnum; i++) { + cytnx_int64 idxnum = this->bonds().size() - 1; + for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); idxorder[i] = idxnum - i; } this->permute_nosignflip_(idxorder); + this->_rowrank = idxnum + 1 - this->_rowrank; }; void BlockFermionicUniTensor::normalize_() { diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 9c1bdf75e..4257f7b9f 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1214,11 +1214,19 @@ namespace cytnx { }; void BlockUniTensor::Transpose_() { - // modify tag - for (int i = 0; i < this->bonds().size(); i++) { + const cytnx_int64 rank = this->bonds().size(); + std::vector idxorder(rank); + const cytnx_int64 oldrowrank = this->_rowrank; + this->_rowrank = rank - oldrowrank; + for (cytnx_int64 i = 0; i < this->_rowrank; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); + idxorder[i] = i + oldrowrank; } + for (cytnx_int64 i = this->_rowrank; i < rank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i - this->_rowrank; + } + this->permute_(idxorder); }; void BlockUniTensor::normalize_() { diff --git a/src/DenseUniTensor.cpp b/src/DenseUniTensor.cpp index 28757f8ad..a1cc60ff9 100644 --- a/src/DenseUniTensor.cpp +++ b/src/DenseUniTensor.cpp @@ -1190,12 +1190,19 @@ namespace cytnx { void DenseUniTensor::Transpose_() { if (this->is_tag()) { - // this->_rowrank = this->rank() - this->_rowrank; - for (int i = 0; i < this->rank(); i++) { - this->_bonds[i].set_type((this->_bonds[i].type() == BD_KET) ? 
BD_BRA : BD_KET); + const cytnx_int64 rank = this->bonds().size(); + std::vector idxorder(rank); + const cytnx_int64 oldrowrank = this->_rowrank; + this->_rowrank = rank - oldrowrank; + for (cytnx_int64 i = 0; i < this->_rowrank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i + oldrowrank; } - this->_is_braket_form = this->_update_braket(); - + for (cytnx_int64 i = this->_rowrank; i < rank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i - this->_rowrank; + } + this->permute_(idxorder); } else { std::vector new_permute = vec_concatenate(vec_range(this->rowrank(), this->rank()), @@ -1203,7 +1210,8 @@ namespace cytnx { this->permute_(new_permute); this->_rowrank = this->rank() - this->_rowrank; } - } + }; + void DenseUniTensor::normalize_() { this->_block /= linalg::Norm(this->_block); } void DenseUniTensor::_save_dispatch(std::fstream &f) const { this->_block._Save(f); } diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index 4e406f92d..5b5f01b37 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -104,9 +104,9 @@ TEST_F(BlockUniTensorTest, is_blockform) { } TEST_F(BlockUniTensorTest, clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -261,9 +261,9 @@ TEST_F(BlockUniTensorTest, permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { 
EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -275,8 +275,8 @@ TEST_F(BlockUniTensorTest, permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -288,9 +288,9 @@ TEST_F(BlockUniTensorTest, permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -302,8 +302,8 @@ TEST_F(BlockUniTensorTest, permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -424,9 +424,9 @@ TEST_F(BlockUniTensorTest, put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j 
< 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -449,9 +449,9 @@ TEST_F(BlockUniTensorTest, put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -474,9 +474,9 @@ TEST_F(BlockUniTensorTest, put_block_byqnum) { UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -499,9 +499,9 @@ TEST_F(BlockUniTensorTest, put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -532,8 +532,8 @@ TEST_F(BlockUniTensorTest, contract1) { auto outbks = out.get_blocks(); auto ansbks = 
UT_contract_ans1.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -547,8 +547,8 @@ TEST_F(BlockUniTensorTest, contract2) { auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans2.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -562,8 +562,8 @@ TEST_F(BlockUniTensorTest, contract3) { auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans3.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -598,51 +598,51 @@ TEST_F(BlockUniTensorTest, Add) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, 
l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; 
k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } @@ -669,51 +669,51 @@ TEST_F(BlockUniTensorTest, Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j 
- 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for 
(cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = BUT4.Load("OriginalBUT.cytnx"); @@ -768,7 +768,7 @@ TEST_F(BlockUniTensorTest, Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -789,10 +789,10 @@ TEST_F(BlockUniTensorTest, Inv) { tmp.Inv_(clip); // test inline version EXPECT_TRUE(AreEqUniTensor(BUT4.Inv(clip), tmp)); tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { Scalar val = proxy; @@ -816,10 +816,10 @@ TEST_F(BlockUniTensorTest, Pow) { EXPECT_TRUE(AreEqUniTensor(BUT4.Pow(2.3), tmp)); for (double p = 0.; p < 1.6; p += 0.5) { tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { Scalar val = proxy; @@ -833,35 +833,35 @@ TEST_F(BlockUniTensorTest, Pow) { TEST_F(BlockUniTensorTest, Conj) { auto tmp = BUT4.Conj(); - 
for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); - BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Conj_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); - for (size_t i = 0; i < 
UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -871,29 +871,29 @@ TEST_F(BlockUniTensorTest, Conj) { } TEST_F(BlockUniTensorTest, Transpose) { - auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + auto tmp = BUT1.Transpose().set_name("BUT1.Transpose"); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); 
EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -905,19 +905,17 @@ TEST_F(BlockUniTensorTest, Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -939,63 +937,90 @@ TEST_F(BlockUniTensorTest, Trace) { TEST_F(BlockUniTensorTest, Dagger) { auto tmp = BUT1.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); - tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp = 
BUT4.Dagger().set_name("BUT4.Dagger"); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + // std::cout << " BUT4(i=" << i << ", j=" << j << ", k=" << k << ", l=" << l + // << ") = " << double(BUT4.at({i, j, k, l}).real()) << " + i * " + // << double(BUT4.at({i, j, k, l}).imag()) << std::endl; + // std::cout << "BUT4.Dagger(k=" << k << ", l=" << l << ", i=" << i << ", j=" << j + // << ") = " << double(tmp.at({k, l, i, j}).real()) << " + i * " + // << double(tmp.at({k, l, i, j}).imag()) << std::endl; + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - 
-double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({k, l, i, j}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({k, l, i, j}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } + + tmp = UT_pB.set_rowrank(2).Dagger().set_name("UT_pB.Dagger"); + EXPECT_EQ(tmp.rowrank(), 1); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 0; k < 30; k++) { + if (UT_pB.at({i, j, k}).exists()) { + EXPECT_DOUBLE_EQ(double(tmp.at({k, i, j}).real()), double(UT_pB.at({i, j, k}).real())); + } else { + EXPECT_FALSE(tmp.at({k, i, j}).exists()); + } + } tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -1005,25 +1030,24 @@ TEST_F(BlockUniTensorTest, Dagger) { } TEST_F(BlockUniTensorTest, elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor*)BUT4._impl.get()) - ->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + 
((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); diff --git a/tests/BlockUniTensor_test.h b/tests/BlockUniTensor_test.h index f4b975a1b..794e126f6 100644 --- a/tests/BlockUniTensor_test.h +++ b/tests/BlockUniTensor_test.h @@ -15,17 +15,17 @@ class BlockUniTensorTest : public ::testing::Test { Bond B2 = Bond(BD_IN, {Qs(0), Qs(1)}, {3, 4}); Bond B3 = Bond(BD_OUT, {Qs(0) >> 2, Qs(1) >> 3}); Bond B4 = Bond(BD_OUT, {Qs(0), Qs(1)}, {1, 2}); - UniTensor BUT1 = UniTensor({B1, B2, B3, B4}); + UniTensor BUT1 = UniTensor({B1, B2, B3, B4}).set_name("BUT1"); Bond bd_sym_a = Bond(BD_KET, {{0, 2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); Bond bd_sym_b = Bond(BD_BRA, {{0, 2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); - UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}); + UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}).set_name("BUT2"); Bond bd_sym_c = Bond(BD_KET, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); Bond bd_sym_d = Bond(BD_BRA, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); - UniTensor BUT3 = UniTensor({bd_sym_c, bd_sym_d}); + UniTensor BUT3 = UniTensor({bd_sym_c, bd_sym_d}).set_name("BUT3"); Bond 
B1p = Bond(BD_IN, {Qs(-1), Qs(0), Qs(1)}, {2, 1, 2}); Bond B2p = Bond(BD_OUT, {Qs(-1), Qs(0), Qs(1)}, {4, 3, 4}); @@ -114,7 +114,7 @@ class BlockUniTensorTest : public ::testing::Test { protected: void SetUp() override { - BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx"); + BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx").set_name("BUT4"); BUT4_2 = UniTensor::Load(data_dir + "OriginalBUT2.cytnx"); BUconjT4 = UniTensor::Load(data_dir + "BUconjT.cytnx"); BUtrT4 = UniTensor::Load(data_dir + "BUtrT.cytnx"); diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 5b4b57c7b..d92e54568 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -4004,7 +4004,7 @@ TEST_F(DenseUniTensorTest, Conj_utuninit) { } /*=====test info===== -describe:test Trnaspose +describe:test Transpose ====================*/ TEST_F(DenseUniTensorTest, Transpose) { auto row_rank = 2u; @@ -4018,7 +4018,7 @@ TEST_F(DenseUniTensorTest, Transpose) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a, b; c -> c; a, b EXPECT_EQ(ut.labels(), std::vector({"a", "b", "c"})); EXPECT_EQ(ut_t.labels(), std::vector({"c", "a", "b"})); EXPECT_EQ(ut.rowrank(), row_rank); @@ -4035,7 +4035,7 @@ TEST_F(DenseUniTensorTest, Transpose) { } /*=====test info===== -describe:test Trnaspose with diagonal UniTensor +describe:test Transpose with diagonal UniTensor ====================*/ TEST_F(DenseUniTensorTest, Transpose_diag) { auto row_rank = 1u; @@ -4052,7 +4052,7 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a; b -> b; a EXPECT_EQ(ut_diag.labels(), std::vector({"a", "b"})); EXPECT_EQ(ut_t.labels(), std::vector({"b", "a"})); EXPECT_EQ(ut_diag.rowrank(), row_rank); @@ -4065,7 +4065,44 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { } /*=====test info===== -describe:test Trnaspose_ 
+describe:test Transpose with tagged UniTensor +====================*/ +TEST_F(DenseUniTensorTest, Transpose_tagged) { + auto Spcd_t = Spcd.Transpose(); + // test tag, rowrank, rank + EXPECT_TRUE(Spcd_t.is_tag()); + EXPECT_EQ(Spcd.rowrank(), 1); + EXPECT_EQ(Spcd_t.rowrank(), 2); + EXPECT_EQ(Spcd_t.rank(), 3); + // test bond types + std::vector bonds_t = Spcd_t.bonds(); + EXPECT_EQ(bonds_t[0].type(), BD_IN); + EXPECT_EQ(bonds_t[1].type(), BD_OUT); + EXPECT_EQ(bonds_t[2].type(), BD_OUT); + // test labels + std::vector labels = Spcd.labels(); + std::vector labels_t = Spcd_t.labels(); + EXPECT_EQ(labels_t[0], labels[1]); + EXPECT_EQ(labels_t[1], labels[2]); + EXPECT_EQ(labels_t[2], labels[0]); + // test shape + auto shape = Spcd.shape(); + auto shape_t = Spcd_t.shape(); + EXPECT_EQ(shape_t[0], shape[1]); + EXPECT_EQ(shape_t[1], shape[2]); + EXPECT_EQ(shape_t[2], shape[0]); + // test tensors + EXPECT_TRUE(AreEqUniTensor(Spcd_t.Transpose(), Spcd)); + auto Spcd_p = Spcd_t.permute(Spcd.labels()); + std::vector bonds_p = Spcd_p.bonds(); + for (auto bond : bonds_p) { + bond.redirect_(); + } + EXPECT_TRUE(AreEqUniTensor(Spcd_p, Spcd)); +} + +/*=====test info===== +describe:test Transpose_ ====================*/ TEST_F(DenseUniTensorTest, Transpose_) { auto row_rank = 2u; @@ -4081,7 +4118,7 @@ TEST_F(DenseUniTensorTest, Transpose_) { } /*=====test info===== -describe:test Trnaspose with uninitialized UniTensor +describe:test Transpose with uninitialized UniTensor ====================*/ TEST_F(DenseUniTensorTest, Transpose_uninit) { EXPECT_ANY_THROW(ut_uninit.Transpose()); diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 2d42a592a..45dda38e0 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -6,8 +6,8 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -76,22 +76,22 @@ TEST_F(BlockUniTensorTest, gpu_relabels_) 
{ TEST_F(BlockUniTensorTest, gpu_relabel) { auto tmp = BUT1.clone(); - BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); BUT1 = tmp; BUT1 = BUT1.relabel("0", "a"); @@ -129,21 +129,21 @@ TEST_F(BlockUniTensorTest, gpu_relabel) { } TEST_F(BlockUniTensorTest, gpu_relabel_) { auto tmp = BUT1.clone(); - BUT1.relabels_({"a", "b", "cd", "d"}); + BUT1.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabels_({"1", "-1", "2", "1000"}); + BUT1.relabel_({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); - 
EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); BUT1 = tmp; BUT1.relabel_("0", "a"); @@ -186,7 +186,7 @@ TEST_F(BlockUniTensorTest, gpu_Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -228,35 +228,35 @@ TEST_F(BlockUniTensorTest, gpu_Pow) { TEST_F(BlockUniTensorTest, gpu_Conj) { auto tmp = BUT4.Conj(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).real()), + double(BUT4.at({i, j, k, 
l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -267,28 +267,28 @@ TEST_F(BlockUniTensorTest, gpu_Conj) { TEST_F(BlockUniTensorTest, gpu_Transpose) { auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + 
EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -297,63 +297,70 @@ TEST_F(BlockUniTensorTest, gpu_Transpose) { TEST_F(BlockUniTensorTest, gpu_Dagger) { auto tmp = BUT1.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), 
std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } + tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; 
j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({k, l, i, j}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({k, l, i, j}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -389,25 +396,24 @@ TEST_F(BlockUniTensorTest, gpu_truncate) { } TEST_F(BlockUniTensorTest, gpu_elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if 
(BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor*)BUT4._impl.get()) - ->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + ((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); @@ -615,9 +621,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -640,9 +646,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, 
k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -665,9 +671,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -690,9 +696,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -712,9 +718,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { TEST_F(BlockUniTensorTest, gpu_clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -724,9 +730,9 @@ TEST_F(BlockUniTensorTest, gpu_permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t 
j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -738,8 +744,8 @@ TEST_F(BlockUniTensorTest, gpu_permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -751,9 +757,9 @@ TEST_F(BlockUniTensorTest, gpu_permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -765,8 +771,8 @@ TEST_F(BlockUniTensorTest, gpu_permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -832,26 +838,26 @@ TEST_F(BlockUniTensorTest, gpu_Add) { // } BUT4 = BUT4.Load(data_dir + 
"OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } @@ -878,76 +884,76 @@ TEST_F(BlockUniTensorTest, gpu_Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; 
i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k 
- 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i 
= 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = BUT4.Load("OriginalBUT.cytnx"); diff --git a/tests/gpu/linalg_test/GeSvd_test.cpp b/tests/gpu/linalg_test/GeSvd_test.cpp index b77d065a5..3e0fdbafc 100644 --- a/tests/gpu/linalg_test/GeSvd_test.cpp +++ b/tests/gpu/linalg_test/GeSvd_test.cpp @@ -381,16 +381,6 @@ namespace GesvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git 
a/tests/gpu/linalg_test/Svd_test.cpp b/tests/gpu/linalg_test/Svd_test.cpp index 139939fa8..a0f0f4f72 100644 --- a/tests/gpu/linalg_test/Svd_test.cpp +++ b/tests/gpu/linalg_test/Svd_test.cpp @@ -382,16 +382,6 @@ namespace SvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/GeSvd_test.cpp b/tests/linalg_test/GeSvd_test.cpp index 1c4974734..4ee747ce9 100644 --- a/tests/linalg_test/GeSvd_test.cpp +++ b/tests/linalg_test/GeSvd_test.cpp @@ -366,16 +366,6 @@ namespace GesvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Gesvd_truncate_test.cpp b/tests/linalg_test/Gesvd_truncate_test.cpp index ab2998acd..494a65dc9 100644 --- a/tests/linalg_test/Gesvd_truncate_test.cpp +++ b/tests/linalg_test/Gesvd_truncate_test.cpp @@ -200,16 +200,6 @@ namespace GesvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, 
const cytnx_uint64& keepdim) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Rsvd_test.cpp b/tests/linalg_test/Rsvd_test.cpp index 735fac9ff..8df0798c7 100644 --- a/tests/linalg_test/Rsvd_test.cpp +++ b/tests/linalg_test/Rsvd_test.cpp @@ -181,16 +181,6 @@ namespace RsvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim, const cytnx_uint64& power_iteration) { // test data source file diff --git a/tests/linalg_test/Rsvd_truncate_test.cpp b/tests/linalg_test/Rsvd_truncate_test.cpp index 7c2b1fdcc..741684629 100644 --- a/tests/linalg_test/Rsvd_truncate_test.cpp +++ b/tests/linalg_test/Rsvd_truncate_test.cpp @@ -181,16 +181,6 @@ namespace RsvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim, const cytnx_uint64& power_iteration) { // test data source file diff --git a/tests/linalg_test/Svd_test.cpp b/tests/linalg_test/Svd_test.cpp index 7bdf16369..ee3d01ff2 100644 --- a/tests/linalg_test/Svd_test.cpp +++ b/tests/linalg_test/Svd_test.cpp @@ -367,16 +367,6 @@ namespace SvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - 
UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Svd_truncate_test.cpp b/tests/linalg_test/Svd_truncate_test.cpp index 962fa926e..3484bbe0a 100644 --- a/tests/linalg_test/Svd_truncate_test.cpp +++ b/tests/linalg_test/Svd_truncate_test.cpp @@ -184,16 +184,6 @@ namespace SvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx";