// ======================
// AUTHOR : Peize Lin
// DATE : 2021-11-21
// ======================

#include "write_wfc_realspace.h"
#include "src_pw/global.h"
#include "module_base/tool_title.h"
#include <cassert>
#include <complex>
#include <cstdlib>
#include <fstream>
#include <stdexcept>
#include <string>
#include <vector>
namespace Write_Wfc_Realspace
{
    // write |wfc_r|^2 (the squared modulus) for all k-points and all bands
    // Input: wfc_g[ik](ib,ig)
    // loop order is for(z){for(y){for(x)}}
    void write_wfc_realspace_1(const ModuleBase::ComplexMatrix*const wfc_g, const std::string &folder_name)
    {
        ModuleBase::TITLE("Write_Wfc_Realspace", "write_wfc_realspace_1");

        const std::string outdir = GlobalV::global_out_dir + folder_name + "/";
        // only the global rank 0 creates the directory; the shell test avoids an error if it already exists
        const std::string command0 = "test -d " + outdir + " || mkdir " + outdir;
        if(GlobalV::MY_RANK==0)
            system(command0.c_str());

#ifdef __MPI
        std::vector<MPI_Request> mpi_requests;
#endif
        for(int ik=0; ik<GlobalC::kv.nks; ++ik)
        {
            // map the pool-local ik to the global k index; for NSPIN==2 the local
            // list holds both spin channels, so fold each channel into its own
            // nkstot/2 block of the global list
            const int ik_out = (GlobalV::NSPIN!=2)
                ? ik + GlobalC::Pkpoints.startk_pool[GlobalV::MY_POOL]
                : ik - GlobalC::kv.nks/2*GlobalC::kv.isk[ik] + GlobalC::kv.nkstot/2*GlobalC::kv.isk[ik] + GlobalC::Pkpoints.startk_pool[GlobalV::MY_POOL];
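            // worked example (assumed values, for illustration only): with
            // nkstot=8, nks=4 on this pool, startk_pool=2, a spin-down (isk=1)
            // local ik=3 maps to 3 - 4/2*1 + 8/2*1 + 2 = 7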
            for(int ib=0; ib<wfc_g[ik].nr; ++ib)
            {
                const std::vector<std::complex<double>> wfc_r = cal_wfc_r(wfc_g[ik], ik, ib);

                std::vector<double> wfc_r2(wfc_r.size());
                for(size_t ir=0; ir<wfc_r2.size(); ++ir)
                    wfc_r2[ir] = std::norm(wfc_r[ir]);        // |psi(r)|^2

                const std::string file_name = outdir + "wfc_realspace_"
                    + ModuleBase::GlobalFunc::TO_STRING(ik_out)
                    + "_" + ModuleBase::GlobalFunc::TO_STRING(ib);
#ifdef __MPI
                mpi_requests.push_back({});
                write_charge_realspace_1(wfc_r2, file_name, mpi_requests.back());
#else
                write_charge_realspace_1(wfc_r2, file_name);
#endif
            }
        }
#ifdef __MPI
        MPI_Waitall(static_cast<int>(mpi_requests.size()), mpi_requests.data(), MPI_STATUSES_IGNORE);
#endif
    }
    // processes output pipeline:
    //
    //           t0  t1  t2  t3  t4  t5  t6  t7
    //          -------------------------------->
    //   rank0   k0  k1  k2  k3  k4  k5
    //             \   \   \   \   \   \
    //   rank1       k0  k1  k2  k3  k4  k5
    //                 \   \   \   \   \   \
    //   rank2           k0  k1  k2  k3  k4  k5
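    // A minimal standalone sketch of the token-passing pattern drawn above
    // (illustrative only, excluded from compilation; file and variable names
    // are assumptions). Each rank blocks until its predecessor has written,
    // appends its own slab, then releases the next rank with a non-blocking
    // send so it can move on to its next k-point immediately.
#if 0
#include <mpi.h>
#include <fstream>
int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank=0, nproc=1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    constexpr int tag = 100;
    if(rank > 0)                // wait for the token from the previous rank
    {
        char recv_token;
        MPI_Recv(&recv_token, 1, MPI_CHAR, rank-1, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    std::ofstream ofs("pipeline.txt", rank==0 ? std::ofstream::trunc : std::ofstream::app);
    ofs << "slab of rank " << rank << std::endl;
    ofs.close();
    MPI_Request request = MPI_REQUEST_NULL;
    const char send_token = 'c';
    if(rank < nproc-1)          // release the next rank without blocking
        MPI_Isend(&send_token, 1, MPI_CHAR, rank+1, tag, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, MPI_STATUS_IGNORE);
    MPI_Finalize();
    return 0;
}
#endif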


    // Input: wfc_g(ib,ig)
    // Output: wfc_r[ir]
    std::vector<std::complex<double>> cal_wfc_r(const ModuleBase::ComplexMatrix &wfc_g, const int ik, const int ib)
    {
        ModuleBase::GlobalFunc::ZEROS(GlobalC::UFFT.porter, GlobalC::pw.nrxx);
        std::vector<std::complex<double>> wfc_r(GlobalC::pw.nrxx);
        // scatter the plane-wave coefficients of band ib into the FFT box
        for(int ig=0; ig<GlobalC::kv.ngk[ik]; ++ig)
            GlobalC::UFFT.porter[ GlobalC::pw.ig2fftw[GlobalC::wf.igk(ik,ig)] ] = wfc_g(ib,ig);
        // backward FFT: G space -> real space
        GlobalC::pw.FFT_wfc.FFT3D(GlobalC::UFFT.porter, 1);
        for(int ir=0; ir<GlobalC::pw.nrxx; ++ir)
            wfc_r[ir] = GlobalC::UFFT.porter[ir];
        return wfc_r;
    }
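    // Sketch of a consistency check one could run on the result (this assumes
    // the FFT convention of FFT3D(...,1) leaves the orbital normalized per
    // unit cell, which differs between codes): for a normalized Bloch orbital,
    // summing |wfc_r[ir]|^2 * omega/nxyz over the cell should give ~1.
#if 0
#include <vector>
#include <complex>
double cal_norm_r(const std::vector<std::complex<double>> &wfc_r,
                  const double omega, const int nxyz)
{
    double sum = 0.0;
    for(const std::complex<double> &c : wfc_r)
        sum += std::norm(c);        // |psi(r)|^2 at each grid point
    return sum * omega / nxyz;      // quadrature weight of the uniform grid
}
#endif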


    // Input: chg_r[ir]
#ifdef __MPI
    void write_charge_realspace_1(const std::vector<double> &chg_r, const std::string &file_name, MPI_Request &mpi_request)
#else
    void write_charge_realspace_1(const std::vector<double> &chg_r, const std::string &file_name)
#endif
    {
        std::ofstream ofs;

#ifdef __MPI
        constexpr int mpi_tag=100;
        if(GlobalV::RANK_IN_POOL==0)
        {
#endif
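            // the header resembles VASP's CHGCAR layout: title line, lattice
            // scale and vectors, species labels and counts, direct coordinates,
            // then the FFT grid dimensions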
            ofs.open(file_name);

            ofs<<"calculated by ABACUS"<<std::endl;
            ofs<<GlobalC::ucell.lat0_angstrom<<std::endl;
            ofs<<GlobalC::ucell.latvec.e11<<" "<<GlobalC::ucell.latvec.e12<<" "<<GlobalC::ucell.latvec.e13<<std::endl
               <<GlobalC::ucell.latvec.e21<<" "<<GlobalC::ucell.latvec.e22<<" "<<GlobalC::ucell.latvec.e23<<std::endl
               <<GlobalC::ucell.latvec.e31<<" "<<GlobalC::ucell.latvec.e32<<" "<<GlobalC::ucell.latvec.e33<<std::endl;

            for(int it=0; it<GlobalC::ucell.ntype; ++it)
                ofs<<GlobalC::ucell.atoms[it].label<<"\t";
            ofs<<std::endl;
            for(int it=0; it<GlobalC::ucell.ntype; ++it)
                ofs<<GlobalC::ucell.atoms[it].na<<"\t";
            ofs<<std::endl;

            ofs<<"Direct"<<std::endl;
            for(int it=0; it<GlobalC::ucell.ntype; ++it)
                for(int ia=0; ia<GlobalC::ucell.atoms[it].na; ++ia)
                    ofs<<GlobalC::ucell.atoms[it].taud[ia].x<<" "<<GlobalC::ucell.atoms[it].taud[ia].y<<" "<<GlobalC::ucell.atoms[it].taud[ia].z<<std::endl;
            ofs<<std::endl;

            ofs<<GlobalC::pw.ncx<<" "<<GlobalC::pw.ncy<<" "<<GlobalC::pw.ncz<<std::endl;
#ifdef __MPI
        }
        else
        {
            // wait until the previous rank in the pool has finished writing
            char recv_tmp;
            MPI_Recv(&recv_tmp, 1, MPI_CHAR, GlobalV::RANK_IN_POOL-1, mpi_tag, POOL_WORLD, MPI_STATUS_IGNORE);

            ofs.open(file_name, std::ofstream::app);
        }
#endif

        assert(GlobalC::pw.ncx * GlobalC::pw.ncy * GlobalC::pw.nczp == static_cast<int>(chg_r.size()));
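        // chg_r is laid out with iz contiguous (ir = (ix*ncy + iy)*nczp + iz);
        // the loops below write one z-slice at a time with x varying fastest
        // within each line, matching the for(z){for(y){for(x)}} order
        // documented at the top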
        for(int iz=0; iz<GlobalC::pw.nczp; ++iz)
        {
            for(int iy=0; iy<GlobalC::pw.ncy; ++iy)
            {
                for(int ix=0; ix<GlobalC::pw.ncx; ++ix)
                {
                    const int ir = (ix*GlobalC::pw.ncy+iy)*GlobalC::pw.nczp+iz;
                    ofs<<chg_r[ir]<<" ";
                }
                ofs<<std::endl;
            }
        }
        ofs.close();

#ifdef __MPI
        if(GlobalV::RANK_IN_POOL < GlobalV::NPROC_IN_POOL-1)
        {
            // release the next rank in the pool without blocking;
            // static: the send buffer must stay valid until MPI_Waitall
            // completes the request after this function has returned
            static const char send_tmp = 'c';
            MPI_Isend(&send_tmp, 1, MPI_CHAR, GlobalV::RANK_IN_POOL+1, mpi_tag, POOL_WORLD, &mpi_request);
        }
        else
        {
            mpi_request = MPI_REQUEST_NULL;
        }
#endif
    }
}    // namespace Write_Wfc_Realspace
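// Usage sketch (the call site is an assumption; in ABACUS the plane-wave
// coefficients GlobalC::wf.evc, one ComplexMatrix per k-point, would be the
// natural argument):
//
//   Write_Wfc_Realspace::write_wfc_realspace_1(GlobalC::wf.evc, "wfc_realspace");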