Skip to content
Closed
Show file tree
Hide file tree
Changes from 14 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions source/Makefile.Objects
Original file line number Diff line number Diff line change
Expand Up @@ -507,6 +507,7 @@ OBJS_IO=input_conv.o\
write_dipole.o\
td_current_io.o\
write_wfc_r.o\
write_libxc_r.o\
output_log.o\
output_mat_sparse.o\
para_json.o\
Expand Down
21 changes: 21 additions & 0 deletions source/module_esolver/esolver_fp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@
#include "module_io/cif_io.h"
#include "module_elecstate/module_charge/symmetry_rho.h"

#ifdef USE_LIBXC
#include "module_io/write_libxc_r.h"
#endif

namespace ModuleESolver
{

Expand Down Expand Up @@ -280,6 +284,23 @@ void ESolver_FP::after_scf(const int istep)
&(GlobalC::ucell),
PARAM.inp.out_elf[1]);
}

// 6) write the exchange-correlation functional (and its derivatives) on the
//    real-space grid, requested via the INPUT tag `out_xc_r`.
//    out_xc_r[0] >= 0 enables the output; the default (-1) disables it.
if (PARAM.inp.out_xc_r[0]>=0)
{
#ifdef USE_LIBXC
ModuleIO::write_libxc_r(
PARAM.inp.out_xc_r[0],
XC_Functional::get_func_id(), // Libxc id(s) of the functional currently in use
this->pw_rhod->nrxx, // number of real-space grid points on this process
GlobalC::ucell.omega, // volume of cell
GlobalC::ucell.tpiba, // 2*pi/lattice-constant scale; presumably needed for gradient terms -- TODO confirm
&this->chr, // charge density the functional is evaluated on
this);
#else
// The feature requires Libxc; fail loudly instead of silently skipping the output.
throw std::invalid_argument("out_xc_r must compile with libxc.\nSee "+std::string(__FILE__)+" line "+std::to_string(__LINE__));
#endif
}
}
}

Expand Down
1 change: 1 addition & 0 deletions source/module_io/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ list(APPEND objects
write_dipole.cpp
td_current_io.cpp
write_wfc_r.cpp
write_libxc_r.cpp
output_log.cpp
para_json.cpp
parse_args.cpp
Expand Down
18 changes: 13 additions & 5 deletions source/module_io/cube_io.h
Original file line number Diff line number Diff line change
Expand Up @@ -77,22 +77,30 @@ void read_cube_core_mismatch(
const int ny_read,
const int nz_read);

#ifdef __MPI
// when MPI:
// write data[ixy*nplane+iz] to file as order (ixy,iz)
// when serial:
// write data[iz*nxy+ixy] to file as order (ixy,iz)
// write data[ixy*nplane+iz*nld] to file as order (ixy,iz)
void write_cube_core(
std::ofstream &ofs_cube,
#ifdef __MPI
const int bz,
const int nbz,
const int nplane,
const int startz_current,
#endif
const double*const data,
const int nxy,
const int nz,
const int nld,
const int n_data_newline);
#else
// when serial:
// write data[iz*nxy+ixy] to file as order (ixy,iz)
void write_cube_core(
std::ofstream &ofs_cube,
const double*const data,
const int nxy,
const int nz,
const int n_data_newline);
#endif

/**
* @brief The trilinear interpolation method
Expand Down
15 changes: 15 additions & 0 deletions source/module_io/read_input_item_output.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,21 @@ void ReadInput::item_output()
read_sync_bool(input.out_wfc_r);
this->add_item(item);
}
{
    Input_Item item("out_xc_r");
    item.annotation = "if >=0, output the derivatives of exchange correlation in realspace, second parameter controls the precision";
    // Parse up to two integers from the INPUT line:
    //   [0] selects what to output (-1, the default, disables the feature)
    //   [1] controls the printing precision
    item.read_value = [](const Input_Item& item, Parameter& para) {
        // Clamp to the size of the target so surplus user-supplied tokens
        // cannot write past the end of para.input.out_xc_r, and so we never
        // read tokens that were not provided. Unspecified entries keep their
        // default value (-1) from sync_intvec below.
        const size_t count = std::min<size_t>(item.get_size(), para.input.out_xc_r.size());
        std::transform(item.str_values.begin(),
                       item.str_values.begin() + count,
                       para.input.out_xc_r.begin(),
                       [](const std::string& s) { return std::stoi(s); });
    };
    sync_intvec(input.out_xc_r, 2, -1);
    this->add_item(item);
}
{
Input_Item item("printe");
item.annotation = "Print out energy for each band for every printe steps";
Expand Down
32 changes: 20 additions & 12 deletions source/module_io/write_cube.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ void ModuleIO::write_cube(
}

#ifdef __MPI
ModuleIO::write_cube_core(ofs_cube, bz, nbz, nplane, startz_current, data, nx*ny, nz, 6);
ModuleIO::write_cube_core(ofs_cube, bz, nbz, nplane, startz_current, data, nx*ny, nz, 1, 6);
#else
ModuleIO::write_cube_core(ofs_cube, data, nx*ny, nz, 6);
#endif
Expand All @@ -141,28 +141,25 @@ void ModuleIO::write_cube(
/// for cube file
ofs_cube.close();
}

return;
}


#ifdef __MPI

void ModuleIO::write_cube_core(
std::ofstream &ofs_cube,
#ifdef __MPI
const int bz,
const int nbz,
const int nplane,
const int startz_current,
#endif
const double*const data,
const int nxy,
const int nz,
const int nld,
const int n_data_newline)
{
ModuleBase::TITLE("ModuleIO", "write_cube_core");

#ifdef __MPI

const int my_rank = GlobalV::MY_RANK;
const int my_pool = GlobalV::MY_POOL;
const int rank_in_pool = GlobalV::RANK_IN_POOL;
Expand All @@ -177,7 +174,6 @@ void ModuleIO::write_cube_core(

// num_z: how many planes on processor 'ip'
std::vector<int> num_z(nproc_in_pool, 0);

for (int iz = 0; iz < nbz; iz++)
{
const int ip = iz % nproc_in_pool;
Expand Down Expand Up @@ -231,7 +227,7 @@ void ModuleIO::write_cube_core(
// mohan change to rho_save on 2012-02-10
// because this can make our next restart calculation lead
// to the same scf_thr as the one saved.
zpiece[ixy] = data[ixy * nplane + iz - startz_current];
zpiece[ixy] = data[ixy * nplane + (iz - startz_current) * nld];
}
}
// case 2: > first part rho: send the rho to
Expand All @@ -240,7 +236,7 @@ void ModuleIO::write_cube_core(
{
for (int ixy = 0; ixy < nxy; ixy++)
{
zpiece[ixy] = data[ixy * nplane + iz - startz_current];
zpiece[ixy] = data[ixy * nplane + (iz - startz_current) * nld];
}
MPI_Send(zpiece.data(), nxy, MPI_DOUBLE, 0, tag, POOL_WORLD);
}
Expand Down Expand Up @@ -282,7 +278,18 @@ void ModuleIO::write_cube_core(
/// for cube file
}
MPI_Barrier(MPI_COMM_WORLD);
#else
}

#else // #ifdef __MPI

void ModuleIO::write_cube_core(
std::ofstream &ofs_cube,
const double*const data,
const int nxy,
const int nz,
const int n_data_newline)
{
ModuleBase::TITLE("ModuleIO", "write_cube_core");
for (int ixy = 0; ixy < nxy; ixy++)
{
for (int iz = 0; iz < nz; iz++)
Expand All @@ -296,5 +303,6 @@ void ModuleIO::write_cube_core(
}
ofs_cube << "\n";
}
#endif
}

#endif // #ifdef __MPI
Loading
Loading