[WIP] LAMMPS Flows #1185
base: main
Changes from 200 commits
@@ -0,0 +1,92 @@
from pathlib import Path
from typing import Literal

from ase.io import Trajectory as AseTrajectory
from ase.io import read
from emmet.core.vasp.calculation import StoreTrajectoryOption
from monty.json import MSONable
from monty.serialization import dumpfn
from pymatgen.core import Molecule, Structure
from pymatgen.core.trajectory import Trajectory as PmgTrajectory
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.io.lammps.data import CombinedData, LammpsData
from pymatgen.io.lammps.generators import BaseLammpsGenerator


def write_lammps_input_set(
    data: Structure | LammpsData | CombinedData,
    input_set_generator: BaseLammpsGenerator,
    additional_data: LammpsData | CombinedData | None = None,
    directory: str | Path = ".",
) -> None:
    """Write the LAMMPS input set generated for `data` to `directory`."""
    input_set = input_set_generator.get_input_set(data, additional_data)
    input_set.write_input(directory)


class DumpConvertor(MSONable):
    """
    Convert LAMMPS dump files to pymatgen or ASE Trajectory objects.

    Based on TrajectoryObserver from atomate2.ase.

    Args:
        dumpfile : str
            Path to the LAMMPS dump file.
        store_md_outputs : StoreTrajectoryOption
            Whether to store MD outputs (forces and velocities) as frame properties
            of the Trajectory object.
        read_index : str | int
            Index of the frame(s) to read from the dump file (default ':', i.e. read
            all frames). Use an integer to read a single frame, which is practical
            for large files.
    """

    def __init__(
        self,
        dumpfile: str,
        store_md_outputs: StoreTrajectoryOption = StoreTrajectoryOption.NO,
        read_index: str | int = ":",
    ) -> None:
        self.store_md_outputs = store_md_outputs
        # ase.io.read returns a list of Atoms for a slice string such as ':' and a
        # single Atoms object for an integer index; normalize to a list in both cases.
        self.traj = (
            read(dumpfile, index=read_index)
            if isinstance(read_index, str)
            else [read(dumpfile, index=read_index)]
        )
        self.is_periodic = any(self.traj[0].pbc)
        self.frame_properties_keys = ["forces", "velocities"]

    def to_ase_trajectory(self, filename: str | None = None) -> AseTrajectory:
        """Write all frames to an ASE trajectory file and return it in read mode."""
        for idx, atoms in enumerate(self.traj):
            # Create the file on the first frame, then append subsequent frames.
            # TODO: check this append/write logic.
            with AseTrajectory(filename, "a" if idx > 0 else "w", atoms=atoms) as file:
                file.write()
        return AseTrajectory(filename, "r")

    def to_pymatgen_trajectory(self, filename: str | None = None) -> PmgTrajectory:
        """Convert the dump frames to a pymatgen Trajectory, optionally writing it to file."""
        species = AseAtomsAdaptor.get_structure(
            self.traj[0], cls=Structure if self.is_periodic else Molecule
        ).species

        frames = []
        frame_properties = []

        for atoms in self.traj:
            if self.store_md_outputs == StoreTrajectoryOption.FULL:
                frame_properties.append(
                    {key: getattr(atoms, f"get_{key}")() for key in self.frame_properties_keys}
                )

            if self.is_periodic:
                frames.append(
                    Structure(
                        lattice=atoms.get_cell(),
                        species=species,
                        coords=atoms.get_positions(),
                        coords_are_cartesian=True,
                    )
                )
            else:
                frames.append(
                    Molecule(
                        species=species,
                        coords=atoms.get_positions(),
                        charge=atoms.get_charges(),
                    )
                )

[Review comment on `if self.is_periodic:`] One potential issue with this approach is that, for a system in a box but without PBC, the size of the box is lost when the frames are converted to Molecule objects. Could that be relevant information to preserve? Could it be better to always store a Structure and keep track of the PBC?

        traj_method = "from_structures" if self.is_periodic else "from_molecules"
        pmg_traj = getattr(PmgTrajectory, traj_method)(
            frames,
            frame_properties=frame_properties if frame_properties else None,
            constant_lattice=False,
        )

        if filename:
            dumpfn(pmg_traj, filename)

        return pmg_traj
    def save(self, filename: str | None = None, fmt: Literal["pmg", "ase"] = "pmg"):
        """Save the trajectory to `filename` in the requested format ('pmg' or 'ase')."""
        filename = str(filename) if filename is not None else None
        if fmt == "pmg" and filename:
            return self.to_pymatgen_trajectory(filename=filename)
        if fmt == "ase" and filename:
            return self.to_ase_trajectory(filename=filename)
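For reference, a minimal usage sketch of the helpers in this file (not part of the diff). The import path atomate2.lammps.files is inferred from the corresponding import in base.py below, and the dump and output file names are purely illustrative:

    from atomate2.lammps.files import DumpConvertor  # module path inferred from base.py
    from emmet.core.vasp.calculation import StoreTrajectoryOption

    # Read every frame of a hypothetical dump file and keep forces/velocities per frame.
    converter = DumpConvertor(
        "md.lammpstrj",
        store_md_outputs=StoreTrajectoryOption.FULL,
        read_index=":",
    )

    # Convert to a pymatgen Trajectory and serialize it to disk via monty's dumpfn.
    traj = converter.to_pymatgen_trajectory(filename="md_traj.json.gz")
    print(len(traj))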
@@ -0,0 +1,66 @@
import warnings
from copy import deepcopy
from dataclasses import dataclass, field

from jobflow import Flow, Maker
from pymatgen.core import Structure

from atomate2.lammps.jobs.base import BaseLammpsMaker
from atomate2.lammps.jobs.core import LammpsNPTMaker, LammpsNVTMaker


@dataclass
class MeltQuenchThermalizeMaker(Maker):
    # potentially remove and replace with single job
    name: str = "melt-quench-thermalize"
    melt_maker: BaseLammpsMaker = field(default_factory=LammpsNPTMaker)
    quench_maker: BaseLammpsMaker = field(default_factory=LammpsNPTMaker)
    thermalize_maker: BaseLammpsMaker = field(default_factory=LammpsNVTMaker)

    def make(self, structure: Structure):
        melt = self.melt_maker.make(structure)
        quench = self.quench_maker.make(melt.output.structure)
        thermalize = self.thermalize_maker.make(quench.output.structure)
        return Flow([melt, quench, thermalize], name=self.name)

    @classmethod
    def from_temperature_steps(
        cls,
        start_temperature: float = 300,
        melt_temperature: float = 3000,
        quench_temperature: float = 300,
        n_steps_melt: int = 10000,
        n_steps_quench: int = 10000,
        n_steps_thermalize: int = 10000,
        npt_maker: LammpsNPTMaker | None = None,
        nvt_maker: LammpsNVTMaker | None = None,
    ) -> "MeltQuenchThermalizeMaker":
        if nvt_maker is None:
            warnings.warn("No NVT maker provided, using the NPT maker for the thermalize step.")
        if npt_maker is None:
            raise ValueError("An NPT maker must be provided.")
        melt_maker = deepcopy(npt_maker)
        melt_maker.name = "melt"
        melt_maker.input_set_generator.update_settings(
            {
                "start_temp": start_temperature,
                "end_temp": melt_temperature,
                "nsteps": n_steps_melt,
            }
        )

        quench_maker = deepcopy(npt_maker)
        quench_maker.name = "quench"
        quench_maker.input_set_generator.update_settings(
            {
                "start_temp": melt_temperature,
                "end_temp": quench_temperature,
                "nsteps": n_steps_quench,
            }
        )

        thermalize_maker = deepcopy(nvt_maker) if nvt_maker else deepcopy(npt_maker)
        thermalize_maker.name = "thermalize"
        thermalize_maker.input_set_generator.update_settings(
            {
                "start_temp": quench_temperature,
                "end_temp": quench_temperature,
                "nsteps": n_steps_thermalize,
            }
        )

        return cls(
            melt_maker=melt_maker,
            quench_maker=quench_maker,
            thermalize_maker=thermalize_maker,
        )
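For reference, a minimal sketch of driving this flow maker (not part of the diff). The flow module path and the default-constructed NPT/NVT makers are assumptions, not something this PR specifies:

    from pymatgen.core import Structure

    from atomate2.lammps.flows.core import MeltQuenchThermalizeMaker  # assumed module path
    from atomate2.lammps.jobs.core import LammpsNPTMaker, LammpsNVTMaker

    structure = Structure.from_file("POSCAR")  # any bulk structure of interest

    # Build the melt -> quench -> thermalize flow from temperature end points and step counts.
    flow_maker = MeltQuenchThermalizeMaker.from_temperature_steps(
        start_temperature=300,
        melt_temperature=3000,
        quench_temperature=300,
        n_steps_melt=10_000,
        n_steps_quench=10_000,
        n_steps_thermalize=10_000,
        npt_maker=LammpsNPTMaker(),  # assumes the makers can be built with defaults
        nvt_maker=LammpsNVTMaker(),
    )
    flow = flow_maker.make(structure)
    # The flow can then be executed with jobflow, e.g. jobflow.run_locally(flow).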
@@ -0,0 +1,4 @@
"""Lammps job makers for atomate2.""" | ||
|
||
from .base import BaseLammpsMaker | ||
from .core import CustomLammpsMaker, LammpsNPTMaker, LammpsNVTMaker, MinimizationMaker |
@@ -0,0 +1,148 @@
"""Base job maker for LAMMPS calculations.""" | ||||
|
||||
import glob | ||||
import os | ||||
import warnings | ||||
from collections.abc import Callable | ||||
from dataclasses import dataclass, field | ||||
from pathlib import Path | ||||
|
||||
from emmet.core.vasp.task_valid import TaskState | ||||
from jobflow import Maker, Response, job | ||||
from pymatgen.core import Structure | ||||
from pymatgen.io.lammps.generators import ( | ||||
BaseLammpsSetGenerator, | ||||
CombinedData, | ||||
LammpsData, | ||||
) | ||||
|
||||
from atomate2.common.files import gzip_files | ||||
from atomate2.lammps.files import write_lammps_input_set | ||||
from atomate2.lammps.run import run_lammps | ||||
from atomate2.lammps.schemas.task import LammpsTaskDocument, StoreTrajectoryOption | ||||
|
||||
_DATA_OBJECTS: list[str] = [ | ||||
"raw_log_file", | ||||
"inputs", | ||||
"trajectories", | ||||
"dump_files", | ||||
] | ||||
|
||||
__all__ = ("BaseLammpsMaker", "lammps_job") | ||||
|
||||
|
||||
class LammpsRunError(Exception):
    """Custom exception for LAMMPS jobs."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message


def lammps_job(method: Callable) -> job:
    """Job decorator for LAMMPS jobs."""
    return job(method, data=_DATA_OBJECTS, output_schema=LammpsTaskDocument)


@dataclass
class BaseLammpsMaker(Maker):
    """
    Basic Maker class for LAMMPS jobs.

    name: str
        Name of the job.
    input_set_generator: BaseLammpsSetGenerator
        Input set generator for the job; the default is BaseLammpsSetGenerator.
        Check the sets module for more options on input kwargs.
    force_field: str | dict | None
        Force field passed on to the input set generator, if given.
    write_input_set_kwargs: dict
        Additional kwargs passed to write_lammps_input_set.
    run_lammps_kwargs: dict
        Additional kwargs passed to run_lammps.
    task_document_kwargs: dict
        Additional kwargs passed to LammpsTaskDocument.from_directory.
    write_additional_data: LammpsData | CombinedData | dict
        Additional data to write to the job directory.
    """

    name: str = "Base LAMMPS job"
    input_set_generator: BaseLammpsSetGenerator = field(
        default_factory=BaseLammpsSetGenerator
    )
    force_field: str | dict | None = field(default=None)
    write_input_set_kwargs: dict = field(default_factory=dict)
    run_lammps_kwargs: dict = field(default_factory=dict)
    task_document_kwargs: dict = field(default_factory=dict)
    write_additional_data: LammpsData | CombinedData | dict = field(default_factory=dict)

    def __post_init__(self) -> None:
        """Post-initialization warnings for the job."""
        if (
            self.task_document_kwargs.get("store_trajectory", StoreTrajectoryOption.NO)
            != StoreTrajectoryOption.NO
        ):
            warnings.warn(
                "Trajectory data can be large; store it only if absolutely necessary. "
                "Consider parsing the dump files manually instead.",
                stacklevel=1,
            )

        if self.force_field:
            self.input_set_generator.force_field = self.force_field
    @lammps_job
    def make(
        self,
        input_structure: Structure | Path | LammpsData | None = None,
        prev_dir: Path | None = None,
    ) -> Response:
        """Run a LAMMPS calculation."""
        if prev_dir:
            restart_files = glob.glob(os.path.join(prev_dir, "*restart*"))
            if len(restart_files) != 1:
                raise FileNotFoundError(
                    "Expected exactly one restart file in the previous directory; "
                    "if present, it should have the extension '.restart'."
                )
            # glob already returns the path joined with prev_dir.
            self.input_set_generator.update_settings(
                {"read_restart": restart_files[0]}
            )

        if isinstance(input_structure, Path):
            input_structure = LammpsData.from_file(
                input_structure,
                atom_style=self.input_set_generator.settings.get("atom_style", "full"),
            )
        write_lammps_input_set(
            data=input_structure,
            input_set_generator=self.input_set_generator,
            additional_data=self.write_additional_data,
            **self.write_input_set_kwargs,
        )

        run_lammps(**self.run_lammps_kwargs)
        task_doc = LammpsTaskDocument.from_directory(
            os.getcwd(), task_label=self.name, **self.task_document_kwargs
        )

        if task_doc.state == TaskState.ERROR:
            # Surface the log from the first ERROR line onwards in the raised exception.
            try:
                error = ""
                log_lines = task_doc.raw_log_file.split("\n")
                for index, line in enumerate(log_lines):
                    if "ERROR" in line:
                        error = "\n".join(log_lines[index:])
                        break
            except (AttributeError, ValueError):
                error = "could not parse log file"
            raise LammpsRunError(f"Task {task_doc.task_label} failed, error: {error}")
        task_doc.composition = input_structure.composition
        task_doc.reduced_formula = input_structure.composition.reduced_formula
        task_doc.task_label = self.name
        task_doc.inputs = self.input_set_generator.settings.dict

        gzip_files(".")

[Review comment on `gzip_files(".")`] It may not be strictly necessary, but would it be easy to do something equivalent to VASP and some other codes, where only the LAMMPS-related files are zipped (see atomate2/src/atomate2/vasp/jobs/base.py, line 238 in 34682fb)? One downside of zipping everything inside the folder is that, when running on a cluster, the queue files are zipped as well and the queueing system may no longer be able to write to them. In any case, this can also be done at a later stage. (A minimal sketch of this idea follows the diff.)

        return Response(output=task_doc)
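Following up on the review comment above, here is a minimal sketch (not part of this PR) of compressing only LAMMPS-related files using the standard library. The file patterns are illustrative guesses at typical LAMMPS output names, and `gzip_lammps_outputs` is a hypothetical helper, not an existing atomate2 function:

    import gzip
    import shutil
    from pathlib import Path

    # Illustrative patterns for files a LAMMPS run typically produces.
    LAMMPS_FILE_PATTERNS = ("in.*", "*.data", "log.lammps", "*.dump*", "*.restart*")

    def gzip_lammps_outputs(directory: str | Path = ".") -> None:
        """Gzip only files matching LAMMPS-related patterns, leaving e.g. queue files untouched."""
        directory = Path(directory)
        for pattern in LAMMPS_FILE_PATTERNS:
            for path in directory.glob(pattern):
                if not path.is_file() or path.suffix == ".gz":
                    continue
                with open(path, "rb") as src, gzip.open(f"{path}.gz", "wb") as dst:
                    shutil.copyfileobj(src, dst)
                path.unlink()  # remove the uncompressed original after writing the .gz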