support NvNMD-train and NvNMD-explore #298

Open · wants to merge 54 commits into base: master

Commits (54)
73b4a48  support NvNMD-train and NvNMD-explore (jiongwalai, Apr 24, 2025)
7f60686  fix run nvnmd (Apr 28, 2025)
5255d2e  fix rerun command in nvnmd if number of atoms in dump is changing (Apr 28, 2025)
91a6620  add model deviation calculation function (Apr 29, 2025)
48aac9c  fix rerun in lmp-nvnmd-template (May 2, 2025)
8a6918c  fix rerun command in lmp-nvnmd-template (May 2, 2025)
ff5da8d  fix traj reader lammps for multi-systems in a traj (May 3, 2025)
2d4e32d  support init-model from frozen model in nvnmd (May 16, 2025)
c1dd741  fix args in nvnmd (May 16, 2025)
6b3a0e1  add nvnmd test unit (May 17, 2025)
af591a4  add nvnmd test unit (Jun 4, 2025)
2c0ea10  add nvnmd test unit (Jun 6, 2025)
6c69e4e  fix test unit (Jun 6, 2025)
77a0e3a  fix test unit (Jun 6, 2025)
afecaad  fix test unit (Jun 6, 2025)
e542b0a  fix test unit (Jun 6, 2025)
ca17fe1  fix test unit (Jun 6, 2025)
694889d  fix test unit (Jun 6, 2025)
daae459  fix run_nvnmd (Jun 6, 2025)
26ab174  fix test unit (Jun 6, 2025)
c4e3f46  fix run nvnmd (Jun 6, 2025)
a89bd70  fix test unit (Jun 6, 2025)
2aa102b  support init model from model.ckpt in dp-nvnmd train (Jun 9, 2025)
c1e46ac  fix nvnmd test unit (Jun 10, 2025)
3791618  fix nvnmd test unit (Jun 10, 2025)
e5a16ec  Merge branch 'deepmodeling:master' into master (jiongwalai, Jun 10, 2025)
8064b74  fix nvnmd test unit (Jun 10, 2025)
228f620  support nvnmd train and inference in dpgen2 (jiongwalai, Jun 10, 2025)
b73ea11  fix dpgen_loop in nvnmd (Jun 10, 2025)
ba7fc22  fix run nvnmd command && fix model_ckpt input and output (Jun 11, 2025)
e329de9  fix nvnmd test unit (Jun 11, 2025)
fcb7747  fix dl test unit (Jun 11, 2025)
b8bdce6  fix dl test unit (Jun 11, 2025)
35804c5  fix dl art (Jun 11, 2025)
26db68f  fix dl test unit (Jun 11, 2025)
849ffeb  fix dl test unit (Jun 11, 2025)
d7210b9  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jun 11, 2025)
05ba10f  reuse PrepRunDPTrain and PrepRunLmp superop (pre-commit-ci[bot], Jun 11, 2025)
38c50ea  fix download testunit (Jun 22, 2025)
d33ce44  fix download testunit (Jun 22, 2025)
d7eac9a  fix lmp template task group in lmp-nvnmd (Jun 22, 2025)
ed4cbbe  fix lmp-nvnmd input file (Jun 22, 2025)
179fa31  fix lmp-nvnmd testunit (Jun 23, 2025)
40d9c82  Merge pull request #2 from jiongwalai/nvnmd_0623 (jiongwalai, Jun 26, 2025)
3e83044  reuse PrepRunDPTrain and PrepRunLmp superop (Jun 26, 2025)
a232ae1  delete info (Jun 26, 2025)
7a5924d  remove preprunnvnmd superop (Jun 27, 2025)
ede835a  remove prep_run_nvnmd_train superop (Jun 27, 2025)
86b2a66  reuse PrepRunDPTrain and PrepRunLmp superop (jiongwalai, Jun 27, 2025)
2b14439  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jun 27, 2025)
2a25efe  remove prep_run_nvnmd_train superop && accept suggestion (jiongwalai, Jun 28, 2025)
3c8982c  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jun 28, 2025)
aa2e96b  add _check_nvnmd_model_files function and remove debug info (jiongwalai, Jun 28, 2025)
c50b411  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jun 28, 2025)
3 changes: 3 additions & 0 deletions dpgen2/constants.py
@@ -1,8 +1,11 @@
train_index_pattern = "%04d"
train_task_pattern = "task." + train_index_pattern
train_script_name = "input.json"
train_cnn_script_name = "input_cnn.json"
train_qnn_script_name = "input_qnn.json"
train_log_name = "train.log"
model_name_pattern = "model.%03d.pb"
nvnmd_model_name_pattern = "nvnmd_model.%03d"
pytorch_model_name_pattern = "model.%03d.pth"
model_name_match_pattern = r"model\.[0-9]{3,}(\.pb|\.pth)"
lmp_index_pattern = "%06d"
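For reference, these %-style patterns expand with Python's ordinary string-formatting operator. A minimal sketch (illustrative only, not dpgen2 code; note that nvnmd_model_name_pattern carries no file extension, which suggests it names a model directory rather than a single .pb file):

train_index_pattern = "%04d"
model_name_pattern = "model.%03d.pb"
nvnmd_model_name_pattern = "nvnmd_model.%03d"

for ii in range(2):
    print(train_index_pattern % ii)       # 0000, 0001
    print(model_name_pattern % ii)        # model.000.pb, model.001.pb
    print(nvnmd_model_name_pattern % ii)  # nvnmd_model.000, nvnmd_model.001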
48 changes: 48 additions & 0 deletions dpgen2/entrypoint/args.py
@@ -127,12 +127,59 @@ def dp_train_args():
]


def nvnmd_train_args():
doc_numb_models = "Number of models trained for evaluating the model deviation"
doc_config = "Configuration of training"
doc_template_script = "File names of the template training script. It can be a `List[str]` whose length equals `numb_models`, in which case each template script in the list is used to train one model; or a `str`, in which case all models share the same template training script."
doc_init_models_paths = "The paths to the initial models"
doc_init_models_uri = "The URI of initial models"
doc_optional_files = "Optional files for training"

return [
Argument(
"config",
dict,
RunDPTrain.training_args(),
optional=True,
default=RunDPTrain.normalize_config({}),
doc=doc_config,
),
Argument("numb_models", int, optional=True, default=4, doc=doc_numb_models),
Argument(
"template_script", [List[str], str], optional=False, doc=doc_template_script
),
Argument(
"init_models_paths",
List[str],
optional=True,
default=None,
doc=doc_init_models_paths,
alias=["training_iter0_model_path"],
),
Argument(
"init_models_uri",
str,
optional=True,
default=None,
doc=doc_init_models_uri,
),
Argument(
"optional_files",
list,
optional=True,
default=None,
doc=doc_optional_files,
),
]


def variant_train():
doc = "the type of the training"
return Variant(
"type",
[
Argument("dp", dict, dp_train_args()),
Argument("dp-nvnmd", dict, nvnmd_train_args()),
Argument("dp-dist", dict, dp_dist_train_args()),
],
doc=doc,
@@ -454,6 +501,7 @@ def variant_explore():
"type",
[
Argument("lmp", dict, lmp_args(), doc=doc_lmp),
Argument("lmp-nvnmd", dict, lmp_args(), doc=doc_lmp),
Argument("calypso", dict, caly_args(), doc=doc_calypso),
Argument("calypso:default", dict, caly_args(), doc=doc_calypso),
Argument("calypso:merge", dict, caly_args(), doc=doc_calypso),
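For context, these argument lists feed dargs-style validation. A hedged sketch of normalizing a "dp-nvnmd" train section (assumes the imports and functions of this module; the sample values are invented):

from dargs import Argument

train_arg = Argument("train", dict, [], [variant_train()], doc="the training block")
sample = {
    "type": "dp-nvnmd",
    "numb_models": 4,
    "template_script": "input.json",
}
# fills defaults (config, init_models_paths, ...) and type-checks the result
normalized = train_arg.normalize_value(sample, trim_pattern="_*")
train_arg.check_value(normalized, strict=False)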
35 changes: 34 additions & 1 deletion dpgen2/entrypoint/submit.py
@@ -111,6 +111,8 @@
RunDPTrain,
RunLmp,
RunLmpHDF5,
RunNvNMD,
RunNvNMDTrain,
RunRelax,
RunRelaxHDF5,
SelectConfs,
@@ -182,6 +184,17 @@ def make_concurrent_learning_op(
valid_data=valid_data,
optional_files=train_optional_files,
)
elif train_style == "dp-nvnmd":
prep_run_train_op = PrepRunDPTrain(
"prep-run-nvnmd-train",
PrepDPTrain,
RunNvNMDTrain,
prep_config=prep_train_config,
run_config=run_train_config,
upload_python_packages=upload_python_packages,
valid_data=valid_data,
optional_files=train_optional_files,
)
else:
raise RuntimeError(f"unknown train_style {train_style}")
if explore_style == "lmp":
@@ -193,6 +206,15 @@
run_config=run_explore_config,
upload_python_packages=upload_python_packages,
)
elif "lmp-nvnmd" in explore_style:
prep_run_explore_op = PrepRunLmp(
"prep-run-nvnmd",
PrepLmp,
RunNvNMD,
prep_config=prep_explore_config,
run_config=run_explore_config,
upload_python_packages=upload_python_packages,
)
elif "calypso" in explore_style:
expl_mode = explore_style.split(":")[-1] if ":" in explore_style else "default"
if expl_mode == "merge":
@@ -286,7 +308,7 @@ def make_naive_exploration_scheduler(
# use npt task group
explore_style = config["explore"]["type"]

if explore_style == "lmp":
if explore_style in ("lmp", "lmp-nvnmd"):
return make_lmp_naive_exploration_scheduler(config)
elif "calypso" in explore_style or explore_style == "diffcsp":
return make_naive_exploration_scheduler_without_conf(config, explore_style)
@@ -374,6 +396,7 @@ def make_lmp_naive_exploration_scheduler(config):
output_nopbc = config["explore"]["output_nopbc"]
conf_filters = get_conf_filters(config["explore"]["filters"])
use_ele_temp = config["inputs"]["use_ele_temp"]
config["explore"]["type"]
Review comment (⚠️ Potential issue): Remove the unused variable extraction. The explore type is read from the config here but never used:

-    config["explore"]["type"]

scheduler = ExplorationScheduler()
# report
conv_style = convergence.pop("type")
@@ -506,6 +529,16 @@ def workflow_concurrent_learning(
else None
)
config["train"]["numb_models"] = 1

elif train_style == "dp-nvnmd":
Review comment (Collaborator): Why not reuse the logic for train_style == "dp"?

init_models_paths = config["train"].get("init_models_paths", None)
numb_models = config["train"]["numb_models"]
if init_models_paths is not None and len(init_models_paths) != numb_models:
raise RuntimeError(
f"{len(init_models_paths)} init models provided, which does "
"not match numb_models={numb_models}"
)

else:
raise RuntimeError(f"unknown params, train_style: {train_style}")

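The "dp-nvnmd" branch above mirrors the plain "dp" path: the PrepRunDPTrain super-OP and the PrepDPTrain prep OP are reused, and only the run OP changes (RunNvNMDTrain for training, RunNvNMD for exploration). A minimal sketch of that dispatch, under the assumption that only the run OP differs (the real function also threads prep/run configs and uploaded packages):

def select_train_ops(train_style):
    # map a train style to its (prep, run) OPs, as in make_concurrent_learning_op
    if train_style == "dp":
        return PrepDPTrain, RunDPTrain
    elif train_style == "dp-nvnmd":
        return PrepDPTrain, RunNvNMDTrain  # NvNMD reuses the DP prep OP
    raise RuntimeError(f"unknown train_style {train_style}")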
20 changes: 14 additions & 6 deletions dpgen2/exploration/render/traj_render_lammps.py
@@ -109,6 +109,10 @@ def get_confs(
conf_filters: Optional["ConfFilters"] = None,
optional_outputs: Optional[List[Path]] = None,
) -> dpdata.MultiSystems:
from ase.io import ( # type: ignore
read,
)

ntraj = len(trajs)
ele_temp = None
if optional_outputs:
@@ -123,12 +127,16 @@
traj = StringIO(trajs[ii].get_data()) # type: ignore
else:
traj = trajs[ii]
ss = dpdata.System(traj, fmt=traj_fmt, type_map=type_map)
ss.nopbc = self.nopbc
if ele_temp:
self.set_ele_temp(ss, ele_temp[ii])
ss = ss.sub_system(id_selected[ii])
ms.append(ss)
# ss = dpdata.System(traj, fmt=traj_fmt, type_map=type_map)
ss = read(
str(traj), format="lammps-dump-text", index=":", specorder=type_map
)
for jj in id_selected[ii]:
s = dpdata.System(ss[jj], fmt="ase/structure", type_map=type_map)
Review comment on lines 127 to +135 (⚠️ Potential issue): StringIO handling is broken: ase.io.read(str(traj), ...) receives "<_io.StringIO...>".

read() accepts a file-like object; converting the StringIO to str passes the object's repr instead of the data and will raise "No such file or directory". Fix:

-                ss = read(
-                    str(traj), format="lammps-dump-text", index=":", specorder=type_map
-                )
+                ss = read(
+                    traj,                    # pass the file-object directly
+                    format="lammps-dump-text",
+                    index=":",
+                    specorder=type_map,
+                )

This keeps the code path for on-disk files unchanged while restoring support for in-memory datasets.

s.nopbc = self.nopbc
if ele_temp:
self.set_ele_temp(s, ele_temp[ii])
ms.append(s)
if conf_filters is not None:
ms = conf_filters.check(ms)
return ms
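A self-contained sketch of the new read path (file name and type_map invented): ASE parses every frame of a LAMMPS text dump, which, read frame by frame, tolerates a changing number of atoms between frames, and each selected frame is wrapped as a dpdata system.

import dpdata
from ase.io import read

type_map = ["O", "H"]
frames = read("traj.lammpstrj", format="lammps-dump-text",
              index=":", specorder=type_map)  # one ase.Atoms per frame
ms = dpdata.MultiSystems(type_map=type_map)
for jj in (0, 2):  # stand-in for id_selected[ii]
    s = dpdata.System(frames[jj], fmt="ase/structure", type_map=type_map)
    ms.append(s)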
38 changes: 30 additions & 8 deletions dpgen2/exploration/task/lmp/lmp_input.py
@@ -50,6 +50,7 @@ def make_lmp_input(
nopbc: bool = False,
max_seed: int = 1000000,
deepmd_version="2.0",
nvnmd_version=None,
trj_seperate_files=True,
pimd_bead: Optional[str] = None,
):
@@ -69,9 +70,9 @@
ret += "variable THERMO_FREQ equal %d\n" % trj_freq
ret += "variable DUMP_FREQ equal %d\n" % trj_freq
ret += "variable TEMP equal %f\n" % temp
if ele_temp_f is not None:
if ele_temp_f is not None and nvnmd_version is None:
ret += "variable ELE_TEMP equal %f\n" % ele_temp_f
if ele_temp_a is not None:
if ele_temp_a is not None and nvnmd_version is None:
ret += "variable ELE_TEMP equal %f\n" % ele_temp_a
if pres is not None:
ret += "variable PRES equal %f\n" % pres
@@ -106,12 +107,14 @@
if pimd_bead is not None
else lmp_model_devi_name
)
if Version(deepmd_version) < Version("1"):
if Version(deepmd_version) < Version("1") and nvnmd_version is None:
# 0.x
ret += "pair_style deepmd %s ${THERMO_FREQ} %s\n" % (
graph_list,
model_devi_file_name,
)
elif nvnmd_version is not None:
ret += "pair_style nvnmd %s\n" % ("model.pb")
else:
Review comment on lines +116 to 118 (⚠️ Potential issue): Consider using a configurable model name instead of hardcoding "model.pb".

The NVNMD pair style uses a hardcoded model filename, while the DeepMD branches use the graph_list variable. This reduces flexibility for users who might want to use different model names. Suggested change, using the first model from the graphs list:

 elif nvnmd_version is not None:
-    ret += "pair_style      nvnmd %s\n" % ("model.pb")
+    ret += "pair_style      nvnmd %s\n" % (graphs[0] if graphs else "model.pb")

Review comment on lines +116 to 118 (💡 Verification agent): Make the nvnmd model filenames configurable.

Verification across the codebase shows the nvnmd_version branch hardcodes "model.pb" (line 117), but elsewhere (in run_nvnmd_train.py and its tests) the CNN model uses "nvnmd_cnn/frozen_model.pb" and the QNN model uses "nvnmd_qnn/model.pb". This mismatch will break CNN-only runs.

In dpgen2/exploration/task/lmp/lmp_input.py, lines 116-118, replace the hardcoded "model.pb" with one or more configurable model file paths, for example:

-    elif nvnmd_version is not None:
-        ret += "pair_style      nvnmd %s\n" % ("model.pb")
+    elif nvnmd_version is not None:
+        # allow passing specific model filenames per graph
+        # e.g. ["nvnmd_cnn/frozen_model.pb", "nvnmd_qnn/model.pb"]
+        model_files = " ".join(nvnmd_model_files)
+        ret += "pair_style      nvnmd %s %s\n" % (graph_list, model_files)

Add a new parameter (e.g. nvnmd_model_files: List[str]) to make_lmp_input to supply these paths, update callers and tests accordingly, and ensure nvnmd_model_files aligns with the order and names of graphs.

# 1.x
keywords = ""
@@ -135,17 +138,28 @@
ret += "thermo_style custom step temp pe ke etotal press vol lx ly lz xy xz yz\n"
ret += "thermo ${THERMO_FREQ}\n"
if trj_seperate_files:
ret += "dump 1 all custom ${DUMP_FREQ} traj/*.lammpstrj id type x y z fx fy fz\n"
if nvnmd_version is None:
ret += "dump 1 all custom ${DUMP_FREQ} traj/*.lammpstrj id type x y z fx fy fz\n"
else:
ret += "dump 1 all custom ${DUMP_FREQ} ${rerun}_traj/*.lammpstrj id type x y z fx fy fz\n"
else:
lmp_traj_file_name = (
lmp_pimd_traj_name % pimd_bead if pimd_bead is not None else lmp_traj_name
)
ret += (
"dump 1 all custom ${DUMP_FREQ} %s id type x y z fx fy fz\n"
% lmp_traj_file_name
)
if nvnmd_version is None:
ret += (
"dump 1 all custom ${DUMP_FREQ} %s id type x y z fx fy fz\n"
% lmp_traj_file_name
)
else:
ret += (
"dump 1 all custom ${DUMP_FREQ} ${rerun}_%s id type x y z fx fy fz\n"
% lmp_traj_file_name
)
ret += "restart 10000 dpgen.restart\n"
ret += "\n"
if nvnmd_version is not None:
ret += 'if "${rerun} > 0" then "jump SELF rerun"\n'
if pka_e is None:
ret += 'if "${restart} == 0" then "velocity all create ${TEMP} %d"' % (
Review comment on lines +161 to 164 (🛠️ Refactor suggestion): The rerun variable is never defined in the template.

The jump predicates on ${rerun}, but that variable is not declared in the header (contrast with ${restart}), so LAMMPS will abort with "ERROR: Variable rerun is not defined". Either declare a default:

+# declare default
+variable        RERUN equal 0

or document that callers must always pass -var rerun on the command line.

random.randrange(max_seed - 1) + 1
@@ -193,4 +207,12 @@
ret += "\n"
ret += "timestep %f\n" % dt
ret += "run ${NSTEPS} upto\n"
if nvnmd_version is not None:
ret += "jump SELF end\n"
ret += "label rerun\n"
if trj_seperate_files:
ret += "rerun 0_traj/*.lammpstrj dump x y z fx fy fz add yes\n"
else:
ret += "rerun 0_%s dump x y z fx fy fz add yes\n" % lmp_traj_name
ret += "label end\n"
return ret
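The rerun scaffolding above points to a two-pass model-deviation scheme: pair_style nvnmd loads a single model, so pass 0 (rerun = 0) runs MD and writes 0_traj/, while later passes replay the same frames with a different model so that per-model forces can be compared. A hedged sketch of a driver loop under that assumption (paths, flags, and the copy-to-model.pb convention are illustrative, not the PR's exact invocation):

import shutil
import subprocess

models = ["nvnmd_model.000/model.pb", "nvnmd_model.001/model.pb"]
for ii, model in enumerate(models):
    shutil.copy(model, "model.pb")  # the generated input hardcodes model.pb
    subprocess.run(
        ["lmp", "-in", "in.lammps",
         "-var", "rerun", str(ii), "-var", "restart", "0"],
        check=True,
    )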
56 changes: 44 additions & 12 deletions dpgen2/exploration/task/lmp_template_task_group.py
@@ -47,12 +47,14 @@ def set_lmp(
revisions: dict = {},
traj_freq: int = 10,
extra_pair_style_args: str = "",
nvnmd_version: Optional[str] = None,
pimd_bead: Optional[str] = None,
) -> None:
self.lmp_template = Path(lmp_template_fname).read_text().split("\n")
self.revisions = revisions
self.traj_freq = traj_freq
self.extra_pair_style_args = extra_pair_style_args
self.nvnmd_version = nvnmd_version
self.pimd_bead = pimd_bead
self.lmp_set = True
self.model_list = sorted([model_name_pattern % ii for ii in range(numb_models)])
@@ -62,10 +64,16 @@
self.traj_freq,
self.extra_pair_style_args,
self.pimd_bead,
nvnmd_version=self.nvnmd_version,
)
self.lmp_template = revise_lmp_input_dump(
self.lmp_template, self.traj_freq, self.pimd_bead
self.lmp_template,
self.traj_freq,
self.pimd_bead,
nvnmd_version=self.nvnmd_version,
)
if nvnmd_version is not None:
self.lmp_template = revise_lmp_input_rerun(self.lmp_template)
if plm_template_fname is not None:
self.plm_template = Path(plm_template_fname).read_text().split("\n")
self.plm_set = True
@@ -158,8 +166,8 @@ def revise_lmp_input_model(
extra_pair_style_args="",
pimd_bead=None,
deepmd_version="1",
nvnmd_version=None,
):
idx = find_only_one_key(lmp_lines, ["pair_style", "deepmd"])
if extra_pair_style_args:
extra_pair_style_args = " " + extra_pair_style_args
graph_list = " ".join(task_model_list)
@@ -168,23 +176,39 @@
if pimd_bead is not None
else lmp_model_devi_name
)
lmp_lines[idx] = "pair_style deepmd %s out_freq %d out_file %s%s" % (
graph_list,
trj_freq,
model_devi_file_name,
extra_pair_style_args,
)
if nvnmd_version is None:
idx = find_only_one_key(lmp_lines, ["pair_style", "deepmd"])
lmp_lines[idx] = "pair_style deepmd %s out_freq %d out_file %s%s" % (
graph_list,
trj_freq,
model_devi_file_name,
extra_pair_style_args,
)
else:
idx = find_only_one_key(lmp_lines, ["pair_style", "nvnmd"])
lmp_lines[idx] = "pair_style nvnmd %s %s" % (
"model.pb",
extra_pair_style_args,
)

return lmp_lines


def revise_lmp_input_dump(lmp_lines, trj_freq, pimd_bead=None):
def revise_lmp_input_dump(lmp_lines, trj_freq, pimd_bead=None, nvnmd_version=None):
idx = find_only_one_key(lmp_lines, ["dump", "dpgen_dump"])
lmp_traj_file_name = (
lmp_pimd_traj_name % pimd_bead if pimd_bead is not None else lmp_traj_name
)
lmp_lines[
idx
] = f"dump dpgen_dump all custom {trj_freq} {lmp_traj_file_name} id type x y z"
if nvnmd_version is None:
lmp_lines[
idx
] = f"dump dpgen_dump all custom {trj_freq} {lmp_traj_file_name} id type x y z"
else:
lmp_lines[idx] = (
"dump dpgen_dump all custom %s ${rerun}_%s id type x y z fx fy fz"
% (trj_freq, lmp_traj_file_name)
)
lmp_lines.insert(idx + 1, 'if "${rerun} > 0" then "jump SELF rerun"')
return lmp_lines


Expand All @@ -197,6 +221,14 @@ def revise_lmp_input_plm(lmp_lines, in_plm, out_plm="output.plumed"):
return lmp_lines


def revise_lmp_input_rerun(lmp_lines):
lmp_lines.append("jump SELF end")
lmp_lines.append("label rerun")
lmp_lines.append(f"rerun 0_{lmp_traj_name} dump x y z fx fy fz add yes")
lmp_lines.append("label end")
return lmp_lines


def revise_by_keys(lmp_lines, keys, values):
for kk, vv in zip(keys, values): # type: ignore
for ii in range(len(lmp_lines)):
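Usage sketch for the revised dump handling (the template lines are invented; revise_lmp_input_dump is the helper above):

lmp_lines = [
    "units           metal",
    "dump            dpgen_dump all custom 10 traj.dump id type x y z",
]
lmp_lines = revise_lmp_input_dump(lmp_lines, trj_freq=10, nvnmd_version="0.0")
# The dump line now writes ${rerun}_-prefixed trajectories including forces,
# and an 'if "${rerun} > 0" then "jump SELF rerun"' guard follows it.
print("\n".join(lmp_lines))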