diff --git a/doc/batch.md b/doc/batch.md index b234ba63..e5385fd4 100644 --- a/doc/batch.md +++ b/doc/batch.md @@ -18,7 +18,7 @@ To avoid running multiple jobs at the same time, one could set {dargs:argument}` {dargs:argument}`batch_type `: `Slurm`, `SlurmJobArray` [Slurm](https://slurm.schedmd.com/) is a job scheduling system used by lots of HPCs. -One needs to make sure slurm has been setup in the remote server and the related environment is activated. +One needs to make sure slurm has been set up in the remote server and the related environment is activated. When `SlurmJobArray` is used, dpdispatcher submits Slurm jobs with [job arrays](https://slurm.schedmd.com/job_array.html). In this way, several dpdispatcher {class}`task `s map to a Slurm job and a dpdispatcher {class}`job ` maps to a Slurm job array. @@ -30,7 +30,7 @@ One can use {dargs:argument}`group_size ` and {dargs:argum {dargs:argument}`batch_type `: `PBS` [OpenPBS](https://www.openpbs.org/) is an open-source job scheduling of the Linux Foundation and [PBS Profession](https://www.altair.com/pbs-professional/) is its commercial solution. -One needs to make sure OpenPBS has been setup in the remote server and the related environment is activated. +One needs to make sure OpenPBS has been set up in the remote server and the related environment is activated. Note that do not use `PBS` for Torque. @@ -40,14 +40,22 @@ Note that do not use `PBS` for Torque. The [Terascale Open-source Resource and QUEue Manager (TORQUE)](https://adaptivecomputing.com/cherry-services/torque-resource-manager/) is a distributed resource manager based on standard OpenPBS. However, not all OpenPBS flags are still supported in TORQUE. -One needs to make sure TORQUE has been setup in the remote server and the related environment is activated. +One needs to make sure TORQUE has been set up in the remote server and the related environment is activated. 
## LSF {dargs:argument}`batch_type `: `LSF` [IBM Spectrum LSF Suites](https://www.ibm.com/products/hpc-workload-management) is a comprehensive workload management solution used by HPCs. -One needs to make sure LSF has been setup in the remote server and the related environment is activated. +One needs to make sure LSF has been set up in the remote server and the related environment is activated. + +## JH UniScheduler + +{dargs:argument}`batch_type `: `JH_UniScheduler` + +[JH UniScheduler](http://www.jhinno.com/m/custom_case_05.html) was developed by JHINNO company and uses "jsub" to submit tasks. +Its overall architecture is similar to that of IBM's LSF. However, there are still some differences between them. One needs to +make sure JH UniScheduler has been set up in the remote server and the related environment is activated. ## Bohrium @@ -74,10 +82,10 @@ Read Fujitsu cloud service documentation for details. ## OpenAPI {dargs:argument}`batcy_type `: `OpenAPI` -OpenAPI is a new way to submit jobs to Bohrium. It using [AccessKey](https://bohrium.dp.tech/personal/setting) instead of username and password. Read Bohrium documentation for details. +OpenAPI is a new way to submit jobs to Bohrium. It is using [AccessKey](https://bohrium.dp.tech/personal/setting) instead of username and password. Read Bohrium documentation for details. ## SGE {dargs:argument}`batch_type `: `SGE` -The [Sun Grid Engine (SGE) scheduler](https://gridscheduler.sourceforge.net) is a batch-queueing system distributed resource management. The commands and flags of SGE share a lot similarity with PBS except when checking job status. Use this argument if one is submitting job to SGE based batch system. +The [Sun Grid Engine (SGE) scheduler](https://gridscheduler.sourceforge.net) is a batch-queueing system distributed resource management. The commands and flags of SGE share a lot of similarity with PBS except when checking job status. 
Use this argument if one is submitting job to an SGE-based batch system. diff --git a/doc/index.rst b/doc/index.rst index 2bba0005..e4984177 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -6,7 +6,7 @@ DPDispatcher's documentation ====================================== -DPDispatcher is a Python package used to generate HPC (High Performance Computing) scheduler systems (Slurm/PBS/LSF/dpcloudserver) jobs input scripts and submit these scripts to HPC systems and poke until they finish. +DPDispatcher is a Python package used to generate HPC (High Performance Computing) scheduler systems (Slurm/PBS/LSF/JH_UniScheduler/dpcloudserver) jobs input scripts and submit these scripts to HPC systems and poke until they finish. DPDispatcher will monitor (poke) until these jobs finish and download the results files (if these jobs is running on remote systems connected by SSH). diff --git a/dpdispatcher/machines/JH_UniScheduler.py b/dpdispatcher/machines/JH_UniScheduler.py new file mode 100644 index 00000000..d13b66b5 --- /dev/null +++ b/dpdispatcher/machines/JH_UniScheduler.py @@ -0,0 +1,175 @@ +import shlex +from typing import List + +from dargs import Argument + +from dpdispatcher.dlog import dlog +from dpdispatcher.machine import Machine +from dpdispatcher.utils.job_status import JobStatus +from dpdispatcher.utils.utils import ( + RetrySignal, + customized_script_header_template, + retry, +) + +JH_UniScheduler_script_header_template = """\ +#!/bin/bash -l +#JSUB -e %J.err +#JSUB -o %J.out +{JH_UniScheduler_nodes_line} +{JH_UniScheduler_ptile_line} +{JH_UniScheduler_partition_line} +{JH_UniScheduler_number_gpu_line}""" + + +class JH_UniScheduler(Machine): + """JH_UniScheduler batch.""" + + def gen_script(self, job): + JH_UniScheduler_script = super().gen_script(job) + return JH_UniScheduler_script + + def gen_script_header(self, job): + resources = job.resources + script_header_dict = { + "JH_UniScheduler_nodes_line": f"#JSUB -n {resources.number_node * resources.cpu_per_node}", 
+ "JH_UniScheduler_ptile_line": f"#JSUB -R 'span[ptile={resources.cpu_per_node}]'", + "JH_UniScheduler_partition_line": f"#JSUB -q {resources.queue_name}", + } + custom_gpu_line = resources.kwargs.get("custom_gpu_line", None) + if not custom_gpu_line: + script_header_dict["JH_UniScheduler_number_gpu_line"] = ( + "" f"#JSUB -gpgpu {resources.gpu_per_node}" + ) + else: + script_header_dict["JH_UniScheduler_number_gpu_line"] = custom_gpu_line + if ( + resources["strategy"].get("customized_script_header_template_file") + is not None + ): + JH_UniScheduler_script_header = customized_script_header_template( + resources["strategy"]["customized_script_header_template_file"], + resources, + ) + else: + JH_UniScheduler_script_header = ( + JH_UniScheduler_script_header_template.format(**script_header_dict) + ) + + return JH_UniScheduler_script_header + + @retry() + def do_submit(self, job): + script_file_name = job.script_file_name + script_str = self.gen_script(job) + job_id_name = job.job_hash + "_job_id" + self.context.write_file(fname=script_file_name, write_str=script_str) + script_run_str = self.gen_script_command(job) + script_run_file_name = f"{job.script_file_name}.run" + self.context.write_file(fname=script_run_file_name, write_str=script_run_str) + + try: + stdin, stdout, stderr = self.context.block_checkcall( + "cd {} && {} {}".format( + shlex.quote(self.context.remote_root), + "jsub < ", + shlex.quote(script_file_name), + ) + ) + except RuntimeError as err: + raise RetrySignal(err) from err + + subret = stdout.readlines() + job_id = subret[0].split()[1][1:-1] + self.context.write_file(job_id_name, job_id) + return job_id + + def default_resources(self, resources): + pass + + @retry() + def check_status(self, job): + try: + job_id = job.job_id + except AttributeError: + return JobStatus.terminated + if job_id == "": + return JobStatus.unsubmitted + ret, stdin, stdout, stderr = self.context.block_call("jjobs " + job_id) + err_str = stderr.read().decode("utf-8") + 
if (f"Job <{job_id}> is not found") in err_str: + if self.check_finish_tag(job): + return JobStatus.finished + else: + return JobStatus.terminated + elif ret != 0: + # just retry when any unknown error raised. + raise RetrySignal( + "Get error code %d in checking status through ssh with job: %s . message: %s" + % (ret, job.job_hash, err_str) + ) + status_out = stdout.read().decode("utf-8").split("\n") + if len(status_out) < 2: + return JobStatus.unknown + else: + status_line = status_out[1] + status_word = status_line.split()[2] + + if status_word in ["PEND"]: + return JobStatus.waiting + elif status_word in ["RUN", "PSUSP", "SSUSP", "USUSP"]: + return JobStatus.running + elif status_word in ["DONE", "EXIT"]: + if self.check_finish_tag(job): + dlog.info(f"job: {job.job_hash} {job.job_id} finished") + return JobStatus.finished + else: + return JobStatus.terminated + else: + return JobStatus.unknown + + def check_finish_tag(self, job): + job_tag_finished = job.job_hash + "_job_tag_finished" + return self.context.check_file_exists(job_tag_finished) + + @classmethod + def resources_subfields(cls) -> List[Argument]: + """Generate the resources subfields. + + Returns + ------- + list[Argument] + resources subfields + """ + doc_custom_gpu_line = "Custom GPU configuration, starting with #JSUB" + + return [ + Argument( + "kwargs", + dict, + [ + Argument( + "custom_gpu_line", + str, + optional=True, + default=None, + doc=doc_custom_gpu_line, + ), + ], + optional=False, + doc="Extra arguments.", + ) + ] + + def kill(self, job): + """Kill the job. 
+ + Parameters + ---------- + job : Job + job + """ + job_id = job.job_id + ret, stdin, stdout, stderr = self.context.block_call( + "jctrl kill " + str(job_id) + ) diff --git a/pyproject.toml b/pyproject.toml index 36184b8d..c1c478d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ dependencies = [ ] requires-python = ">=3.7" readme = "README.md" -keywords = ["dispatcher", "hpc", "slurm", "lsf", "pbs", "ssh"] +keywords = ["dispatcher", "hpc", "slurm", "lsf", "pbs", "ssh", "jh_unischeduler"] [project.urls] Homepage = "https://github.com/deepmodeling/dpdispatcher" diff --git a/tests/context.py b/tests/context.py index 1a6cd690..2d5e3a7e 100644 --- a/tests/context.py +++ b/tests/context.py @@ -19,6 +19,7 @@ from dpdispatcher.machine import Machine # noqa: F401 from dpdispatcher.machines.distributed_shell import DistributedShell # noqa: F401 from dpdispatcher.machines.dp_cloud_server import Lebesgue # noqa: F401 +from dpdispatcher.machines.JH_UniScheduler import JH_UniScheduler # noqa: F401 from dpdispatcher.machines.lsf import LSF # noqa: F401 from dpdispatcher.machines.pbs import PBS # noqa: F401 from dpdispatcher.machines.shell import Shell # noqa: F401 diff --git a/tests/devel_test_JH_UniScheduler.py b/tests/devel_test_JH_UniScheduler.py new file mode 100644 index 00000000..8670c686 --- /dev/null +++ b/tests/devel_test_JH_UniScheduler.py @@ -0,0 +1,57 @@ +import json +import os +import sys + +from dpdispatcher.machine import Machine +from dpdispatcher.submission import Resources, Submission, Task + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +# task_need_resources has no effect +with open("jsons/machine_JH_UniScheduler.json") as f: + mdata = json.load(f) + +machine = Machine.load_from_dict(mdata["machine"]) +resources = Resources.load_from_dict(mdata["resources"]) + +submission = Submission( + work_base="0_md/", + machine=machine, + resources=resources, + forward_common_files=["graph.pb"], + 
backward_common_files=[], +) + +task1 = Task( + command="lmp -i input.lammps", + task_work_path="bct-1/", + forward_files=["conf.lmp", "input.lammps"], + backward_files=["log.lammps"], +) +task2 = Task( + command="lmp -i input.lammps", + task_work_path="bct-2/", + forward_files=["conf.lmp", "input.lammps"], + backward_files=["log.lammps"], +) +task3 = Task( + command="lmp -i input.lammps", + task_work_path="bct-3/", + forward_files=["conf.lmp", "input.lammps"], + backward_files=["log.lammps"], +) +task4 = Task( + command="lmp -i input.lammps", + task_work_path="bct-4/", + forward_files=["conf.lmp", "input.lammps"], + backward_files=["log.lammps"], +) +submission.register_task_list( + [ + task1, + task2, + task3, + task4, + ] +) +submission.run_submission(clean=True) diff --git a/tests/jsons/machine_JH_UniScheduler.json b/tests/jsons/machine_JH_UniScheduler.json new file mode 100644 index 00000000..5cb93da4 --- /dev/null +++ b/tests/jsons/machine_JH_UniScheduler.json @@ -0,0 +1,16 @@ +{ + "machine": { + "batch_type": "JH_UniScheduler", + "context_type": "local", + "local_root": "./", + "remote_root": "/data/home/wangsimin/machine_learning/dpgen/task/test/dpgen_example/run1" + }, + "resources":{ + "number_node": 1, + "cpu_per_node": 4, + "gpu_per_node": 1, + "queue_name": "gpu", + "group_size": 4, + "source_list": ["/public/software/deepmd-kit/bin/activate /public/software/deepmd-kit"] + } +} diff --git a/tests/jsons/machine_lazy_local_jh_unischeduler.json b/tests/jsons/machine_lazy_local_jh_unischeduler.json new file mode 100644 index 00000000..5e6adcee --- /dev/null +++ b/tests/jsons/machine_lazy_local_jh_unischeduler.json @@ -0,0 +1,18 @@ +{ + "machine": { + "batch_type": "JH_UniScheduler", + "context_type": "LazyLocalContext", + "local_root": "./test_jh_unischeduler" + }, + "resources": { + "number_node": 1, + "cpu_per_node": 4, + "queue_name": "gpu", + "gpu_per_node": 1, + "group_size": 4, + "strategy": { + "if_cuda_multi_devices": false + }, + "source_list": 
["./slurm_test.env"] + } +} diff --git a/tests/test_JH_UniScheduler_script_generation.py b/tests/test_JH_UniScheduler_script_generation.py new file mode 100644 index 00000000..b7d29a8d --- /dev/null +++ b/tests/test_JH_UniScheduler_script_generation.py @@ -0,0 +1,157 @@ +# from dpdispatcher.batch_object import BatchObject +# from dpdispatcher.batch import Batch +import os +import sys +import tempfile +import textwrap + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +__package__ = "tests" +import json +import unittest + +from .context import ( + Machine, + Resources, + Submission, + Task, + setUpModule, # noqa: F401 +) + + +class TestJHUniSchedulerScriptGeneration(unittest.TestCase): + def setUp(self): + self.maxDiff = None + + def test_shell_trival(self): + with open("jsons/machine_lazy_local_jh_unischeduler.json") as f: + machine_dict = json.load(f) + + machine = Machine(**machine_dict["machine"]) + resources = Resources(**machine_dict["resources"]) + + task1 = Task( + command="cat example.txt", + task_work_path="dir1/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task2 = Task( + command="cat example.txt", + task_work_path="dir2/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task3 = Task( + command="cat example.txt", + task_work_path="dir3/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task4 = Task( + command="cat example.txt", + task_work_path="dir4/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task_list = [task1, task2, task3, task4] + + submission = Submission( + work_base="parent_dir/", + machine=machine, + resources=resources, + forward_common_files=["graph.pb"], + backward_common_files=[], + task_list=task_list, + ) + submission.generate_jobs() + + task_hash = submission.get_hash() + test_job = submission.belonging_jobs[0] + job_hash = 
test_job.job_hash + header_str = machine.gen_script_header(test_job) + benchmark_header = textwrap.dedent( + """\ + #!/bin/bash -l + #JSUB -e %J.err + #JSUB -o %J.out + #JSUB -n 4 + #JSUB -R 'span[ptile=4]' + #JSUB -q gpu + #JSUB -gpgpu 1""" + ) + self.assertEqual(header_str, benchmark_header) + + @unittest.skipIf(sys.platform == "win32", "skip for permission error") + def test_template(self): + with open("jsons/machine_lazy_local_jh_unischeduler.json") as f: + machine_dict = json.load(f) + + benchmark_str = textwrap.dedent( + """\ + #!/bin/bash -l + # JSUB -e %J.err + # JSUB -o %J.out + # JSUB -n 4 + # JSUB -R 'span[ptile=4]' + # JSUB -q gpu + #JSUB -gpgpu 1""" + ) + + with tempfile.NamedTemporaryFile("w") as f: + f.write(benchmark_str) + f.flush() + + machine_dict["resources"]["strategy"][ + "customized_script_header_template_file" + ] = f.name + + machine = Machine(**machine_dict["machine"]) + resources = Resources(**machine_dict["resources"]) + + task1 = Task( + command="cat example.txt", + task_work_path="dir1/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task2 = Task( + command="cat example.txt", + task_work_path="dir2/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task3 = Task( + command="cat example.txt", + task_work_path="dir3/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task4 = Task( + command="cat example.txt", + task_work_path="dir4/", + forward_files=["example.txt"], + backward_files=["out.txt"], + outlog="out.txt", + ) + task_list = [task1, task2, task3, task4] + + submission = Submission( + work_base="parent_dir/", + machine=machine, + resources=resources, + forward_common_files=["graph.pb"], + backward_common_files=[], + task_list=task_list, + ) + submission.generate_jobs() + str = machine.gen_script_header(submission.belonging_jobs[0]) + self.assertEqual(str, benchmark_str) diff --git 
a/tests/test_class_machine_dispatch.py b/tests/test_class_machine_dispatch.py index f1cf328d..db912a31 100644 --- a/tests/test_class_machine_dispatch.py +++ b/tests/test_class_machine_dispatch.py @@ -7,18 +7,19 @@ __package__ = "tests" from dargs.dargs import ArgumentValueError -from .context import ( +from .context import ( # noqa: F401 LSF, PBS, BaseContext, DistributedShell, + JH_UniScheduler, LazyLocalContext, Lebesgue, LocalContext, Machine, Shell, Slurm, - setUpModule, # noqa: F401 + setUpModule, ) @@ -154,6 +155,15 @@ def test_slurm(self): machine = Machine.load_from_dict(machine_dict=machine_dict) self.assertIsInstance(machine, Slurm) + def test_jh_unischeduler(self): + machine_dict = { + "batch_type": "JH_UniScheduler", + "context_type": "LazyLocalContext", + "local_root": "./", + } + machine = Machine.load_from_dict(machine_dict=machine_dict) + self.assertIsInstance(machine, JH_UniScheduler) + def test_shell(self): machine_dict = { "batch_type": "Shell", diff --git a/tests/test_jh_unischeduler/0_md/bct-1/conf.lmp b/tests/test_jh_unischeduler/0_md/bct-1/conf.lmp new file mode 100644 index 00000000..7087abb1 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-1/conf.lmp @@ -0,0 +1,12 @@ + +2 atoms +1 atom types + 0.0000000000 4.0000000000 xlo xhi + 0.0000000000 4.0000000000 ylo yhi + 0.0000000000 3.3800000000 zlo zhi + 0.0000000000 0.0000000000 0.0000000000 xy xz yz + +Atoms # atomic + + 1 1 0.0000000000 0.0000000000 0.0000000000 + 2 1 2.0000000000 2.0000000000 1.6900000000 diff --git a/tests/test_jh_unischeduler/0_md/bct-1/input.lammps b/tests/test_jh_unischeduler/0_md/bct-1/input.lammps new file mode 100644 index 00000000..cd62a858 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-1/input.lammps @@ -0,0 +1,31 @@ +clear +units metal +dimension 3 +boundary p p p +atom_style atomic +# box tilt large +read_data conf.lmp +mass 1 118.71 +neigh_modify every 1 delay 0 check no +pair_style deepmd ../graph.pb +pair_coeff +compute mype all pe +compute 
mymsd all msd + +thermo 20 +thermo_style custom step temp pe pxx pyy pzz pxy pxz pyz lx ly lz vol c_mymsd[*] spcpu +min_style cg +fix 1 all box/relax iso 0.0 +minimize 1.000000e-12 1.000000e-06 5000 500000 + +# timestep 0.002 +# velocity all create 2.0 7369221 + +# fix 2 all npt temp 2.0 200.0 $(100.0*dt) aniso 0.0 200000.0 $(1000.0*dt) +# run 2000 +# unfix 2 + +dump 1 all custom 1 final.dump.relax id type xs ys zs fx fy fz +run 10000 + +write_data out.lmp diff --git a/tests/test_jh_unischeduler/0_md/bct-2/conf.lmp b/tests/test_jh_unischeduler/0_md/bct-2/conf.lmp new file mode 100644 index 00000000..7087abb1 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-2/conf.lmp @@ -0,0 +1,12 @@ + +2 atoms +1 atom types + 0.0000000000 4.0000000000 xlo xhi + 0.0000000000 4.0000000000 ylo yhi + 0.0000000000 3.3800000000 zlo zhi + 0.0000000000 0.0000000000 0.0000000000 xy xz yz + +Atoms # atomic + + 1 1 0.0000000000 0.0000000000 0.0000000000 + 2 1 2.0000000000 2.0000000000 1.6900000000 diff --git a/tests/test_jh_unischeduler/0_md/bct-2/input.lammps b/tests/test_jh_unischeduler/0_md/bct-2/input.lammps new file mode 100644 index 00000000..cd62a858 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-2/input.lammps @@ -0,0 +1,31 @@ +clear +units metal +dimension 3 +boundary p p p +atom_style atomic +# box tilt large +read_data conf.lmp +mass 1 118.71 +neigh_modify every 1 delay 0 check no +pair_style deepmd ../graph.pb +pair_coeff +compute mype all pe +compute mymsd all msd + +thermo 20 +thermo_style custom step temp pe pxx pyy pzz pxy pxz pyz lx ly lz vol c_mymsd[*] spcpu +min_style cg +fix 1 all box/relax iso 0.0 +minimize 1.000000e-12 1.000000e-06 5000 500000 + +# timestep 0.002 +# velocity all create 2.0 7369221 + +# fix 2 all npt temp 2.0 200.0 $(100.0*dt) aniso 0.0 200000.0 $(1000.0*dt) +# run 2000 +# unfix 2 + +dump 1 all custom 1 final.dump.relax id type xs ys zs fx fy fz +run 10000 + +write_data out.lmp diff --git a/tests/test_jh_unischeduler/0_md/bct-3/conf.lmp 
b/tests/test_jh_unischeduler/0_md/bct-3/conf.lmp new file mode 100644 index 00000000..7087abb1 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-3/conf.lmp @@ -0,0 +1,12 @@ + +2 atoms +1 atom types + 0.0000000000 4.0000000000 xlo xhi + 0.0000000000 4.0000000000 ylo yhi + 0.0000000000 3.3800000000 zlo zhi + 0.0000000000 0.0000000000 0.0000000000 xy xz yz + +Atoms # atomic + + 1 1 0.0000000000 0.0000000000 0.0000000000 + 2 1 2.0000000000 2.0000000000 1.6900000000 diff --git a/tests/test_jh_unischeduler/0_md/bct-3/input.lammps b/tests/test_jh_unischeduler/0_md/bct-3/input.lammps new file mode 100644 index 00000000..cd62a858 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-3/input.lammps @@ -0,0 +1,31 @@ +clear +units metal +dimension 3 +boundary p p p +atom_style atomic +# box tilt large +read_data conf.lmp +mass 1 118.71 +neigh_modify every 1 delay 0 check no +pair_style deepmd ../graph.pb +pair_coeff +compute mype all pe +compute mymsd all msd + +thermo 20 +thermo_style custom step temp pe pxx pyy pzz pxy pxz pyz lx ly lz vol c_mymsd[*] spcpu +min_style cg +fix 1 all box/relax iso 0.0 +minimize 1.000000e-12 1.000000e-06 5000 500000 + +# timestep 0.002 +# velocity all create 2.0 7369221 + +# fix 2 all npt temp 2.0 200.0 $(100.0*dt) aniso 0.0 200000.0 $(1000.0*dt) +# run 2000 +# unfix 2 + +dump 1 all custom 1 final.dump.relax id type xs ys zs fx fy fz +run 10000 + +write_data out.lmp diff --git a/tests/test_jh_unischeduler/0_md/bct-4/conf.lmp b/tests/test_jh_unischeduler/0_md/bct-4/conf.lmp new file mode 100644 index 00000000..7087abb1 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-4/conf.lmp @@ -0,0 +1,12 @@ + +2 atoms +1 atom types + 0.0000000000 4.0000000000 xlo xhi + 0.0000000000 4.0000000000 ylo yhi + 0.0000000000 3.3800000000 zlo zhi + 0.0000000000 0.0000000000 0.0000000000 xy xz yz + +Atoms # atomic + + 1 1 0.0000000000 0.0000000000 0.0000000000 + 2 1 2.0000000000 2.0000000000 1.6900000000 diff --git 
a/tests/test_jh_unischeduler/0_md/bct-4/input.lammps b/tests/test_jh_unischeduler/0_md/bct-4/input.lammps new file mode 100644 index 00000000..cd62a858 --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/bct-4/input.lammps @@ -0,0 +1,31 @@ +clear +units metal +dimension 3 +boundary p p p +atom_style atomic +# box tilt large +read_data conf.lmp +mass 1 118.71 +neigh_modify every 1 delay 0 check no +pair_style deepmd ../graph.pb +pair_coeff +compute mype all pe +compute mymsd all msd + +thermo 20 +thermo_style custom step temp pe pxx pyy pzz pxy pxz pyz lx ly lz vol c_mymsd[*] spcpu +min_style cg +fix 1 all box/relax iso 0.0 +minimize 1.000000e-12 1.000000e-06 5000 500000 + +# timestep 0.002 +# velocity all create 2.0 7369221 + +# fix 2 all npt temp 2.0 200.0 $(100.0*dt) aniso 0.0 200000.0 $(1000.0*dt) +# run 2000 +# unfix 2 + +dump 1 all custom 1 final.dump.relax id type xs ys zs fx fy fz +run 10000 + +write_data out.lmp diff --git a/tests/test_jh_unischeduler/0_md/graph.pb b/tests/test_jh_unischeduler/0_md/graph.pb new file mode 100644 index 00000000..9e112b9d --- /dev/null +++ b/tests/test_jh_unischeduler/0_md/graph.pb @@ -0,0 +1 @@ +../../graph.pb \ No newline at end of file