[pre-commit.ci] pre-commit autoupdate (#3720)
<!--pre-commit.ci start-->
updates:
- [github.com/astral-sh/ruff-pre-commit: v0.4.1 →
v0.4.2](astral-sh/ruff-pre-commit@v0.4.1...v0.4.2)
<!--pre-commit.ci end-->

---------

Signed-off-by: Jinzhe Zeng <jinzhe.zeng@rutgers.edu>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jinzhe Zeng <jinzhe.zeng@rutgers.edu>
pre-commit-ci[bot] and njzjz committed Apr 30, 2024
1 parent 95d92e9 commit ebd809b
Showing 46 changed files with 114 additions and 153 deletions.
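Beyond the two-line version bump in `.pre-commit-config.yaml` shown first, the remaining diffs follow one pattern: printf-style `%` formatting is replaced with f-strings. This is consistent with ruff's automatic fixes (the hook runs with `--fix`, and ruff ships pyupgrade-style UP rules for this rewrite); the exact rule that fired is not recorded in the commit, so the sketch below only illustrates the equivalence, with made-up values.

```python
# Minimal before/after sketch of the rewrite applied throughout this commit.
# Values are hypothetical, not taken from the repository.
ncopies = 3
dp_variant = "rocm"

# printf-style formatting (removed lines in the diffs below)
old = "# copy the system by %s copies" % ncopies
# equivalent f-string (added lines)
new = f"# copy the system by {ncopies} copies"
assert old == new

assert ("Unsupported DP_VARIANT option: %s" % dp_variant
        == f"Unsupported DP_VARIANT option: {dp_variant}")
```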
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
exclude: ^source/3rdparty
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.4.1
rev: v0.4.2
hooks:
- id: ruff
args: ["--fix"]
2 changes: 1 addition & 1 deletion backend/read_env.py
@@ -63,7 +63,7 @@ def get_argument_from_env() -> Tuple[str, list, list, dict, str]:
if hipcc_flags is not None:
os.environ["HIPFLAGS"] = os.environ.get("HIPFLAGS", "") + " " + hipcc_flags
else:
raise RuntimeError("Unsupported DP_VARIANT option: %s" % dp_variant)
raise RuntimeError(f"Unsupported DP_VARIANT option: {dp_variant}")

if os.environ.get("DP_BUILD_TESTING", "0") == "1":
cmake_args.append("-DBUILD_TESTING:BOOL=TRUE")
2 changes: 1 addition & 1 deletion data/raw/copy_raw.py
@@ -85,7 +85,7 @@ def _main():
)
args = parser.parse_args()

print("# copy the system by %s copies" % args.ncopies) # noqa: T201
print(f"# copy the system by {args.ncopies} copies") # noqa: T201
assert np.all(
np.array(args.ncopies, dtype=int) >= np.array([1, 1, 1], dtype=int)
), "number of copies should be larger than or equal to 1"
2 changes: 1 addition & 1 deletion deepmd/dpmodel/descriptor/make_base_descriptor.py
@@ -135,7 +135,7 @@ def deserialize(cls, data: dict) -> "BD":
"""
if cls is BD:
return BD.get_class_by_type(data["type"]).deserialize(data)
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")

@classmethod
@abstractmethod
2 changes: 1 addition & 1 deletion deepmd/dpmodel/fitting/make_base_fitting.py
@@ -88,7 +88,7 @@ def deserialize(cls, data: dict) -> "BF":
"""
if cls is BF:
return BF.get_class_by_type(data["type"]).deserialize(data)
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")

setattr(BF, fwd_method_name, BF.fwd)
delattr(BF, "fwd")
2 changes: 1 addition & 1 deletion deepmd/dpmodel/model/base_model.py
@@ -126,7 +126,7 @@ def deserialize(cls, data: dict) -> "BaseBaseModel":
if model_type == "standard":
model_type = data.get("fitting", {}).get("type", "ener")
return cls.get_class_by_type(model_type).deserialize(data)
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")

model_def_script: str

2 changes: 1 addition & 1 deletion deepmd/entrypoints/doc.py
@@ -16,5 +16,5 @@ def doc_train_input(*, out_type: str = "rst", **kwargs):
elif out_type == "json":
doc_str = gen_json()
else:
raise RuntimeError("Unsupported out type %s" % out_type)
raise RuntimeError(f"Unsupported out type {out_type}")
print(doc_str) # noqa: T201
4 changes: 2 additions & 2 deletions deepmd/entrypoints/neighbor_stat.py
@@ -91,6 +91,6 @@ def neighbor_stat(
data.get_batch()
nei = NeighborStat(data.get_ntypes(), rcut, mixed_type=mixed_type)
min_nbor_dist, max_nbor_size = nei.get_stat(data)
log.info("min_nbor_dist: %f" % min_nbor_dist)
log.info("max_nbor_size: %s" % str(max_nbor_size))
log.info(f"min_nbor_dist: {min_nbor_dist:f}")
log.info(f"max_nbor_size: {max_nbor_size!s}")
return min_nbor_dist, max_nbor_size
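The hunk above also shows how conversion flags and format specifiers carry over: `"%f" % x` keeps its six-decimal fixed-point formatting as the `:f` format spec, and `"%s" % str(x)` becomes the `!s` conversion, which likewise formats via `str()`. A quick check with made-up values:

```python
# Hypothetical values; the real ones come from NeighborStat.get_stat(data).
min_nbor_dist = 0.8896
max_nbor_size = [38, 72]

# "%f" and ":f" both default to six decimal places.
assert "min_nbor_dist: %f" % min_nbor_dist == f"min_nbor_dist: {min_nbor_dist:f}"

# "%s" % str(x) and "{x!s}" both call str() on the value.
assert "max_nbor_size: %s" % str(max_nbor_size) == f"max_nbor_size: {max_nbor_size!s}"
```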
11 changes: 5 additions & 6 deletions deepmd/entrypoints/test.py
@@ -460,14 +460,14 @@ def test_ener(
save_txt_file(
detail_path.with_suffix(".e.out"),
pe,
header="%s: data_e pred_e" % system,
header=f"{system}: data_e pred_e",
append=append_detail,
)
pe_atom = pe / natoms
save_txt_file(
detail_path.with_suffix(".e_peratom.out"),
pe_atom,
header="%s: data_e pred_e" % system,
header=f"{system}: data_e pred_e",
append=append_detail,
)
if not out_put_spin:
@@ -481,7 +481,7 @@ def test_ener(
save_txt_file(
detail_path.with_suffix(".f.out"),
pf,
header="%s: data_fx data_fy data_fz pred_fx pred_fy pred_fz" % system,
header=f"{system}: data_fx data_fy data_fz pred_fx pred_fy pred_fz",
append=append_detail,
)
else:
@@ -496,14 +496,13 @@ def test_ener(
save_txt_file(
detail_path.with_suffix(".fr.out"),
pf_real,
header="%s: data_fx data_fy data_fz pred_fx pred_fy pred_fz" % system,
header=f"{system}: data_fx data_fy data_fz pred_fx pred_fy pred_fz",
append=append_detail,
)
save_txt_file(
detail_path.with_suffix(".fm.out"),
pf_mag,
header="%s: data_fmx data_fmy data_fmz pred_fmx pred_fmy pred_fmz"
% system,
header=f"{system}: data_fmx data_fmy data_fmz pred_fmx pred_fmy pred_fmz",
append=append_detail,
)
pv = np.concatenate(
4 changes: 2 additions & 2 deletions deepmd/env.py
@@ -46,9 +46,9 @@
global_float_prec = "float"
else:
raise RuntimeError(
"Unsupported float precision option: %s. Supported: high,"
f"Unsupported float precision option: {dp_float_prec}. Supported: high,"
"low. Please set precision with environmental variable "
"DP_INTERFACE_PREC." % dp_float_prec
"DP_INTERFACE_PREC."
)


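In the `deepmd/env.py` hunk above, only the first fragment of the implicitly concatenated string literal gains the `f` prefix, because it is the only fragment containing a placeholder; adjacent string literals are joined at compile time, so the resulting message is unchanged. A sketch with a hypothetical value:

```python
dp_float_prec = "medium"  # hypothetical value for illustration

# Old: the % operator applies to the whole compile-time-concatenated literal.
old = (
    "Unsupported float precision option: %s. Supported: high,"
    "low. Please set precision with environmental variable "
    "DP_INTERFACE_PREC." % dp_float_prec
)
# New: only the fragment with the placeholder needs to be an f-string.
new = (
    f"Unsupported float precision option: {dp_float_prec}. Supported: high,"
    "low. Please set precision with environmental variable "
    "DP_INTERFACE_PREC."
)
assert old == new
```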
2 changes: 1 addition & 1 deletion deepmd/main.py
@@ -650,7 +650,7 @@ def main_parser() -> argparse.ArgumentParser:

# --version
parser.add_argument(
"--version", action="version", version="DeePMD-kit v%s" % __version__
"--version", action="version", version=f"DeePMD-kit v{__version__}"
)

# * train nvnmd script ******************************************************************
2 changes: 1 addition & 1 deletion deepmd/pt/optimizer/LKF.py
@@ -75,7 +75,7 @@ def __init_P(self):

P = []
params_packed_index = []
logging.info("LKF parameter nums: %s" % param_nums)
logging.info(f"LKF parameter nums: {param_nums}")
if self.dist_init:
block_num = 0
for param_num in param_nums:
6 changes: 3 additions & 3 deletions deepmd/pt/train/training.py
@@ -651,7 +651,7 @@ def warm_up_linear(step, warmup_steps):
self.wrapper.parameters(), 0.98, 0.99870, self.opt_param["kf_blocksize"]
)
else:
raise ValueError("Not supported optimizer type '%s'" % self.opt_type)
raise ValueError(f"Not supported optimizer type '{self.opt_type}'")

# Get model prob for multi-task
if self.multi_task:
@@ -808,7 +808,7 @@ def fake_model():
learning_rate=pref_lr,
)
else:
raise ValueError("Not supported optimizer type '%s'" % self.opt_type)
raise ValueError(f"Not supported optimizer type '{self.opt_type}'")

# Log and persist
if _step_id % self.disp_freq == 0:
@@ -1170,7 +1170,7 @@ def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results)
prop_fmt = " %11.2e"
for k in sorted(train_results[model_key].keys()):
print_str += prop_fmt % (train_results[model_key][k])
print_str += " %8.1e\n" % cur_lr
print_str += f" {cur_lr:8.1e}\n"
fout.write(print_str)
fout.flush()

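In the `training.py` hunk above, the width/precision specifier survives the rewrite unchanged (`" %8.1e\n" % cur_lr` becomes `f" {cur_lr:8.1e}\n"`), while the neighbouring `prop_fmt % (...)` line is left alone, presumably because its format string is held in a variable rather than a literal. A one-line check with a made-up learning rate:

```python
cur_lr = 2.0e-4  # hypothetical learning rate

# printf width/precision specifiers map directly onto f-string format specs.
assert " %8.1e\n" % cur_lr == f" {cur_lr:8.1e}\n"
```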
2 changes: 1 addition & 1 deletion deepmd/tf/cluster/local.py
@@ -45,7 +45,7 @@ def get_gpus():
stdout, stderr = p.communicate()
if p.returncode != 0:
decoded = stderr.decode("UTF-8")
raise RuntimeError("Failed to detect availbe GPUs due to:\n%s" % decoded)
raise RuntimeError(f"Failed to detect availbe GPUs due to:\n{decoded}")
decoded = stdout.decode("UTF-8").strip()
num_gpus = int(decoded)
return list(range(num_gpus)) if num_gpus > 0 else None
16 changes: 7 additions & 9 deletions deepmd/tf/descriptor/descriptor.py
@@ -247,7 +247,7 @@ def enable_compression(
This method is called by others when the descriptor supported compression.
"""
raise NotImplementedError(
"Descriptor %s doesn't support compression!" % type(self).__name__
f"Descriptor {type(self).__name__} doesn't support compression!"
)

def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None:
@@ -263,8 +263,7 @@ def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None:
This method is called by others when the descriptor supported compression.
"""
raise NotImplementedError(
"Descriptor %s doesn't support mixed precision training!"
% type(self).__name__
f"Descriptor {type(self).__name__} doesn't support mixed precision training!"
)

@abstractmethod
@@ -315,8 +314,7 @@ def init_variables(
This method is called by others when the descriptor supported initialization from the given variables.
"""
raise NotImplementedError(
"Descriptor %s doesn't support initialization from the given variables!"
% type(self).__name__
f"Descriptor {type(self).__name__} doesn't support initialization from the given variables!"
)

def get_tensor_names(self, suffix: str = "") -> Tuple[str]:
@@ -333,7 +331,7 @@ def get_tensor_names(self, suffix: str = "") -> Tuple[str]:
Names of tensors
"""
raise NotImplementedError(
"Descriptor %s doesn't support this property!" % type(self).__name__
f"Descriptor {type(self).__name__} doesn't support this property!"
)

def pass_tensors_from_frz_model(
@@ -353,7 +351,7 @@ def pass_tensors_from_frz_model(
:meth:`get_tensor_names`.
"""
raise NotImplementedError(
"Descriptor %s doesn't support this method!" % type(self).__name__
f"Descriptor {type(self).__name__} doesn't support this method!"
)

def build_type_exclude_mask(
@@ -498,7 +496,7 @@ def deserialize(cls, data: dict, suffix: str = "") -> "Descriptor":
return Descriptor.get_class_by_type(
j_get_type(data, cls.__name__)
).deserialize(data, suffix=suffix)
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")

def serialize(self, suffix: str = "") -> dict:
"""Serialize the model.
@@ -513,4 +511,4 @@ def serialize(self, suffix: str = "") -> dict:
suffix : str, optional
Name suffix to identify this descriptor
"""
raise NotImplementedError("Not implemented in class %s" % self.__name__)
raise NotImplementedError(f"Not implemented in class {self.__name__}")
8 changes: 2 additions & 6 deletions deepmd/tf/descriptor/loc_frame.py
@@ -427,12 +427,8 @@ def init_variables(
suffix : str, optional
The suffix of the scope
"""
self.davg = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_avg" % suffix
)
self.dstd = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_std" % suffix
)
self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg")
self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std")

@classmethod
def update_sel(cls, global_jdata: dict, local_jdata: dict):
8 changes: 2 additions & 6 deletions deepmd/tf/descriptor/se.py
@@ -141,12 +141,8 @@ def init_variables(
self.embedding_net_variables = get_embedding_net_variables_from_graph_def(
graph_def, suffix=suffix
)
self.davg = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_avg" % suffix
)
self.dstd = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_std" % suffix
)
self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg")
self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std")

@property
def precision(self) -> tf.DType:
16 changes: 6 additions & 10 deletions deepmd/tf/descriptor/se_a.py
@@ -554,12 +554,8 @@ def enable_compression(
min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2
)

self.davg = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_avg" % suffix
)
self.dstd = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_std" % suffix
)
self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg")
self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std")

def enable_mixed_precision(self, mixed_prec: Optional[dict] = None) -> None:
"""Reveive the mixed precision setting.
@@ -1305,14 +1301,14 @@ def init_variables(
super().init_variables(graph=graph, graph_def=graph_def, suffix=suffix)
try:
self.original_sel = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/original_sel" % suffix
graph, f"descrpt_attr{suffix}/original_sel"
)
except GraphWithoutTensorError:
# original_sel is not restored in old graphs, assume sel never changed before
pass
# check sel == original sel?
try:
sel = get_tensor_by_name_from_graph(graph, "descrpt_attr%s/sel" % suffix)
sel = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/sel")
except GraphWithoutTensorError:
# sel is not restored in old graphs
pass
@@ -1387,7 +1383,7 @@ def deserialize(cls, data: dict, suffix: str = ""):
The deserialized model
"""
if cls is not DescrptSeA:
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")
data = data.copy()
check_version_compatibility(data.pop("@version", 1), 1, 1)
data.pop("@class", None)
@@ -1422,7 +1418,7 @@ def serialize(self, suffix: str = "") -> dict:
"""
if type(self) is not DescrptSeA:
raise NotImplementedError(
"Not implemented in class %s" % self.__class__.__name__
f"Not implemented in class {self.__class__.__name__}"
)
if self.stripped_type_embedding:
raise NotImplementedError(
14 changes: 5 additions & 9 deletions deepmd/tf/descriptor/se_atten.py
@@ -492,12 +492,8 @@ def enable_compression(
)
self.two_embd = make_data(self, self.final_type_embedding)

self.davg = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_avg" % suffix
)
self.dstd = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_std" % suffix
)
self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg")
self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std")

def build(
self,
@@ -1672,7 +1668,7 @@ def deserialize(cls, data: dict, suffix: str = ""):
The deserialized model
"""
if cls is not DescrptSeAtten:
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")
data = data.copy()
check_version_compatibility(data.pop("@version"), 1, 1)
data.pop("@class")
@@ -1711,7 +1707,7 @@ def serialize(self, suffix: str = "") -> dict:
"""
if type(self) not in [DescrptSeAtten, DescrptDPA1Compat]:
raise NotImplementedError(
"Not implemented in class %s" % self.__class__.__name__
f"Not implemented in class {self.__class__.__name__}"
)
if self.stripped_type_embedding:
raise NotImplementedError(
@@ -2075,7 +2071,7 @@ def deserialize(cls, data: dict, suffix: str = ""):
The deserialized model
"""
if cls is not DescrptDPA1Compat:
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")
data = data.copy()
check_version_compatibility(data.pop("@version"), 1, 1)
data.pop("@class")
12 changes: 4 additions & 8 deletions deepmd/tf/descriptor/se_r.py
@@ -376,12 +376,8 @@ def enable_compression(
min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2
)

self.davg = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_avg" % suffix
)
self.dstd = get_tensor_by_name_from_graph(
graph, "descrpt_attr%s/t_std" % suffix
)
self.davg = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_avg")
self.dstd = get_tensor_by_name_from_graph(graph, f"descrpt_attr{suffix}/t_std")

def build(
self,
@@ -737,7 +733,7 @@ def deserialize(cls, data: dict, suffix: str = ""):
The deserialized model
"""
if cls is not DescrptSeR:
raise NotImplementedError("Not implemented in class %s" % cls.__name__)
raise NotImplementedError(f"Not implemented in class {cls.__name__}")
data = data.copy()
check_version_compatibility(data.pop("@version", 1), 1, 1)
embedding_net_variables = cls.deserialize_network(
@@ -770,7 +766,7 @@ def serialize(self, suffix: str = "") -> dict:
"""
if type(self) is not DescrptSeR:
raise NotImplementedError(
"Not implemented in class %s" % self.__class__.__name__
f"Not implemented in class {self.__class__.__name__}"
)
if self.embedding_net_variables is None:
raise RuntimeError("init_variables must be called before serialize")
