[pre-commit.ci] pre-commit autoupdate (#350)
<!--pre-commit.ci start-->
updates:
- [github.com/psf/black: 23.3.0 → 23.7.0](psf/black@23.3.0...23.7.0)
- https://github.com/charliermarsh/ruff-pre-commit → https://github.com/astral-sh/ruff-pre-commit
- [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.0.278](astral-sh/ruff-pre-commit@v0.0.275...v0.0.278)
- [github.com/asottile/blacken-docs: 1.14.0 → 1.15.0](adamchainz/blacken-docs@1.14.0...1.15.0)
<!--pre-commit.ci end-->

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
pre-commit-ci[bot] committed Jul 18, 2023
1 parent 251ca87 commit b678969
Showing 10 changed files with 24 additions and 67 deletions.
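Most of the Python changes below are mechanical `.format()` → f-string rewrites, which accounts for the deletions outnumbering the additions. They were presumably applied by the updated ruff running with `args: ["--fix"]` (its pyupgrade-derived rule UP032 rewrites literal `str.format()` calls), though the commit itself only records the version bumps. A minimal sketch of the pattern, with a hypothetical value:

    # str.format() with a named placeholder and the equivalent f-string.
    submission_hash = "b678969"  # hypothetical value for illustration
    old_style = "{submission_hash}.json".format(submission_hash=submission_hash)
    new_style = f"{submission_hash}.json"  # interpolates the variable in place
    assert old_style == new_style == "b678969.json"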
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -17,12 +17,12 @@ repos:
       - id: check-toml
   # Python
   - repo: https://github.com/psf/black
-    rev: 23.3.0
+    rev: 23.7.0
     hooks:
       - id: black-jupyter
-  - repo: https://github.com/charliermarsh/ruff-pre-commit
+  - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.0.275
+    rev: v0.0.278
     hooks:
       - id: ruff
         args: ["--fix"]
@@ -34,6 +34,6 @@ repos:
         args: ["--write"]
   # Python inside docs
   - repo: https://github.com/asottile/blacken-docs
-    rev: 1.14.0
+    rev: 1.15.0
     hooks:
       - id: blacken-docs
4 changes: 1 addition & 3 deletions dpdispatcher/dp_cloud_server_context.py
@@ -256,9 +256,7 @@ def check_home_file_exits(self, fname):
         return os.path.isfile(os.path.join(DP_CLOUD_SERVER_HOME_DIR, fname))

     def clean(self):
-        submission_file_name = "{submission_hash}.json".format(
-            submission_hash=self.submission.submission_hash
-        )
+        submission_file_name = f"{self.submission.submission_hash}.json"
         submission_json = os.path.join(DP_CLOUD_SERVER_HOME_DIR, submission_file_name)
         os.remove(submission_json)
         return True
4 changes: 1 addition & 3 deletions dpdispatcher/fugaku.py
@@ -24,9 +24,7 @@ def gen_script_header(self, job):
         ] = f'#PJM -L "node={resources.number_node}" '
         fugaku_script_header_dict[
             "fugaku_ntasks_per_node_line"
-        ] = '#PJM --mpi "max-proc-per-node={cpu_per_node}"'.format(
-            cpu_per_node=resources.cpu_per_node
-        )
+        ] = f'#PJM --mpi "max-proc-per-node={resources.cpu_per_node}"'
         fugaku_script_header_dict[
             "queue_name_line"
         ] = f'#PJM -L "rscgrp={resources.queue_name}"'
16 changes: 4 additions & 12 deletions dpdispatcher/hdfs_cli.py
@@ -90,9 +90,7 @@ def copy_from_local(local_path, to_uri):
             raise RuntimeError(
                 "try to access local_path[{}] " "but failed".format(local_path)
             )
-        cmd = "hadoop fs -copyFromLocal -f {local} {remote}".format(
-            local=local_path, remote=to_uri
-        )
+        cmd = f"hadoop fs -copyFromLocal -f {local_path} {to_uri}"
         try:
             ret, out, err = run_cmd_with_all_output(cmd)
             if ret == 0:
@@ -106,9 +104,7 @@ def copy_from_local(local_path, to_uri):
                 )
         except Exception as e:
             raise RuntimeError(
-                "Cannot copy local[{}] to remote[{}] with cmd[{}]".format(
-                    local_path, to_uri, cmd
-                )
+                f"Cannot copy local[{local_path}] to remote[{to_uri}] with cmd[{cmd}]"
             ) from e

     @staticmethod
@@ -118,9 +114,7 @@ def copy_to_local(from_uri, local_path):
             remote = from_uri
         elif isinstance(from_uri, list) or isinstance(from_uri, tuple):
             remote = " ".join(from_uri)
-        cmd = "hadoop fs -copyToLocal {remote} {local}".format(
-            remote=remote, local=local_path
-        )
+        cmd = f"hadoop fs -copyToLocal {remote} {local_path}"

         try:
             ret, out, err = run_cmd_with_all_output(cmd)
@@ -135,9 +129,7 @@ def copy_to_local(from_uri, local_path):
                 )
         except Exception as e:
             raise RuntimeError(
-                "Cannot copy remote[{}] to local[{}] with cmd[{}]".format(
-                    from_uri, local_path, cmd
-                )
+                f"Cannot copy remote[{from_uri}] to local[{local_path}] with cmd[{cmd}]"
             ) from e

     @staticmethod
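A note on the unchanged context line `"try to access local_path[{}] " "but failed".format(local_path)` above: the two adjacent string literals are concatenated by the Python compiler into a single format string (an artifact of black's line splitting), so the fixer has nothing to rewrite there. A small illustration with a hypothetical path:

    # Adjacent string literals fuse into one string before .format() runs.
    local_path = "/tmp/example"  # hypothetical path for illustration
    msg = "try to access local_path[{}] " "but failed".format(local_path)
    assert msg == "try to access local_path[/tmp/example] but failed"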
5 changes: 1 addition & 4 deletions dpdispatcher/hdfs_context.py
@@ -137,10 +137,7 @@ def download(
         if os.path.exists(gz_dir):
             shutil.rmtree(gz_dir, ignore_errors=True)
         os.mkdir(os.path.join(self.local_root, "tmp"))
-        rfile_tgz = "{}/{}_*_download.tar.gz".format(
-            self.remote_root,
-            submission.submission_hash,
-        )
+        rfile_tgz = f"{self.remote_root}/{submission.submission_hash}_*_download.tar.gz"
         lfile_tgz = "%s/tmp/" % (self.local_root)
         HDFS.copy_to_local(rfile_tgz, lfile_tgz)
8 changes: 2 additions & 6 deletions dpdispatcher/lsf.py
@@ -31,12 +31,8 @@ def gen_script_header(self, job):
             "lsf_nodes_line": "#BSUB -n {number_cores}".format(
                 number_cores=resources.number_node * resources.cpu_per_node
             ),
-            "lsf_ptile_line": "#BSUB -R 'span[ptile={cpu_per_node}]'".format(
-                cpu_per_node=resources.cpu_per_node
-            ),
-            "lsf_partition_line": "#BSUB -q {queue_name}".format(
-                queue_name=resources.queue_name
-            ),
+            "lsf_ptile_line": f"#BSUB -R 'span[ptile={resources.cpu_per_node}]'",
+            "lsf_partition_line": f"#BSUB -q {resources.queue_name}",
         }
         gpu_usage_flag = resources.kwargs.get("gpu_usage", False)
         gpu_new_syntax_flag = resources.kwargs.get("gpu_new_syntax", False)
4 changes: 1 addition & 3 deletions dpdispatcher/machine.py
@@ -208,9 +208,7 @@ def gen_script(self, job):

     def check_if_recover(self, submission):
         submission_hash = submission.submission_hash
-        submission_file_name = "{submission_hash}.json".format(
-            submission_hash=submission_hash
-        )
+        submission_file_name = f"{submission_hash}.json"
         if_recover = self.context.check_file_exists(submission_file_name)
         return if_recover
16 changes: 4 additions & 12 deletions dpdispatcher/pbs.py
@@ -22,16 +22,12 @@ def gen_script_header(self, job):
         pbs_script_header_dict = {}
         pbs_script_header_dict[
             "select_node_line"
-        ] = "#PBS -l select={number_node}:ncpus={cpu_per_node}".format(
-            number_node=resources.number_node, cpu_per_node=resources.cpu_per_node
-        )
+        ] = f"#PBS -l select={resources.number_node}:ncpus={resources.cpu_per_node}"
         if resources.gpu_per_node != 0:
             pbs_script_header_dict[
                 "select_node_line"
             ] += f":ngpus={resources.gpu_per_node}"
-        pbs_script_header_dict["queue_name_line"] = "#PBS -q {queue_name}".format(
-            queue_name=resources.queue_name
-        )
+        pbs_script_header_dict["queue_name_line"] = f"#PBS -q {resources.queue_name}"
         pbs_script_header = pbs_script_header_template.format(**pbs_script_header_dict)
         return pbs_script_header

@@ -147,15 +143,11 @@ def gen_script_header(self, job):
         pbs_script_header_dict = {}
         pbs_script_header_dict[
             "select_node_line"
-        ] = "#PBS -l nodes={number_node}:ppn={cpu_per_node}".format(
-            number_node=resources.number_node, cpu_per_node=resources.cpu_per_node
-        )
+        ] = f"#PBS -l nodes={resources.number_node}:ppn={resources.cpu_per_node}"
         if resources.gpu_per_node != 0:
             pbs_script_header_dict["select_node_line"] += ":gpus={gpu_per_node}".format(
                 gpu_per_node=resources.gpu_per_node
             )
-        pbs_script_header_dict["queue_name_line"] = "#PBS -q {queue_name}".format(
-            queue_name=resources.queue_name
-        )
+        pbs_script_header_dict["queue_name_line"] = f"#PBS -q {resources.queue_name}"
         pbs_script_header = pbs_script_header_template.format(**pbs_script_header_dict)
         return pbs_script_header
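Note that `pbs_script_header_template.format(**pbs_script_header_dict)` is left alone in both hunks: an f-string can only replace a `.format()` call on a string literal, not on a template stored in a variable, so the fixer correctly skips it. A minimal sketch with a hypothetical template:

    # Templates rendered later must keep str.format(); an f-string evaluates
    # immediately at the point where it is written.
    template = "#PBS -q {queue_name}"  # hypothetical stand-in for pbs_script_header_template
    rendered = template.format(queue_name="gpu")
    assert rendered == "#PBS -q gpu"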
8 changes: 2 additions & 6 deletions dpdispatcher/slurm.py
@@ -34,16 +34,12 @@ def gen_script_header(self, job):
         )
         script_header_dict[
             "slurm_ntasks_per_node_line"
-        ] = "#SBATCH --ntasks-per-node {cpu_per_node}".format(
-            cpu_per_node=resources.cpu_per_node
-        )
+        ] = f"#SBATCH --ntasks-per-node {resources.cpu_per_node}"
         custom_gpu_line = resources.kwargs.get("custom_gpu_line", None)
         if not custom_gpu_line:
             script_header_dict[
                 "slurm_number_gpu_line"
-            ] = "#SBATCH --gres=gpu:{gpu_per_node}".format(
-                gpu_per_node=resources.gpu_per_node
-            )
+            ] = f"#SBATCH --gres=gpu:{resources.gpu_per_node}"
         else:
             script_header_dict["slurm_number_gpu_line"] = custom_gpu_line
         if resources.queue_name != "":
18 changes: 4 additions & 14 deletions dpdispatcher/submission.py
@@ -509,9 +509,7 @@ def clean_jobs(self):
     def submission_to_json(self):
         # self.update_submission_state()
         write_str = json.dumps(self.serialize(), indent=4, default=str)
-        submission_file_name = "{submission_hash}.json".format(
-            submission_hash=self.submission_hash
-        )
+        submission_file_name = f"{self.submission_hash}.json"
         self.machine.context.write_file(submission_file_name, write_str=write_str)

     @classmethod
@@ -525,9 +523,7 @@ def submission_from_json(cls, json_file_name="submission.json"):
         # def check_if_recover()

     def try_recover_from_json(self):
-        submission_file_name = "{submission_hash}.json".format(
-            submission_hash=self.submission_hash
-        )
+        submission_file_name = f"{self.submission_hash}.json"
         if_recover = self.machine.context.check_file_exists(submission_file_name)
         submission = None
         submission_dict = {}
@@ -787,9 +783,7 @@ def deserialize(cls, job_dict, machine=None):
         """
         if len(job_dict.keys()) != 1:
             raise RuntimeError(
-                "json file may be broken, len(job_dict.keys()) must be 1. {job_dict}".format(
-                    job_dict=job_dict
-                )
+                f"json file may be broken, len(job_dict.keys()) must be 1. {job_dict}"
             )
         job_hash = list(job_dict.keys())[0]

@@ -871,11 +865,7 @@ def handle_unexpected_job_state(self):
             # raise RuntimeError("job:job {job} failed 3 times".format(job=self))
             self.submit_job()
             if self.job_state != JobStatus.unsubmitted:
-                dlog.info(
-                    "job: {job_hash} submit; job_id is {job_id}".format(
-                        job_hash=self.job_hash, job_id=self.job_id
-                    )
-                )
+                dlog.info(f"job: {self.job_hash} submit; job_id is {self.job_id}")
             if self.resources.wait_time != 0:
                 time.sleep(self.resources.wait_time)
             # self.get_job_state()
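The `dlog.info` rewrite in the last hunk shows that f-strings accept arbitrary expressions, not just bare names: `self.job_hash` and `self.job_id` are attribute lookups evaluated in place. A small sketch with hypothetical values:

    class Job:
        def __init__(self, job_hash, job_id):
            self.job_hash = job_hash  # hypothetical fields mirroring dpdispatcher's Job
            self.job_id = job_id

    job = Job("abc123", 42)
    # Attribute access inside the braces is evaluated when the string is built.
    assert f"job: {job.job_hash} submit; job_id is {job.job_id}" == "job: abc123 submit; job_id is 42"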
