Convert logging f-strings to use % format, part five (#98765)
This does some annoying but simple cases by hand.

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: #98765
Approved by: https://github.com/wanchaol
ezyang authored and pytorchmergebot committed Apr 11, 2023
1 parent 5a7aad9 commit b8b840b
Showing 21 changed files with 77 additions and 92 deletions.
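The pattern applied throughout the files below defers string formatting to the logging framework: instead of interpolating an f-string eagerly at the call site, the call passes a %-style format string plus the arguments, and the message is only rendered if a handler actually emits the record. A minimal sketch of the idea, using a hypothetical logger and variable:

import logging

log = logging.getLogger(__name__)
mode = "eager"  # hypothetical backend name

# Before: the f-string is formatted even when the log level is disabled,
# and the raw argument is not preserved on the log record.
log.warning(f"Backend {mode} failed in warmup()")

# After: %-style formatting is deferred until the record is emitted,
# and `mode` stays available as an argument on the LogRecord.
log.warning("Backend %s failed in warmup()", mode)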
2 changes: 1 addition & 1 deletion benchmarks/dynamo/common.py
@@ -1393,7 +1393,7 @@ def warmup(fn, model, example_inputs, mode, niters=5):
percentage = psutil.Process(os.getpid()).memory_percent()
peak_mem = percentage * total / 10**9
except Exception:
-log.exception(f"Backend {mode} failed in warmup()")
+log.exception("Backend %s failed in warmup()", mode)
return sys.exit(-1)
dynamo_stats = get_dynamo_stats()
dynamo_stats.subtract(start_stats)
1 change: 0 additions & 1 deletion docs/source/conf.py
@@ -98,7 +98,6 @@
"cudart",
"is_bf16_supported",
# torch.cuda._sanitizer
"format_log_message",
"zip_arguments",
"zip_by_key",
# torch.distributed.autograd
2 changes: 1 addition & 1 deletion functorch/benchmarks/chrome_trace_parser.py
@@ -61,7 +61,7 @@ def main():
utilization, mm_conv_utilization = compute_utilization(filenames, total_length)
print(f"{modelname}, {utilization}, {mm_conv_utilization}")
except BaseException:
-logging.exception(f"{filename}, ERROR")
+logging.exception("%s, ERROR", filename)
print(f"{filename}, ERROR")

if __name__ == "__main__":
18 changes: 9 additions & 9 deletions tools/linter/adapters/s3_init.py
@@ -7,7 +7,6 @@
import stat
import subprocess
import sys
-import textwrap
import urllib.error
import urllib.request
from pathlib import Path
@@ -77,16 +76,17 @@ def check(binary_path: Path, reference_hash: str) -> bool:
return True

logging.warning(
-textwrap.dedent(
-f"""\
-Found binary hash does not match reference!
-Found hash: {existing_binary_hash}
-Reference hash: {reference_hash}
-Deleting {binary_path} just to be safe.
-"""
-)
+"""\
+Found binary hash does not match reference!
+Found hash: %s
+Reference hash: %s
+Deleting %s just to be safe.
+""",
+existing_binary_hash,
+reference_hash,
+binary_path,
)
if DRY_RUN:
logging.critical(
8 changes: 4 additions & 4 deletions torch/_dynamo/symbolic_convert.py
@@ -784,11 +784,11 @@ def calc_package(self):
if package is not None:
if spec is not None and package != spec.parent:
log.warning(
"__package__ != __spec__.parent "
f"({package!r} != {spec.parent!r})",
ImportWarning,
"__package__ != __spec__.parent (%r != %r)",
package,
spec.parent,
stacklevel=3,
) # type: ignore[call-arg]
)
return package
elif spec is not None:
return spec.parent
2 changes: 1 addition & 1 deletion torch/_dynamo/utils.py
@@ -321,7 +321,7 @@ def write_record_to_file(filename, exec_record):
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.error(f"Unable to write execution record {filename}", exc_info=1)
log.error("Unable to write execution record %s", filename, exc_info=1)


def count_calls(g: fx.Graph):
2 changes: 1 addition & 1 deletion torch/backends/_nnapi/serializer.py
@@ -491,7 +491,7 @@ def get_tensor_operand_by_jitval_fixed_size(self, jitval):
raise Exception("Flexible size is not supported for this operand.")
if s < 0:
# runtime flex
LOG.warning(f"Operand {oper} has runtime flex shape")
LOG.warning("Operand %s has runtime flex shape", oper)
return op_id, oper

def get_tensor_operand_or_constant(self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS):
6 changes: 3 additions & 3 deletions torch/backends/xeon/run_cpu.py
@@ -332,10 +332,10 @@ def set_memory_allocator(self, enable_tcmalloc=True, enable_jemalloc=False, use_
if find_je:
logger.info("Use JeMalloc memory allocator")
return
logger.warning(f"""Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib
logger.warning("""Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib
or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or
{expanduser("~")}/.local/lib/ so the LD_PRELOAD environment variable will not be set.
This may drop the performance""")
%s/.local/lib/ so the LD_PRELOAD environment variable will not be set.
This may drop the performance""", expanduser("~"))

def log_env_var(self, env_var_name=""):
if env_var_name in os.environ:
74 changes: 26 additions & 48 deletions torch/cuda/_sanitizer.py
@@ -142,10 +142,6 @@ def __str__(self):
return f"detected {len(self.errors)} errors"


-def format_log_message(message: str) -> str:
-return " ".join(line.strip() for line in message.strip().splitlines())


@dataclass
class TensorInfo:
r"""Stores information about a single tensor and recent accesses to it.
@@ -169,27 +165,21 @@ def __init__(self):
def ensure_tensor_exists(self, data_ptr: DataPtr) -> None:
if data_ptr not in self.accesses:
logger.info(
-format_log_message(
-f"""
-Found tensor with pointer: {data_ptr}, but no matching tensor
-allocation in the trace. Backfilling the trace now.
-Perhaps the sanitizer was enabled after some torch operations?
-"""
-)
+"Found tensor with pointer: %s, but no matching tensor "
+"allocation in the trace. Backfilling the trace now. "
+"Perhaps the sanitizer was enabled after some torch operations?",
+data_ptr
)
self.create_tensor(data_ptr, None)

def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None:
if data_ptr in self.accesses:
logger.info(
-format_log_message(
-f"""
-Found duplicate tensor allocation in the trace for tensor with
-pointer: {data_ptr}. Assuming the trace for tensor deallocation
-wasn't caught and backfilling it now.
-Perhaps the sanitizer was enabled after some torch operations?
-"""
-)
+"Found duplicate tensor allocation in the trace for tensor with "
+"pointer: %s. Assuming the trace for tensor deallocation "
+"wasn't caught and backfilling it now. "
+"Perhaps the sanitizer was enabled after some torch operations?",
+data_ptr
)
self.delete_tensor(data_ptr)

@@ -233,53 +223,41 @@ def __init__(self):
def _ensure_stream_exists(self, stream: StreamId) -> None:
if stream not in self.current_sync_states:
logger.info(
-format_log_message(
-f"""
-Found Stream with id: {stream}, but no matching stream
-creation in the trace. Backfilling the trace now.
-Perhaps the sanitizer was enabled after some torch operations?
-"""
-)
+"Found Stream with id: %s, but no matching stream "
+"creation in the trace. Backfilling the trace now. "
+"Perhaps the sanitizer was enabled after some torch operations?",
+stream
)
self.create_stream(stream)

def _ensure_event_exists(self, event: EventId) -> None:
if event not in self.recorded_sync_states:
logger.info(
-format_log_message(
-f"""
-Found Event with id: {event}, but no matching event
-creation in the trace. Backfilling the trace now.
-Perhaps the sanitizer was enabled after some torch operations?
-"""
-)
+"Found Event with id: %s, but no matching event "
+"creation in the trace. Backfilling the trace now. "
+"Perhaps the sanitizer was enabled after some torch operations?",
+event
)
self.create_event(event)

def _ensure_event_does_not_exist(self, event: EventId) -> None:
if event in self.recorded_sync_states:
logger.info(
-format_log_message(
-f"""
-Found duplicate event creation in the trace for event with
-id: {event}. Assuming the trace for event deletion wasn't caught
-and backfilling it now.
-Perhaps the sanitizer was enabled after some torch operations?
-"""
-)
+"Found duplicate event creation in the trace for event with "
+"id: %s. Assuming the trace for event deletion wasn't caught "
+"and backfilling it now. "
+"Perhaps the sanitizer was enabled after some torch operations?",
+event
)
self.delete_event(event)

def create_stream(self, stream: StreamId) -> None:
if stream in self.current_sync_states:
logger.info(
-format_log_message(
-f"""
-Found duplicate Stream creation in the trace for Stream with
-id: {stream}. PyTorch Streams are only created once, so this
-trace entry is ignored.
-"""
-)
+"Found duplicate Stream creation in the trace for Stream with "
+"id: %s. PyTorch Streams are only created once, so this "
+"trace entry is ignored.",
+stream
)
else:
self.host_sync_state[stream] = 0
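The _sanitizer.py hunks above also drop the format_log_message helper, which previously collapsed a dedented triple-quoted f-string into one line; the replacement relies on Python's implicit concatenation of adjacent string literals. A small sketch of that behaviour (the logger name and pointer value are made-up examples):

import logging

logger = logging.getLogger(__name__)
data_ptr = 140212345678912  # hypothetical tensor data pointer

# Adjacent string literals are joined at compile time, so the multi-line
# layout still produces a single one-line message, with %s filled in lazily.
logger.info(
    "Found tensor with pointer: %s, but no matching tensor "
    "allocation in the trace. Backfilling the trace now. "
    "Perhaps the sanitizer was enabled after some torch operations?",
    data_ptr,
)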
5 changes: 3 additions & 2 deletions torch/distributed/elastic/agent/server/api.py
@@ -415,7 +415,7 @@ class ElasticAgent(abc.ABC):
if group_result.is_failed():
# workers failed
failure = group_result.failures[0]
log.exception(f"worker 0 failed with exit code : {failure.exit_code}")
log.exception("worker 0 failed with exit code : %s", failure.exit_code)
else:
return group_result.return_values[0] # return rank 0's results
@@ -949,5 +949,6 @@ def _exit_barrier(self):
raise
except Exception:
log.exception(
f"Error waiting on exit barrier. Elapsed: {time.time() - start} seconds"
"Error waiting on exit barrier. Elapsed: %s seconds",
time.time() - start
)
9 changes: 6 additions & 3 deletions torch/distributed/elastic/multiprocessing/api.py
@@ -491,9 +491,12 @@ def _poll(self) -> Optional[RunProcsResult]:
error_filepath = self.error_files[failed_local_rank]

log.error(
f"failed (exitcode: {failed_proc.exitcode})"
f" local_rank: {failed_local_rank} (pid: {e.pid})"
f" of fn: {fn_name} (start_method: {self.start_method})",
"failed (exitcode: %s)"
" local_rank: %s (pid: %s)"
" of fn: %s (start_method: %s)",
failed_proc.exitcode,
failed_local_rank, e.pid,
fn_name, self.start_method,
exc_info=True,
)

9 changes: 5 additions & 4 deletions torch/distributed/elastic/multiprocessing/errors/__init__.py
@@ -118,7 +118,7 @@ def __post_init__(self):
self.error_file_data
)
except Exception:
log.exception(f"Failed to parse reply file: {self.error_file}")
log.exception("Failed to parse reply file: %s", self.error_file)
raise
else:
self._set_no_reply_file()
@@ -351,9 +351,10 @@ def wrapper(*args, **kwargs):
else:
log.info(
(
f"local_rank {rank} FAILED with no error file."
f" Decorate your entrypoint fn with @record for traceback info."
f" See: https://pytorch.org/docs/stable/elastic/errors.html"
"local_rank %s FAILED with no error file."
" Decorate your entrypoint fn with @record for traceback info."
" See: https://pytorch.org/docs/stable/elastic/errors.html",
rank
)
)
raise
5 changes: 3 additions & 2 deletions torch/distributed/elastic/multiprocessing/tail_log.py
@@ -132,8 +132,9 @@ def stop(self) -> None:
f.result()
except Exception as e:
log.error(
f"error in log tailor for {self._name}{local_rank}."
f" {e.__class__.__qualname__}: {e}",
"error in log tailor for %s%s. %s: %s",
self._name, local_rank,
e.__class__.__qualname__, e,
)

if self._threadpool:
2 changes: 1 addition & 1 deletion torch/distributed/elastic/timer/file_based_local_timer.py
@@ -329,5 +329,5 @@ def _reap_worker(self, worker_pid: int, signal: int) -> bool:
log.info("Process with pid=%s does not exist. Skipping", worker_pid)
return True
except Exception as e:
log.error(f"Error terminating pid={worker_pid}", exc_info=e)
log.error("Error terminating pid=%s", worker_pid, exc_info=e)
return False
2 changes: 1 addition & 1 deletion torch/distributed/elastic/timer/local_timer.py
@@ -121,5 +121,5 @@ def _reap_worker(self, worker_id: int) -> bool:
log.info("Process with pid=%s does not exist. Skipping", worker_id)
return True
except Exception as e:
log.error(f"Error terminating pid={worker_id}", exc_info=e)
log.error("Error terminating pid=%s", worker_id, exc_info=e)
return False
9 changes: 5 additions & 4 deletions torch/distributed/rpc/__init__.py
@@ -148,10 +148,11 @@ def init_rpc(
# Ignore type error because mypy doesn't handle dynamically generated type objects (#4865)
if backend != BackendType.TENSORPIPE: # type: ignore[attr-defined]
logger.warning(
f"RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined]
f"corresponding to {backend}, hence that backend will be used "
f"instead of the default {BackendType.TENSORPIPE}. To silence this "
f"warning pass `backend={backend}` explicitly."
"RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined]
"corresponding to %(backend)s, hence that backend will be used "
"instead of the default BackendType.TENSORPIPE. To silence this "
"warning pass `backend=%(backend)s` explicitly.",
{'backend': backend}
)

if backend is None:
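The torch/distributed/rpc change above switches to logging's mapping-style arguments: when a single dict is passed as the only argument, %(name)s placeholders are looked up in it, so the same value can appear more than once in the message without being repeated in the argument list. A standalone sketch with a hypothetical backend value:

import logging

logger = logging.getLogger(__name__)
backend = "BackendType.TENSORPIPE"  # hypothetical stand-in for the real enum value

# One dict argument feeds every %(backend)s placeholder in the format string,
# and the interpolation is still deferred until the record is emitted.
logger.warning(
    "RPC was initialized with no explicit backend but with options "
    "corresponding to %(backend)s. To silence this warning pass "
    "`backend=%(backend)s` explicitly.",
    {"backend": backend},
)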
2 changes: 1 addition & 1 deletion torch/fx/passes/infra/partitioner.py
@@ -207,7 +207,7 @@ def merge_single_node(node: Node, id: Optional[int]):

logger.debug("Partitions proposed:")
for id, partition in partitions_by_id.items():
logger.debug(f"partition #{id}", [node.name for node in partition.nodes])
logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])

return list(partitions_by_id.values())

2 changes: 1 addition & 1 deletion torch/fx/passes/infra/pass_manager.py
@@ -280,7 +280,7 @@ def __call__(self, module: nn.Module) -> PassResult:
modified = modified or res.modified

if isinstance(module, GraphModule):
logger.debug(f"Graph after pass '{fn_name}':", module.graph)
logger.debug("Graph after pass '%s': %s", fn_name, module.graph)
module.recompile()

# Check graph invariants
2 changes: 1 addition & 1 deletion torch/fx/passes/net_min_base.py
@@ -450,7 +450,7 @@ def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
report.append(f"Sequential traverse iteration {self.iteration}.")
report.append(f"Visit node: {node.name}")

_LOGGER.info(f"Visit node: {node.name}")
_LOGGER.info("Visit node: %s", node.name)
cur_nodes: NodeSet = {node}

if node in self.fusions:
2 changes: 1 addition & 1 deletion torch/package/_importlib.py
@@ -63,7 +63,7 @@ def _calc___package__(globals):
if package is not None:
if spec is not None and package != spec.parent:
_warnings.warn( # noqa: G010
"__package__ != __spec__.parent " f"({package!r} != {spec.parent!r})",
f"__package__ != __spec__.parent ({package!r} != {spec.parent!r})", # noqa: G004
ImportWarning,
stacklevel=3,
)
@@ -244,8 +244,9 @@ def train_batch(
if trainer_has_less_inputs:
input_batches = batches[: len(batches) // 2]
gLogger.info(
f"""Trainer reduced input patches from {len(batches)}
to {len(input_batches)} to simulate uneven inputs."""
"Trainer reduced input patches from %s "
"to %s to simulate uneven inputs.",
len(batches), len(input_batches)
)
else:
input_batches = batches