Commit e0bf673: refine warnings (#2641)
* refine warnings

* fix

* further improve

* further improve

* refine warnings

* refine warnings

* workaround emb warning for jit.script
zhuhaozhe committed Mar 11, 2024
1 parent c251356 commit e0bf673
Showing 39 changed files with 515 additions and 268 deletions.
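Note: the `utils/_logger` module that the changed files import is not among the hunks shown on this page. As orientation, a minimal sketch consistent with the call sites below might look as follows; the six `WarningType` members and the `logger.warn/warning/info(msg, _type=...)` call shape are taken from the hunks, while the class name, message format, and everything else here is an assumption:

    # Hypothetical sketch of intel_extension_for_pytorch/utils/_logger.py.
    # Only the call patterns (logger.warn/warning/info with _type=..., the
    # six WarningType members) are taken from this commit's hunks.
    import logging
    from enum import Enum, auto


    class WarningType(Enum):
        NotSupported = auto()
        MissingDependency = auto()
        MissingArgument = auto()
        WrongArgument = auto()
        AmbiguousArgument = auto()
        DeprecatedArgument = auto()


    class _IPEXLogger(logging.Logger):
        def warning(self, msg, *args, _type=None, **kwargs):
            # Prefix the message with its category so the refined warnings
            # can be filtered or grepped by type.
            if _type is not None:
                msg = f"[{_type.name}] {msg}"
            super().warning(msg, *args, **kwargs)

        warn = warning  # the hunks call both logger.warn() and logger.warning()

        def info(self, msg, *args, _type=None, **kwargs):
            # launch.py passes _type to logger.info() as well
            if _type is not None:
                msg = f"[{_type.name}] {msg}"
            super().info(msg, *args, **kwargs)


    logging.setLoggerClass(_IPEXLogger)
    logger = logging.getLogger("intel_extension_for_pytorch")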
8 changes: 5 additions & 3 deletions intel_extension_for_pytorch/__init__.py
@@ -2,7 +2,7 @@
 import re
 
 import torch
-import warnings
+
 
 try:
     import torchvision
@@ -128,12 +128,14 @@
 from . import _dynamo
 from . import _meta_registrations
 from ._init_on_device import OnDevice
+from .utils._logger import logger, WarningType
 
 try:
     from .cpu import tpp
 except BaseException:
-    warnings.warn(
-        "Please install transformers repo when you want to use fast_bert API."
+    logger.warn(
+        "Please install transformers repo when you want to use fast_bert API.",
+        _type=WarningType.MissingArgument,
     )
 
 from .frontend import optimize
8 changes: 5 additions & 3 deletions intel_extension_for_pytorch/_inductor/compiler.py
@@ -2,9 +2,8 @@
 from torch._subclasses import FakeTensor
 from torch.utils._mode_utils import no_dispatch
 import builtins
-import warnings
 from typing import Callable, Dict, Optional, Union, List
-
+from ..utils._logger import logger, WarningType
 
 _compiler_backend = "inductor"

@@ -66,7 +65,10 @@ def defake(x):
             traced_model = torch.jit.freeze(traced_model)
             return traced_model
         except Exception:
-            warnings.warn("JIT trace failed during the IPEX compile process.")
+            logger.warning(
+                "JIT trace failed during the IPEX compile process.",
+                _type=WarningType.NotSupported,
+            )
             return model
     else:
         raise RuntimeError(
6 changes: 3 additions & 3 deletions intel_extension_for_pytorch/_inductor/decomposition.py
@@ -1,14 +1,14 @@
-import logging
 import torch._decomp as decomp
 
-log = logging.getLogger(__name__)
+from ..utils._logger import logger
 
 decomposition_overrides = {}
 
 
 def register_decomposition(ops):
     for op in [ops] if callable(ops) else ops:
         if op in decomposition_overrides:
-            log.warning(f"duplicate decomp: {ops}")
+            logger.warning(f"duplicate decomp: {ops}")
     return decomp.register_decomposition(ops, decomposition_overrides)
 
 
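For context, the wrapper above only warns on duplicate registrations before delegating to `torch._decomp.register_decomposition`. A hypothetical registration through it could look like this; the `aten.silu` choice and the decomposition body are illustrative, not from this commit:

    # Hypothetical usage of the guarded register_decomposition() above
    # (assumes the module's register_decomposition is in scope).
    import torch

    @register_decomposition(torch.ops.aten.silu)
    def _silu_decomp(x):
        # Decompose silu into primitives the inductor backend can fuse.
        return x * torch.sigmoid(x)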
10 changes: 4 additions & 6 deletions intel_extension_for_pytorch/cpu/auto_ipex.py
@@ -1,17 +1,13 @@
 import os
 import platform
 import glob
-import logging
+from ..utils._logger import logger, WarningType
 import sys
 from argparse import ArgumentParser, REMAINDER
 from argparse import RawTextHelpFormatter
 from tempfile import mkstemp
 import uuid
 
-format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-logging.basicConfig(level=logging.INFO, format=format_str)
-logger = logging.getLogger(__name__)
-
 
 def apply_monkey_patch(program, dtype, auto_ipex_verbose, disable_ipex_graph_mode):
     # Auto apply the ipex features
@@ -213,7 +209,9 @@ def main():
                 lst_valid.append(item)
             else:
                 logger.warning(
-                    "{} doesn't exist. Removing it from LD_PRELOAD.".format(item)
+                    f"You have set {item} into LD_PRELOAD but it doesn't exist. Removing it from LD_PRELOAD."
+                    + "please install it if you want it or remove it from LD_PRELOAD if you don't",
+                    _type=WarningType.MissingDependency,
                 )
         if len(lst_valid) > 0:
             os.environ["LD_PRELOAD"] = ":".join(lst_valid)
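The hunk above sits inside a loop in `main()` that validates every `LD_PRELOAD` entry. A condensed, standalone sketch of that behavior, with the surrounding context assumed:

    # Condensed sketch of the LD_PRELOAD sanity filter changed above.
    import os
    from intel_extension_for_pytorch.utils._logger import logger, WarningType

    lst_valid = []
    for item in os.environ.get("LD_PRELOAD", "").split(":"):
        if not item:
            continue  # skip empty segments
        if os.path.exists(item):
            lst_valid.append(item)
        else:
            # Warn with the new MissingDependency category instead of a bare message.
            logger.warning(
                f"You have set {item} into LD_PRELOAD but it doesn't exist. Removing it from LD_PRELOAD.",
                _type=WarningType.MissingDependency,
            )
    if len(lst_valid) > 0:
        os.environ["LD_PRELOAD"] = ":".join(lst_valid)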
21 changes: 13 additions & 8 deletions intel_extension_for_pytorch/cpu/graph_capture.py
@@ -7,9 +7,9 @@
 from typing import List
 
 import functools
-import logging
 import threading
-import warnings
+from ..utils._logger import logger, WarningType
 
 
 class RunMethods(IntEnum):
@@ -37,7 +37,10 @@ def compiler(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
                 traced_model = torch.jit.freeze(traced_model)
                 return traced_model
             except Exception:
-                warnings.warn("JIT trace failed during the 'compiler' process.")
+                logger.warning(
+                    "JIT trace failed during the 'compiler' process.",
+                    _type=WarningType.NotSupported,
+                )
                 return gm
 
         @functools.wraps(func)
@@ -62,8 +65,9 @@ def forward(*input, **kwargs):
                 else:
                     return self.model(*input, **kwargs)
             if self.train:
-                warnings.warn(
-                    "graph capture does not support training yet."
+                logger.warning(
+                    "graph capture does not support training yet.",
+                    _type=WarningType.NotSupported,
                 )
                 self.method = RunMethods.EagerTrain
                 return func(*input, **kwargs)
@@ -89,7 +93,7 @@ def forward(*input, **kwargs):
                         output = traced_model(*input, **kwargs)
                         self.model = traced_model
                         self.method = RunMethods.JIT
-                        logging.debug("generate graph by JIT trace.")
+                        logger.debug("generate graph by JIT trace.")
                         return output
                     except BaseException:
                         try:
@@ -101,11 +105,12 @@ def forward(*input, **kwargs):
                             output = dynamo_model(*input, **kwargs)
                             self.model = dynamo_model
                             self.method = RunMethods.TorchDynamo
-                            logging.debug("generate graph by TorchDynamo.")
+                            logger.debug("generate graph by TorchDynamo.")
                             return output
                         except BaseException:
-                            warnings.warn(
-                                "Both JIT and TorchDynamo failed, fallback to original model."
+                            logger.warning(
+                                "Both JIT and TorchDynamo failed, fallback to original model.",
+                                _type=WarningType.NotSupported,
                             )
                             self.method = RunMethods.EagerInfer
                             torch._dynamo.reset()
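These hunks touch only the logging calls; the surrounding logic is a two-stage fallback (JIT trace, then TorchDynamo, then eager). A condensed sketch of that chain as reconstructed from the fragments shown; the `torch.compile` entry point and the exact trace and validation calls are assumptions:

    # Condensed sketch of graph_capture's trace-then-fallback chain.
    import torch
    from intel_extension_for_pytorch.utils._logger import logger, WarningType

    def capture(model, example_input):
        try:
            traced_model = torch.jit.trace(model, example_input).eval()
            traced_model = torch.jit.freeze(traced_model)
            traced_model(example_input)  # run once to validate the trace
            logger.debug("generate graph by JIT trace.")
            return traced_model
        except BaseException:
            try:
                dynamo_model = torch.compile(model)  # assumed TorchDynamo entry point
                dynamo_model(example_input)
                logger.debug("generate graph by TorchDynamo.")
                return dynamo_model
            except BaseException:
                logger.warning(
                    "Both JIT and TorchDynamo failed, fallback to original model.",
                    _type=WarningType.NotSupported,
                )
                torch._dynamo.reset()
                return model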
6 changes: 5 additions & 1 deletion intel_extension_for_pytorch/cpu/hypertune/objective.py
@@ -1,6 +1,7 @@
 # reference: https://github.com/intel/neural-compressor/blob/\
 # 15477100cef756e430c8ef8ef79729f0c80c8ce6/neural_compressor/objective.py
 import subprocess
+from ...utils._logger import logger, WarningType
 
 
 class MultiObjective(object):
@@ -39,7 +40,10 @@ def deprecate_config(self, cfg, deprecated, new, default):
), f"Configurations {deprecated} and {new} cannot be set at the same time."
ret = default
if v_deprecated != default:
print(f"[**Warning**] Configuration {deprecated} is deprecated by {new}.")
logger.warn(
f"[**Warning**] Configuration {deprecated} is deprecated by {new}.",
_type=WarningType.DeprecatedArgument,
)
ret = v_deprecated
if v_new != default:
ret = v_new
25 changes: 19 additions & 6 deletions intel_extension_for_pytorch/cpu/launch/cpu_info.py
@@ -3,6 +3,7 @@
 import platform
 import re
 import subprocess
+from ...utils._logger import WarningType
 
 # lscpu Examples
 # # The following is the parsable format, which can be fed to other
@@ -206,7 +207,7 @@ def __init__(self, logger=None, lscpu_txt=""):
             if c.maxmhz in e_core_mhzs:
                 c.is_p_core = False
 
-    def verbose(self, level, msg):
+    def verbose(self, level, msg, warning_type=None):
         if self.logger:
             logging_fn = {
                 "warning": self.logger.warning,
@@ -215,7 +216,7 @@ def verbose(self, level, msg):
             assert (
                 level in logging_fn.keys()
             ), f"Unrecognized logging level {level} is detected. Available levels are {logging_fn.keys()}."
-            logging_fn[level](msg)
+            logging_fn[level](msg, _type=warning_type)
         else:
             print(msg)
 
@@ -264,12 +265,18 @@ def gen_pools_ondemand(
         if use_logical_cores:
             self.verbose(
                 "warning",
-                "Argument --use-logical-cores won't take effect when --cores-list is set.",
+                "Argument --use-logical-cores won't take effect when --cores-list is set."
+                + "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide"  # noqa: B950
+                + "for usage guide",
+                warning_type=WarningType.AmbiguousArgument,
             )
         if use_e_cores:
             self.verbose(
                 "warning",
                 "Argument --use-e-cores won't take effect when --cores-list is set.",
+                + "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide"  # noqa: B950
+                + "for usage guide",
+                warning_type=WarningType.AmbiguousArgument,
             )
         pool = [c for c in self.pool_all if c.cpu in cores_list]
         nodes = list(set([c.node for c in pool]))
@@ -284,6 +291,9 @@
                     self.verbose(
                         "warning",
                         "Argument --skip-cross-node-cores cannot take effect on the designated cores. Disabled.",
+                        + "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide"  # noqa: B950
+                        + "for usage guide",
+                        warning_type=WarningType.WrongArgument,
                     )
                     break
             else:
@@ -302,7 +312,7 @@
             e_cores = [c.cpu for c in pool if not c.is_p_core]
             if len(e_cores) > 0:
                 self.verbose(
-                    "warning",
+                    "info",
                     f"Efficient-Cores are detected ({e_cores}). Disabled for performance consideration. \
 You can enable them with argument --use-e-cores.",
                 )
@@ -348,8 +358,11 @@ def gen_pools_ondemand(
         if skip_cross_node_cores:
             self.verbose(
                 "warning",
-                "Argument --skip-cross-node-cores won't take effect when both --ninstances and \
-    --ncores-per-instance are explicitly set.",
+                "Argument --skip-cross-node-cores won't take effect when both --ninstances and"
+                + " --ncores-per-instance are explicitly set."
+                + "please see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/launch_script.html#launch-script-usage-guide"  # noqa: B950
+                + "for usage guide",
+                warning_type=WarningType.AmbiguousArgument,
             )
         assert (
             ninstances * ncores_per_instance > 0
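The `verbose()` changes thread the new `warning_type` keyword through a level-to-function dispatch table. A standalone restatement of that pattern under the same assumptions, with names local to this sketch:

    # Standalone restatement of the verbose() dispatch shown above.
    from intel_extension_for_pytorch.utils._logger import logger, WarningType

    def verbose(level, msg, warning_type=None, log=logger):
        if log:
            logging_fn = {
                "warning": log.warning,
                "info": log.info,
            }
            assert level in logging_fn, f"Unrecognized logging level {level} is detected."
            # info-level callers simply pass warning_type=None through
            logging_fn[level](msg, _type=warning_type)
        else:
            print(msg)

    verbose(
        "warning",
        "Argument --skip-cross-node-cores cannot take effect on the designated cores. Disabled.",
        warning_type=WarningType.WrongArgument,
    )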
63 changes: 42 additions & 21 deletions intel_extension_for_pytorch/cpu/launch/launch.py
@@ -8,6 +8,7 @@
 import intel_extension_for_pytorch.cpu.auto_ipex as auto_ipex
 from .launcher_distributed import DistributedTrainingLauncher
 from .launcher_multi_instances import MultiInstancesLauncher
+from ...utils._logger import logger, WarningType
 
 """
 This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
@@ -220,53 +221,72 @@ def add_deprecated_params(parser):

 def process_deprecated_params(args, logger):
     if args.nproc_per_node != -1:
-        logger.warning("Argument --nproc_per_node is deprecated by --nprocs-per-node.")
+        logger.warning(
+            "Argument --nproc_per_node is deprecated by --nprocs-per-node.",
+            _type=WarningType.DeprecatedArgument,
+        )
         args.nprocs_per_node = args.nproc_per_node
     if args.more_mpi_params != "":
         logger.warning(
-            "Argument --more_mpi_params is deprecated by --extra-mpi-params."
+            "Argument --more_mpi_params is deprecated by --extra-mpi-params.",
+            _type=WarningType.DeprecatedArgument,
         )
         args.extra_mpi_params = args.more_mpi_params
     if args.ncore_per_instance != -1:
         logger.warning(
-            "Argument --ncore_per_instance is deprecated by --ncores-per-instance."
+            "Argument --ncore_per_instance is deprecated by --ncores-per-instance.",
+            _type=WarningType.DeprecatedArgument,
         )
         args.ncores_per_instance = args.ncore_per_instance
     if args.node_id != -1:
-        logger.warning("Argument --node_id is deprecated by --nodes-list.")
+        logger.warning(
+            "Argument --node_id is deprecated by --nodes-list.",
+            _type=WarningType.DeprecatedArgument,
+        )
         args.nodes_list = str(args.node_id)
     if args.core_list != "":
-        logger.warning("Argument --core_list is deprecated by --cores-list.")
+        logger.warning(
+            "Argument --core_list is deprecated by --cores-list.",
+            _type=WarningType.DeprecatedArgument,
+        )
         args.cores_list = args.core_list
     if args.logical_core_for_ccl:
         logger.warning(
-            "Argument --logical_core_for_ccl is deprecated by --logical-cores-for-ccl."
+            "Argument --logical_core_for_ccl is deprecated by --logical-cores-for-ccl.",
+            _type=WarningType.DeprecatedArgument,
         )
         args.logical_cores_for_ccl = args.logical_core_for_ccl
     if args.use_logical_core:
         logger.warning(
-            "Argument --use_logical_core is deprecated by --use-logical-cores."
+            "Argument --use_logical_core is deprecated by --use-logical-cores.",
+            _type=WarningType.DeprecatedArgument,
         )
         args.use_logical_cores = args.use_logical_core
     if args.log_path != "":
-        logger.warning("Argument --log_path is deprecated by --log-dir.")
+        logger.warning(
+            "Argument --log_path is deprecated by --log-dir.",
+            _type=WarningType.DeprecatedArgument,
+        )
         args.log_dir = args.log_path
 
     if args.multi_instance:
         logger.info(
-            "Argument --multi_instance is deprecated. Will be removed. \
-If you are using the deprecated argument, please update it to the new one."
+            "Argument --multi_instance is deprecated. Will be removed."
+            + "If you are using the deprecated argument, please update it to the new one.",
+            _type=WarningType.DeprecatedArgument,
         )
     if args.distributed:
         logger.info(
-            "Argument --distributed is deprecated. Will be removed. \
-If you are using the deprecated argument, please update it to the new one."
+            "Argument --distributed is deprecated. Will be removed."
+            + "If you are using the deprecated argument, please update it to the new one.",
+            _type=WarningType.DeprecatedArgument,
        )
 
     if args.enable_tcmalloc or args.enable_jemalloc or args.use_default_allocator:
         logger.warning(
-            "Arguments --enable_tcmalloc, --enable_jemalloc and --use_default_allocator \
-are deprecated by --memory-allocator."
+            "Arguments --enable_tcmalloc, --enable_jemalloc and --use_default_allocator"
+            + "are deprecated by --memory-allocator tcmalloc/jemalloc/auto.",
+            _type=WarningType.DeprecatedArgument,
         )
     if args.use_default_allocator:
         args.memory_allocator = "default"
@@ -276,16 +296,21 @@ def process_deprecated_params(args, logger):
args.memory_allocator = "tcmalloc"
if args.disable_numactl:
logger.warning(
"Argument --disable_numactl is deprecated by --multi-task-manager."
"Argument --disable_numactl is deprecated by --multi-task-manager taskset.",
_type=WarningType.DeprecatedArgument,
)
args.multi_task_manager = "taskset"
if args.disable_taskset:
logger.warning(
"Argument --disable_taskset is deprecated by --multi-task-manager."
"Argument --disable_taskset is deprecated by --multi-task-manager numactl.",
_type=WarningType.DeprecatedArgument,
)
args.multi_task_manager = "numactl"
if args.disable_iomp:
logger.warning("Argument --disable_iomp is deprecated by --omp-runtime.")
logger.warning(
"Argument --disable_iomp is deprecated by --omp-runtime default.",
_type=WarningType.DeprecatedArgument,
)
args.omp_runtime = "default"


@@ -383,10 +408,6 @@ def run_main_with_args(args):
     if platform.system() == "Windows":
         raise RuntimeError("Windows platform is not supported!!!")
 
-    format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    logging.basicConfig(level=logging.INFO, format=format_str)
-    logger = logging.getLogger(__name__)
-
     launcher_distributed = DistributedTrainingLauncher(logger)
     launcher_multi_instances = MultiInstancesLauncher(logger)
 
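For launcher users the net effect is that the deprecated underscore flags still work but now warn with `WarningType.DeprecatedArgument` before being remapped. A hypothetical exercise of the remapping; the import path and the exact argument set are assumptions based on the hunks above:

    # Hypothetical driver for process_deprecated_params(); module path assumed.
    from argparse import Namespace
    from intel_extension_for_pytorch.cpu.launch.launch import process_deprecated_params
    from intel_extension_for_pytorch.utils._logger import logger

    args = Namespace(
        # deprecated spellings set by a user
        nproc_per_node=2, use_logical_core=True, log_path="./logs",
        # remaining flags at their defaults, since the function reads all of them
        more_mpi_params="", ncore_per_instance=-1, node_id=-1, core_list="",
        logical_core_for_ccl=False, multi_instance=False, distributed=False,
        enable_tcmalloc=False, enable_jemalloc=False, use_default_allocator=False,
        disable_numactl=False, disable_taskset=False, disable_iomp=False,
        nprocs_per_node=-1, use_logical_cores=False, log_dir="",
    )
    process_deprecated_params(args, logger)  # emits three DeprecatedArgument warnings
    assert args.nprocs_per_node == 2
    assert args.use_logical_cores is True
    assert args.log_dir == "./logs"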
[Diff truncated: the remaining 31 changed files are not shown.]
