Skip to content
This repository has been archived by the owner on Nov 3, 2023. It is now read-only.

Switched logging.warn to logging.warning #3569

Merged
merged 2 commits on Apr 2, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion parlai/chat_service/services/websocket/sockets.py
Expand Up @@ -23,7 +23,7 @@ def __init__(self: T, *args, **kwargs):
self.subs: Dict[int, T] = kwargs.pop('subs')

def _default_callback(message, socketID):
logging.warn(f"No callback defined for new WebSocket messages.")
logging.warning(f"No callback defined for new WebSocket messages.")

self.message_callback = kwargs.pop('message_callback', _default_callback)
self.sid = get_rand_id()
Expand Down
8 changes: 4 additions & 4 deletions parlai/core/agents.py
Expand Up @@ -245,7 +245,7 @@ def compare_init_model_opts(opt: Opt, curr_opt: Opt):
# print warnings
extra_strs = ['{}: {}'.format(k, v) for k, v in extra_opts.items()]
if extra_strs:
logging.warn(
logging.warning(
'your model is being loaded with opts that do not '
'exist in the model you are initializing the weights with: '
'{}'.format(','.join(extra_strs))
Expand All @@ -255,7 +255,7 @@ def compare_init_model_opts(opt: Opt, curr_opt: Opt):
'--{} {}'.format(k.replace('_', '-'), v) for k, v in different_opts.items()
]
if different_strs:
logging.warn(
logging.warning(
'your model is being loaded with opts that differ '
'from the model you are initializing the weights with. Add the '
'following args to your run command to change this: \n'
Expand Down Expand Up @@ -309,7 +309,7 @@ def create_agent_from_opt_file(opt: Opt):
if opt.get('override'):
for k, v in opt['override'].items():
if k in opt_from_file and str(v) != str(opt_from_file.get(k)):
logging.warn(
logging.warning(
f'Overriding opt["{k}"] to {v} (previously: {opt_from_file.get(k)})'
)
opt_from_file[k] = v
Expand Down Expand Up @@ -413,7 +413,7 @@ def create_agent(opt: Opt, requireModelExists=False):
model = model_class(opt)
if requireModelExists and hasattr(model, 'load') and not opt.get('model_file'):
# double check that we didn't forget to set model_file on loadable model
logging.warn('model_file unset but model has a `load` function.')
logging.warning('model_file unset but model has a `load` function.')
return model
else:
raise RuntimeError('Need to set `model` argument to use create_agent.')
Expand Down
2 changes: 1 addition & 1 deletion parlai/core/params.py
Expand Up @@ -997,7 +997,7 @@ def _load_opts(self, opt):
# existing command line parameters take priority.
if key not in opt:
if opt.get('allow_missing_init_opts', False):
logging.warn(
logging.warning(
f'The "{key}" key in {optfile} will not be loaded, because it '
f'does not exist in the target opt.'
)
Expand Down
2 changes: 1 addition & 1 deletion parlai/core/teachers.py
Expand Up @@ -1975,7 +1975,7 @@ def setup_image_features(self, data_path):
with PathManager.open(image_mode_features_dict_path, 'rb') as f:
self.image_features_dict = torch.load(f, map_location='cpu')
else:
logging.warn('No existing image features, attempting to build.')
logging.warning('No existing image features, attempting to build.')
if self.is_image_mode_buildable(self.image_mode):
# TODO: Awkward to modify the input opt but needed to use
# TODO: ImageLoader functionality. Is from comment_battle,
Expand Down
4 changes: 2 additions & 2 deletions parlai/core/torch_agent.py
Expand Up @@ -966,7 +966,7 @@ def init_optim(self, params, optim_states=None, saved_optim_type=None) -> bool:
"""
if hasattr(self, 'resized_embeddings') and self.resized_embeddings:
optim_states = None
logging.warn('Not loading optimizer due to resize in token embeddings')
logging.warning('Not loading optimizer due to resize in token embeddings')

opt = self.opt

Expand Down Expand Up @@ -1052,7 +1052,7 @@ def init_optim(self, params, optim_states=None, saved_optim_type=None) -> bool:
# will remain the behavior for the time being.
if optim_states and saved_optim_type != opt['optimizer']:
# we changed from adam to adamax, or sgd to adam, or similar
logging.warn('Not loading optim state since optim class changed.')
logging.warning('Not loading optim state since optim class changed.')
return False
elif optim_states:
# check for any fp16/fp32 conversions we need to do
Expand Down
2 changes: 1 addition & 1 deletion parlai/core/torch_ranker_agent.py
Expand Up @@ -317,7 +317,7 @@ def get_task_candidates_path(self):
path = self.opt['model_file'] + '.cands-' + self.opt['task'] + '.cands'
if PathManager.exists(path) and self.opt['fixed_candidate_vecs'] == 'reuse':
return path
logging.warn(f'Building candidates file as they do not exist: {path}')
logging.warning(f'Building candidates file as they do not exist: {path}')
from parlai.scripts.build_candidates import build_cands
from copy import deepcopy

Expand Down
2 changes: 1 addition & 1 deletion parlai/crowdsourcing/tasks/model_chat/bot_agent.py
Expand Up @@ -103,7 +103,7 @@ def get_bot_agents(
# If we load many models at once, we have to keep it on CPU
model_overrides['no_cuda'] = no_cuda
else:
logging.warn(
logging.warning(
'WARNING: MTurk task has no_cuda FALSE. Models will run on GPU. Will '
'not work if loading many models at once.'
)
Expand Down
2 changes: 1 addition & 1 deletion parlai/scripts/data_stats.py
Expand Up @@ -63,7 +63,7 @@ def _report(world, counts):

def verify(opt):
if opt['datatype'] == 'train':
logging.warn('changing datatype from train to train:ordered')
logging.warning('changing datatype from train to train:ordered')
opt['datatype'] = 'train:ordered'

# create repeat label agent and assign it to the specified task
Expand Down
2 changes: 1 addition & 1 deletion parlai/scripts/eval_model.py
Expand Up @@ -139,7 +139,7 @@ def _eval_single_world(opt, agent, task):
total_cnt = world.num_examples()

if is_distributed():
logging.warn('Progress bar is approximate in distributed mode.')
logging.warning('Progress bar is approximate in distributed mode.')

while not world.epoch_done() and cnt < max_cnt:
cnt += opt.get('batchsize', 1)
Expand Down
2 changes: 1 addition & 1 deletion parlai/scripts/train_model.py
Expand Up @@ -60,7 +60,7 @@ def _num_else_inf(opt: Opt, key: str, distributed_warn=False):
if opt[key] > 0:
if distributed_warn and is_distributed():
nicekey = '--' + key.replace('_', '-')
logging.warn(
logging.warning(
f'Using {nicekey} in distributed mode can lead to slowdowns. '
'See https://github.com/facebookresearch/ParlAI/pull/3379 for more info.'
)
Expand Down
2 changes: 1 addition & 1 deletion parlai/scripts/verify_data.py
Expand Up @@ -57,7 +57,7 @@ def warn(txt, act, opt):

def verify(opt):
if opt['datatype'] == 'train':
logging.warn("changing datatype from train to train:ordered")
logging.warning("changing datatype from train to train:ordered")
opt['datatype'] = 'train:ordered'
opt.log()
# create repeat label agent and assign it to the specified task
Expand Down
2 changes: 1 addition & 1 deletion parlai/tasks/fromfile/agents.py
Expand Up @@ -100,7 +100,7 @@ def __init__(self, opt, shared=None):
if shared is None and (
'valid' in self.opt['datatype'] or 'test' in self.opt['datatype']
):
logging.warn(
logging.warning(
'You are using this fromfile data as a valid or test set without setting fromfile_datatype_extension to true. Please be aware this uses directly the file you indicated, make sure this is not the same as your training file.'
)
if shared is None:
Expand Down
4 changes: 2 additions & 2 deletions parlai/tasks/genderation_bias/agents.py
Expand Up @@ -257,7 +257,7 @@ def load_data(self, opt: Opt, filename: str) -> Optional[List[List[Message]]]:

if opt['invalidate_cache']:
# invalidate the cache and remove the existing data
logging.warn(
logging.warning(
f' [ WARNING: invalidating cache at {self.save_path} and rebuilding the data. ]'
)
if self.save_path == most_recent:
Expand Down Expand Up @@ -285,7 +285,7 @@ def save_data(self, data: List[List[Message]]):
f.write(json_data)
logging.info(f'[ Data successfully saved to path: {self.save_path} ]')
except Exception:
logging.warn('Data is not json serializable; not saving')
logging.warning('Data is not json serializable; not saving')

def get(self, episode_idx: int, entry_idx: int = 0) -> Message:
"""
Expand Down
2 changes: 1 addition & 1 deletion parlai/utils/conversations.py
Expand Up @@ -242,7 +242,7 @@ def read_metadata(self):
if self.metadata is not None:
logging.info(self.metadata)
else:
logging.warn('No metadata available.')
logging.warning('No metadata available.')

def __getitem__(self, index):
return self.conversations[index]
Expand Down
4 changes: 2 additions & 2 deletions parlai/utils/logging.py
Expand Up @@ -181,11 +181,11 @@ def error(*args, **kwargs):


def warn(*args, **kwargs):
return logger.warn(*args, **kwargs)
return logger.warning(*args, **kwargs)


def warning(*args, **kwargs):
return logger.warn(*args, **kwargs)
return logger.warning(*args, **kwargs)


def get_all_levels():
Expand Down
2 changes: 1 addition & 1 deletion parlai/utils/misc.py
Expand Up @@ -770,7 +770,7 @@ def warn_once(msg: str) -> None:
global _seen_logs
if msg not in _seen_logs:
_seen_logs.add(msg)
logging.warn(msg)
logging.warning(msg)


def error_once(msg: str) -> None:
Expand Down