[pre-commit] black version (#4506)
* black version

* lint yaml

* reblack files

* lint yaml doc
jxmsML committed Apr 20, 2022
1 parent 71c4d44 commit 3192501
Showing 37 changed files with 71 additions and 72 deletions.
14 changes: 6 additions & 8 deletions .github/workflows/lint.yml
@@ -7,14 +7,12 @@ on:
pull_request:

jobs:
- # TODO(roller): uncomment this. it drifted due to click versioning.
- # see #4481 for details
- # pre-commit:
- #   runs-on: ubuntu-latest
- #   steps:
- #   - uses: actions/checkout@v2
- #   - uses: actions/setup-python@v2
- #   - uses: pre-commit/action@v2.0.0
+ pre-commit:
+   runs-on: ubuntu-latest
+   steps:
+   - uses: actions/checkout@v2
+   - uses: actions/setup-python@v2
+   - uses: pre-commit/action@v2.0.0

lint:
runs-on: ubuntu-latest
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
- rev: 19.3b0
+ rev: 22.3.0
hooks:
- id: black
language_version: python3
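With the hook pinned to a stable Black release, the whole tree can be re-formatted in one pass (the "reblack files" step in the commit message), presumably via something like `pre-commit run black --all-files`; the re-enabled `pre-commit/action` job above then enforces the same version in CI.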
2 changes: 1 addition & 1 deletion parlai/agents/hugging_face/t5.py
@@ -345,7 +345,7 @@ def output(self, tensor):
# Taken directly from HuggingFace
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
- tensor = tensor * (self.t5.model_dim ** -0.5)
+ tensor = tensor * (self.t5.model_dim**-0.5)
lm_logits = self.t5.lm_head(tensor)
return lm_logits

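This change, and the matching ones in most hunks below, come from Black's power-operator hugging rule: when both operands of `**` are "simple" (names, numeric literals, or attribute chains, optionally signed), the surrounding spaces are dropped; 19.3b0 predates the rule. A minimal sketch with illustrative names:

```python
model_dim = 512
hidden = 511

scale = model_dim**-0.5  # Black 22.3.0: simple operands hug **
# scale = model_dim ** -0.5  # Black 19.3b0 kept the spaces

# Non-simple operands (calls, parenthesized expressions) keep their spaces:
root = (hidden + 1) ** 0.5
```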
2 changes: 1 addition & 1 deletion parlai/agents/rag/indexers.py
@@ -211,7 +211,7 @@ def index_data(self, tensors: List[torch.Tensor]):
'HNSW index needs to index all data at once, results will be unpredictable otherwise.'
)
phi = 0
- norms = (data ** 2).sum(dim=1)
+ norms = (data**2).sum(dim=1)
max_norms = norms.max().item()
phi = max(phi, max_norms)
logging.info(f'HNSWF DotProduct -> L2 space phi={phi}')
2 changes: 1 addition & 1 deletion parlai/agents/rag/modules.py
@@ -578,6 +578,6 @@ def reorder_decoder_incremental_state(

@set_device
def decoder_output(self, latent: torch.Tensor):
- tensor = latent * (self.t5.model_dim ** -0.5)
+ tensor = latent * (self.t5.model_dim**-0.5)
logits = self.t5.lm_head(tensor)
return logits
2 changes: 1 addition & 1 deletion parlai/agents/transformer/modules/decoder.py
@@ -270,7 +270,7 @@ def _default(val, default):
)
else:
nn.init.normal_(
- self.position_embeddings.weight, 0, self.embedding_size ** -0.5
+ self.position_embeddings.weight, 0, self.embedding_size**-0.5
)

# build the model
6 changes: 3 additions & 3 deletions parlai/agents/transformer/modules/encoder.py
@@ -191,7 +191,7 @@ def _default(val, default):
self.embeddings = nn.Embedding(
vocabulary_size, self.embedding_size, padding_idx=padding_idx
)
- nn.init.normal_(self.embeddings.weight, 0, self.embedding_size ** -0.5)
+ nn.init.normal_(self.embeddings.weight, 0, self.embedding_size**-0.5)

# create the positional embeddings
self.position_embeddings = nn.Embedding(self.n_positions, self.embedding_size)
@@ -203,7 +203,7 @@ def _default(val, default):
)
else:
nn.init.normal_(
- self.position_embeddings.weight, 0, self.embedding_size ** -0.5
+ self.position_embeddings.weight, 0, self.embedding_size**-0.5
)

# embedding normalization
@@ -220,7 +220,7 @@ def _default(val, default):

if self.n_segments >= 1:
self.segment_embeddings = nn.Embedding(self.n_segments, self.dim)
- nn.init.normal_(self.segment_embeddings.weight, 0, self.dim ** -0.5)
+ nn.init.normal_(self.segment_embeddings.weight, 0, self.dim**-0.5)

# build the model
self.layers = self.build_layers()
2 changes: 1 addition & 1 deletion parlai/agents/transformer/modules/functions.py
@@ -36,7 +36,7 @@ def create_embeddings(dictionary, embedding_size, padding_idx):
Create and initialize word embeddings.
"""
e = nn.Embedding(len(dictionary), embedding_size, padding_idx)
- nn.init.normal_(e.weight, mean=0, std=embedding_size ** -0.5)
+ nn.init.normal_(e.weight, mean=0, std=embedding_size**-0.5)
nn.init.constant_(e.weight[padding_idx], 0)
return e

2 changes: 1 addition & 1 deletion parlai/agents/transformer/polyencoder.py
@@ -404,7 +404,7 @@ def _get_embeddings(self, dict_, null_idx, embedding_size):
embeddings = torch.nn.Embedding(
len(dict_), embedding_size, padding_idx=null_idx
)
- torch.nn.init.normal_(embeddings.weight, 0, embedding_size ** -0.5)
+ torch.nn.init.normal_(embeddings.weight, 0, embedding_size**-0.5)
return embeddings

def attend(self, attention_layer, queries, keys, values, mask):
2 changes: 1 addition & 1 deletion parlai/core/build_data.py
@@ -175,7 +175,7 @@ def download(url, path, fname, redownload=False, num_retries=5):
download = not PathManager.exists(outfile) or redownload
logging.info(f"Downloading {url} to {outfile}")
retry = num_retries
- exp_backoff = [2 ** r for r in reversed(range(retry))]
+ exp_backoff = [2**r for r in reversed(range(retry))]

pbar = tqdm.tqdm(unit='B', unit_scale=True, desc='Downloading {}'.format(fname))

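Incidentally, the reformatted comprehension precomputes an exponential-backoff schedule; a quick worked example with the default `num_retries=5` (the wait-time interpretation is an assumption from the surrounding download loop):

```python
retry = 5
exp_backoff = [2**r for r in reversed(range(retry))]
print(exp_backoff)  # [16, 8, 4, 2, 1] -- one wait time per remaining retry
```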
2 changes: 1 addition & 1 deletion parlai/core/torch_classifier_agent.py
@@ -231,7 +231,7 @@ def update_raw(
assert self._class_name == class_name
assert len(true_labels) == len(pos_probs)

- TO_INT_FACTOR = 10 ** self._max_bucket_dec_places
+ TO_INT_FACTOR = 10**self._max_bucket_dec_places
# add the upper and lower bound of the values
for label, prob in zip(true_labels, pos_probs):
# calculate the upper and lower bound of the values
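`TO_INT_FACTOR` converts probabilities into integer buckets with a fixed number of decimal places; a small sketch (the three-place setting is an assumed example):

```python
max_bucket_dec_places = 3  # assumed value for illustration
TO_INT_FACTOR = 10**max_bucket_dec_places
prob = 0.12345
bucket = int(prob * TO_INT_FACTOR) / TO_INT_FACTOR
print(bucket)  # 0.123
```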
@@ -513,10 +513,10 @@ def compute_fleiss_kappa(
except Exception:
n_ij = 0.0
p_j[j] += n_ij
- P_bar_sum_term += n_ij ** 2
+ P_bar_sum_term += n_ij**2

p_j = [tmp / (N * number_of_raters) for tmp in p_j]
- P_e_bar = sum([tmp ** 2 for tmp in p_j])
+ P_e_bar = sum([tmp**2 for tmp in p_j])

P_bar = (P_bar_sum_term - N * number_of_raters) / (
N * number_of_raters * (number_of_raters - 1)
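For context, the lines above implement Fleiss' kappa. With $N$ items, $n$ raters, and $n_{ij}$ the number of raters assigning item $i$ to category $j$, the quantities in the code are:

$$p_j = \frac{1}{Nn}\sum_i n_{ij}, \qquad \bar P_e = \sum_j p_j^2, \qquad \bar P = \frac{\sum_{i,j} n_{ij}^2 - Nn}{Nn(n-1)}, \qquad \kappa = \frac{\bar P - \bar P_e}{1 - \bar P_e}$$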
8 changes: 2 additions & 6 deletions parlai/tasks/casino/agents.py
@@ -95,17 +95,13 @@ def _setup_data(self, data_path):
episode = copy.deepcopy(dialogue)
episode[
'perspective'
- ] = (
- 'mturk_agent_1'
- ) # id of the agent whose perspective will be used in this dialog
+ ] = 'mturk_agent_1' # id of the agent whose perspective will be used in this dialog
episodes.append(episode)

episode = copy.deepcopy(dialogue)
episode[
'perspective'
- ] = (
- 'mturk_agent_2'
- ) # id of the agent whose perspective will be used in this dialog
+ ] = 'mturk_agent_2' # id of the agent whose perspective will be used in this dialog
episodes.append(episode)

self.episodes = episodes
4 changes: 1 addition & 3 deletions parlai/tasks/multiwoz_v22/agents.py
@@ -315,9 +315,7 @@ def setup_episodes(self, fold):
if raw_episode["dialogue_id"] != self.opt["dialogue_id"]:
continue

- skip = (
- False
- ) # need to skip outer for loop while in `for domains` inner for loop
+ skip = False # need to skip outer for loop while in `for domains` inner for loop
if self.opt.get("well_formatted_domains_only", True):
if len(domains) == 0:
skip = True
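The `casino` and `multiwoz_v22` hunks show a second 22.x behavior: parentheses that 19.3b0 wrapped around a simple assignment value (to fit a trailing comment within the line limit) are removed, keeping the value inline. An illustrative sketch:

```python
episode = {}
# Black 19.3b0:
# episode["perspective"] = (
#     "mturk_agent_1"
# )  # trailing comment
episode["perspective"] = "mturk_agent_1"  # Black 22.x keeps the value inline
```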
4 changes: 2 additions & 2 deletions parlai/utils/bpe.py
@@ -608,10 +608,10 @@ def bytes_to_unicode(self) -> Dict[int, str]:
)
cs: List[int] = bs[:]
n = 0
- for b in range(2 ** 8):
+ for b in range(2**8):
if b not in bs:
bs.append(b)
- cs.append(2 ** 8 + n)
+ cs.append(2**8 + n)
n += 1
str_cs: List[str] = [chr(n) for n in cs]
return dict(zip(bs, str_cs))
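The loop being reformatted builds GPT-2's reversible byte-to-unicode table: any byte not already in the printable set `bs` is remapped to a code point above `2**8`. A simplified, self-contained sketch (the real `bs` also includes two Latin-1 ranges):

```python
bs = list(range(ord("!"), ord("~") + 1))  # printable ASCII (simplified)
cs, n = bs[:], 0
for b in range(2**8):
    if b not in bs:
        bs.append(b)
        cs.append(2**8 + n)  # shift unprintable bytes above 255
        n += 1
byte_to_unicode = dict(zip(bs, (chr(c) for c in cs)))
assert len(byte_to_unicode) == 256
```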
2 changes: 1 addition & 1 deletion parlai/utils/distributed.py
@@ -212,7 +212,7 @@ def sync_parameters(model: torch.nn.Module) -> bool:
dist.all_reduce(p.data, dist.ReduceOp.SUM)

# double check everything synced correctly
- norm2 = sum((p.data ** 2).sum().float().item() for p in model.parameters())
+ norm2 = sum((p.data**2).sum().float().item() for p in model.parameters())
all_versions = all_gather_list(norm2)
if not all(n == norm2 for n in all_versions):
raise AssertionError(
10 changes: 5 additions & 5 deletions parlai/utils/fp16.py
@@ -122,8 +122,8 @@ def __init__(self, optimizer, aggregate_gnorms=False):
raise NotImplementedError("Need to implement the parameter group transfer.")
optimizer.param_groups[0]['params'] = self.fp32_params

- self.scaler = DynamicLossScaler(2.0 ** 15)
- self.min_loss_scale = 2 ** -5
+ self.scaler = DynamicLossScaler(2.0**15)
+ self.min_loss_scale = 2**-5
self._aggregate_gnorms = aggregate_gnorms

@classmethod
@@ -318,7 +318,7 @@ class DynamicLossScaler(object):

def __init__(
self,
- init_scale: float = 2.0 ** 15,
+ init_scale: float = 2.0**15,
scale_factor: float = 2.0,
scale_window: int = 2000,
tolerance: float = 0.00,
@@ -415,7 +415,7 @@ def __init__(
self,
init_optimizer: torch.optim.Optimizer, # type: ignore
aggregate_gnorms: bool = False,
- loss_initial_scale: float = 2.0 ** 17,
+ loss_initial_scale: float = 2.0**17,
min_loss_scale: float = 1e-4,
):
self.optimizer = init_optimizer
@@ -832,7 +832,7 @@ def step(self, closure=None):
group['lr'] = self._get_lr(group, state)

beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
- update = (grad ** 2) + group['eps'][0]
+ update = (grad**2) + group['eps'][0]
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
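The constants touched in `fp16.py` belong to dynamic loss scaling for fp16 training: the scale starts high (`2.0**15`) and shrinks on overflow, with `2**-5` as the floor. A compressed sketch of that control loop, assuming the halving-on-overflow behavior the class describes:

```python
scale = 2.0**15    # initial loss scale, as in the constructor above
min_scale = 2**-5  # floor below which fp16 training gives up

def after_backward(found_overflow: bool) -> None:
    # sketch: halve the scale on overflow; DynamicLossScaler also grows it
    # again (scale_factor=2.0) after scale_window=2000 clean steps
    global scale
    if found_overflow:
        scale = max(scale / 2.0, min_scale)
```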
2 changes: 1 addition & 1 deletion projects/image_chat/transresnet_multimodal/modules.py
@@ -526,7 +526,7 @@ def __init__(
n_positions, hidden_dim, out=self.position_embeddings.weight
)
else:
- nn.init.normal_(self.position_embeddings.weight, 0, hidden_dim ** -0.5)
+ nn.init.normal_(self.position_embeddings.weight, 0, hidden_dim**-0.5)

self.layers = nn.ModuleList()
for _ in range(self.n_layers):
21 changes: 16 additions & 5 deletions projects/light_whoami/agents/expanded_attention.py
@@ -57,7 +57,7 @@


def get_classifier_model_and_dict(
- opt: Opt
+ opt: Opt,
) -> Tuple[Optional[TorchAgent], Optional[DictionaryAgent]]:
"""
Build classifier model and dictionary.
@@ -707,9 +707,14 @@ def _apply_model_parallel_with_extra(
new_incr_state = {i: [] for i, _ in enumerate(self.layers)}

for chunk_idx, layer_nos, next_device in work_items:
- s_tensor, s_enc_out, s_enc_mask, s_incr_state, s_extra_out, s_extra_mask = chunks[
-     chunk_idx
- ]
+ (
+     s_tensor,
+     s_enc_out,
+     s_enc_mask,
+     s_incr_state,
+     s_extra_out,
+     s_extra_mask,
+ ) = chunks[chunk_idx]
for layer_no in layer_nos:
s_tensor, nis = self.layers[layer_no](
x=s_tensor,
@@ -721,7 +726,13 @@
)
new_incr_state[layer_no].append(nis)
# don't move incr state, it's always on the correct device
- s_tensor, s_enc_out, s_enc_mask, s_extra_out, s_extra_mask = PipelineHelper.chunk_to(
+ (
+     s_tensor,
+     s_enc_out,
+     s_enc_mask,
+     s_extra_out,
+     s_extra_mask,
+ ) = PipelineHelper.chunk_to(
(s_tensor, s_enc_out, s_enc_mask, s_extra_out, s_extra_mask),
next_device,
)
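These hunks (and the `seeker` ones below) show Black 22.x's preferred way to split a too-long tuple unpacking: parenthesize the targets rather than break inside the right-hand side. A small runnable sketch with illustrative names:

```python
chunks = [("tensor", "enc_out", "enc_mask")]
chunk_idx = 0

# Black 22.x style: parenthesized multi-line targets
(
    s_tensor,
    s_enc_out,
    s_enc_mask,
) = chunks[chunk_idx]

# Black 19.3b0 instead broke the subscript on the right-hand side:
# s_tensor, s_enc_out, s_enc_mask = chunks[
#     chunk_idx
# ]
```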
4 changes: 1 addition & 3 deletions projects/safety_bench/model_wrappers/example_wrapper.py
@@ -29,6 +29,4 @@ def get_response(self, input_text: str) -> str:
# Be sure to reset the model's dialogue history before/after
# every call to `get_response`.

- return (
-     "Hello"
- ) # In this example, we always respond 'Hello' regardless of the input
+ return "Hello" # In this example, we always respond 'Hello' regardless of the input
8 changes: 5 additions & 3 deletions projects/seeker/agents/seeker.py
@@ -831,9 +831,11 @@ def batch_act(self, observations: List[Dict[str, Message]]) -> List[Message]:
"""
knowledge_agent_observations = [o['knowledge_agent'] for o in observations]
# First, determine whether we're searching
- batch_reply_sdm, search_indices, knowledge_agent_observations = self.batch_act_sdm(
-     observations, knowledge_agent_observations
- )
+ (
+     batch_reply_sdm,
+     search_indices,
+     knowledge_agent_observations,
+ ) = self.batch_act_sdm(observations, knowledge_agent_observations)
# Second, generate search queries
batch_reply_sqm = self.batch_act_sqm(observations, search_indices)

17 changes: 13 additions & 4 deletions projects/seeker/agents/seeker_modules.py
@@ -243,9 +243,13 @@ def encoder(

assert all(t is None for t in [input_turns_cnt, positions, segments])
# Encode with `super()` call for non-skip-retrieval inputs
- enc_out_retrieval, mask_retrieval, input_turns_cnt, top_docs, top_doc_scores = super(
-     ComboFidModel, self
- ).encoder(
+ (
+     enc_out_retrieval,
+     mask_retrieval,
+     input_turns_cnt,
+     top_docs,
+     top_doc_scores,
+ ) = super(ComboFidModel, self).encoder(
input[~skip_retrieval_vec],
input_lengths[~skip_retrieval_vec],
query_vec[~skip_retrieval_vec],
@@ -258,7 +262,12 @@ def encoder(
input[skip_retrieval_vec]
)

- new_out, new_mask, new_top_docs, new_top_doc_scores = interleave_fid_combo_outputs(
+ (
+     new_out,
+     new_mask,
+     new_top_docs,
+     new_top_doc_scores,
+ ) = interleave_fid_combo_outputs(
enc_out_retrieval,
enc_out_skip_retrieval,
mask_retrieval,
2 changes: 1 addition & 1 deletion projects/wizard_of_wikipedia/generator/agents.py
@@ -105,7 +105,7 @@ def _set_text_vec(self, obs, history, truncate):
class EndToEndAgent(_GenericWizardAgent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
- self._vectorize_text = lru_cache(int(2 ** 20))(self._vectorize_text)
+ self._vectorize_text = lru_cache(int(2**20))(self._vectorize_text)

# knowledge truncate defaults to the same as --truncate
self.knowledge_truncate = opt.get('knowledge_truncate')
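Incidentally, the wrapped line shows a per-instance memoization pattern: decorating a bound method with `functools.lru_cache` inside `__init__` gives each agent its own cache of up to `2**20` entries. A minimal sketch of the pattern (the class and method here are illustrative):

```python
from functools import lru_cache


class Agent:
    def __init__(self):
        # each instance gets its own cache rather than a class-wide shared one
        self._vectorize_text = lru_cache(int(2**20))(self._vectorize_text)

    def _vectorize_text(self, text: str) -> tuple:
        return tuple(ord(c) for c in text)
```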
1 change: 0 additions & 1 deletion tests/crowdsourcing/tasks/acute_eval/test_acute_eval.py
@@ -58,7 +58,6 @@ def test_base_task(
# Check that the agent state is as it should be
self._test_agent_state(task_data=task_data, data_regression=data_regression)
-

except ImportError:
pass

@@ -87,7 +87,6 @@ def setup_teardown(self):
# Tear down temp file
shutil.rmtree(root_dir)
-

except ImportError:
pass

@@ -103,7 +103,6 @@ def setup_teardown(self):
# Tear down temp file
shutil.rmtree(root_dir)
-

except ImportError:
pass

@@ -79,7 +79,6 @@ def setup_teardown(self):
# Tear down temp file
shutil.rmtree(root_dir)
-

except ImportError:
pass

1 change: 0 additions & 1 deletion tests/crowdsourcing/tasks/model_chat/test_image_stack.py
@@ -72,6 +72,5 @@ def test_fill_stack(self, file_regression: FileRegressionFixture):
# Check the output against what it should be
file_regression.check(contents=stdout)
-

except ImportError:
pass
1 change: 0 additions & 1 deletion tests/crowdsourcing/tasks/model_chat/test_model_chat.py
@@ -157,7 +157,6 @@ def _remove_non_deterministic_keys(self, actual_state: dict) -> dict:

return actual_state
-

except ImportError:
pass

@@ -132,6 +132,5 @@ def test_worker_results_file(
outputs = setup_teardown
file_regression.check(outputs[prefix], basename=prefix)
-

except ImportError:
pass
