This repository has been archived by the owner on Nov 3, 2023. It is now read-only.

None metrics #4902

Merged: 4 commits, Dec 5, 2022
5 changes: 4 additions & 1 deletion parlai/core/metrics.py
@@ -292,7 +292,8 @@ def from_mask(
         cls, metric_per_token: torch.Tensor, mask: torch.Tensor
     ) -> List[Metric]:
         """
-        From token-level metrics, returns an aggregate MyMetric per example in the batch.
+        From token-level metrics, returns an aggregate MyMetric per example in the
+        batch.
 
         :param metric_per_token:
             a (batchsize x num_tokens) Tensor
@@ -1097,6 +1098,8 @@ def _consume_user_metrics(self, observation):
         # User-reported metrics
         if 'metrics' in observation:
             for uk, v in observation['metrics'].items():
+                if v is None:
+                    continue
                 if uk in ALL_METRICS:
                     # don't let the user override our metrics
                     uk = f'USER_{uk}'
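For readers skimming the diff: the two added lines simply skip user-reported metric entries whose value is None, so they never reach aggregation. A minimal standalone sketch of the same pattern follows; the `record_metric` callback and the contents of `ALL_METRICS` here are illustrative placeholders, not ParlAI's actual API.

```python
# Illustrative sketch: drop None-valued user metrics before recording them.
ALL_METRICS = {'ppl', 'token_acc'}  # assumed reserved names, for illustration only

def consume_user_metrics(observation, record_metric):
    if 'metrics' in observation:
        for uk, v in observation['metrics'].items():
            if v is None:
                # A None value carries no measurement; aggregating it would
                # raise or corrupt averages, so it is silently dropped.
                continue
            if uk in ALL_METRICS:
                # don't let the user override built-in metrics
                uk = f'USER_{uk}'
            record_metric(uk, v)

# Example: the None-valued 'custom_f1' entry is ignored.
consume_user_metrics(
    {'metrics': {'custom_f1': None, 'ppl': 12.3}},
    record_metric=lambda k, v: print(k, v),  # prints: USER_ppl 12.3
)
```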
4 changes: 1 addition & 3 deletions projects/cringe/cringe_loss.py
@@ -88,9 +88,7 @@ def __call__(self, x, y, classifier_labels=None, **kwargs):
 
         # concatenate the logits of the preds with the actual label's logits
         x_target = x[torch.arange(x.shape[0]), y]
-        x_ct = torch.concat(
-            [x_target.unsqueeze(1), sample_preds_values.unsqueeze(1)], -1
-        )
+        x_ct = torch.cat([x_target.unsqueeze(1), sample_preds_values.unsqueeze(1)], -1)
         # get the y's for the x_ct (the correct label is index 0 if
         # the target is positive and index 1 if the target is negative)
         y_ct = torch.abs(torch.abs(classifier_labels) - 1).type(y.dtype).to(x_ct.device)
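The cringe_loss.py change swaps `torch.concat` for `torch.cat`. As far as I know, `torch.concat` is a more recently added alias for `torch.cat`, so behaviour is identical and the rename mainly keeps the code working on older PyTorch releases. A toy sketch of the shapes involved, using made-up sizes and the same variable names as the diff:

```python
# Toy sketch of the concatenation in the diff; sizes are made up for illustration.
import torch

x = torch.randn(3, 5)                      # per-example logits (batch x vocab)
y = torch.tensor([0, 2, 4])                # target token ids
sample_preds_values = torch.randn(3)       # logits of the sampled predictions

x_target = x[torch.arange(x.shape[0]), y]  # logit of the true label, shape (3,)
# torch.cat joins the two (3, 1) columns into a (3, 2) tensor:
# column 0 = true-label logit, column 1 = sampled-prediction logit.
x_ct = torch.cat([x_target.unsqueeze(1), sample_preds_values.unsqueeze(1)], -1)
assert x_ct.shape == (3, 2)
```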