
Transforming metric values #959


❓ Questions/Help/Support

The documentation of Metric says that the return value of compute can be Any, so I'm trying to compute all of these metrics in a single pass over the whole validation data loader:

from ignite.metrics import Metric
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from scipy.sparse import coo_matrix
from sklearn import metrics


class SuperMetrics(Metric):

  def __init__(self, num_labels, output_transform=lambda x: x, device=None):
      self.num_labels = num_labels
      self._y = None
      self._y_pred = None
      self._num_drugs = None
      super(SuperMetrics, self).__init__(output_transform=output_transform,
                                         device=device)

  def compute_metrics(self, y, y_pred):  # pylint: disable=no-self-use
      # Densify the sparse matrices and binarize: any positive count becomes 1.
      y = y.toarray()
      y_pred = y_pred.toarray()
      y[y > 0] = 1
      y_pred[y_pred > 0] = 1

      hamming_loss = metrics.hamming_loss(y, y_pred)
      macro_f1 = metrics.f1_score(y, y_pred, average='macro')
      macro_precision = metrics.precision_score(y, y_pred, average='macro')
      macro_recall = metrics.recall_score(y, y_pred, average='macro')
      micro_f1 = metrics.f1_score(y, y_pred, average='micro')
      micro_precision = metrics.precision_score(y, y_pred, average='micro')
      micro_recall = metrics.recall_score(y, y_pred, average='micro')

      return {
          'hamming_loss': hamming_loss,
          'macro_f1': macro_f1,
          'macro_precision': macro_precision,
          'macro_recall': macro_recall,
          'micro_f1': micro_f1,
          'micro_precision': micro_precision,
          'micro_recall': micro_recall
      }

  @reinit__is_reduced
  def reset(self):
      self._y = []
      self._y_pred = []
      self._num_drugs = []
      super(SuperMetrics, self).reset()

  @reinit__is_reduced
  def update(self, output):
      # output is expected to be a tuple of per-batch lists: (y, y_pred, num_drugs).
      y, y_pred, num_drugs = output
      self._y += y
      self._y_pred += y_pred
      self._num_drugs += num_drugs

  @sync_all_reduce('_y', '_y_pred', '_num_drugs')
  def compute(self):
      num_examples = len(self._num_drugs)

      # Build multi-hot sparse matrices of shape (num_examples, num_labels).
      rows = []
      y_columns, y_pred_columns = [], []
      for i, (y_sample, y_pred_sample, num_drug_sample) in enumerate(
              zip(self._y, self._y_pred, self._num_drugs)):
          rows += [i] * (num_drug_sample - 2)
          y_columns += y_sample[1:1 + num_drug_sample - 2]
          y_pred_columns += y_pred_sample[:num_drug_sample - 2]
      values = [1] * len(rows)
      y = coo_matrix((values, (rows, y_columns)),
                     shape=(num_examples, self.num_labels))
      y_pred = coo_matrix((values, (rows, y_pred_columns)),
                          shape=(num_examples, self.num_labels))

      return self.compute_metrics(y, y_pred)

The problem is that if this metric is attached to the evaluator by passing metrics={'super': SuperMetrics(vocab_size)}, I get a nested value in engine.state.metrics. That is fine as long as I only print it to the terminal, but I cannot figure out a way to make it work with NeptuneLogger:

from ignite.contrib.handlers.neptune_logger import NeptuneLogger, OutputHandler
from ignite.engine import Events

neptune_logger.attach(
    evaluator,
    log_handler=OutputHandler(tag='val',
                              metric_names='all'),
    event_name=Events.EPOCH_COMPLETED(every=params['eval_freq']))
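
The only workaround I can think of is to flatten the dictionary myself with a plain event handler registered before the logger. A rough, untested sketch (the 'super' key matches the name I used above; since handlers fire in registration order, this would have to be attached before neptune_logger.attach):

@evaluator.on(Events.EPOCH_COMPLETED(every=params['eval_freq']))
def flatten_super_metrics(engine):
    # Pop the nested dict out of engine.state.metrics and re-insert
    # its entries as flat scalars so OutputHandler can log them.
    nested = engine.state.metrics.pop('super', None)
    if isinstance(nested, dict):
        for key, value in nested.items():
            engine.state.metrics[key] = value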

Is there a safe way and place to flatten engine.state.metrics? Or should I even be doing this? Is there any advice on computing all of these metrics in one pass using ignite? Thanks!
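
For what it's worth, one alternative I considered is MetricsLambda, exposing each dictionary entry as its own scalar metric. This is only a sketch based on my reading of the docs, and note that it would call SuperMetrics.compute() once per key:

from ignite.metrics import MetricsLambda

super_metric = SuperMetrics(vocab_size)
for key in ['hamming_loss', 'macro_f1', 'macro_precision', 'macro_recall',
            'micro_f1', 'micro_precision', 'micro_recall']:
    # Each lambda receives the computed dict and picks out a single entry.
    MetricsLambda(lambda d, k=key: d[k], super_metric).attach(evaluator, key)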
