"""Wrapper around the Waymo Open Motion Dataset motion metrics ops."""

import tensorflow as tf
from google.protobuf import text_format

from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.protos import motion_metrics_pb2


def _default_metrics_config():
  """Builds the default MotionMetricsConfig used for evaluation."""
  config = motion_metrics_pb2.MotionMetricsConfig()
  config_text = """
  track_steps_per_second: 10
  prediction_steps_per_second: 2
  track_history_samples: 10
  track_future_samples: 80
  speed_lower_bound: 1.4
  speed_upper_bound: 11.0
  speed_scale_lower: 0.5
  speed_scale_upper: 1.0
  step_configurations {
    measurement_step: 5
    lateral_miss_threshold: 1.0
    longitudinal_miss_threshold: 2.0
  }
  step_configurations {
    measurement_step: 9
    lateral_miss_threshold: 1.8
    longitudinal_miss_threshold: 3.6
  }
  step_configurations {
    measurement_step: 15
    lateral_miss_threshold: 3.0
    longitudinal_miss_threshold: 6.0
  }
  max_predictions: 6
  """
  text_format.Parse(config_text, config)
  return config
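

# With the defaults above, tracks are sampled at 10 Hz: 10 history samples plus
# the current step plus 80 future samples give 91 ground-truth steps per track.
# Predictions are scored at 2 Hz, so measurement steps 5, 9 and 15 correspond
# to horizons of (step + 1) / 2 = 3 s, 5 s and 8 s.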
class MotionMetrics(tf.keras.metrics.Metric):
  """Wrapper for motion metrics computation.

  Accumulates predictions and ground truth across batches in `update_state`
  and defers the actual metric computation to `result`.
  """

  def __init__(self, config=None):
    super().__init__()
    self._ground_truth_trajectory = []
    self._ground_truth_is_valid = []
    self._prediction_trajectory = []
    self._prediction_score = []
    self._object_type = []
    if config is None:
      config = _default_metrics_config()
    self._metrics_config = config

  def reset_state(self):
    """Clears all accumulated predictions and ground truth."""
    self._ground_truth_trajectory = []
    self._ground_truth_is_valid = []
    self._prediction_trajectory = []
    self._prediction_score = []
    self._object_type = []

  def update_state(self,
                   prediction_trajectory,
                   prediction_score,
                   ground_truth_trajectory=None,
                   ground_truth_is_valid=None,
                   object_type=None):
    """Accumulates one batch of predictions and ground truth.

    Args:
      prediction_trajectory: [batch_size, K, steps, 2] predicted (x, y).
      prediction_score: [batch_size, K] score per predicted trajectory.
      ground_truth_trajectory: [batch_size, gt_steps, 7] ground-truth states.
      ground_truth_is_valid: [batch_size, gt_steps] validity mask.
      object_type: [batch_size] object type per trajectory.
    """
    self._prediction_trajectory.append(prediction_trajectory)
    self._prediction_score.append(prediction_score)
    self._ground_truth_trajectory.append(ground_truth_trajectory)
    self._ground_truth_is_valid.append(ground_truth_is_valid)
    self._object_type.append(object_type)

  def result(self):
    # [batch_size, K, steps, 2].
    prediction_trajectory = tf.concat(self._prediction_trajectory, 0)
    # [batch_size, K].
    prediction_score = tf.concat(self._prediction_score, 0)
    # [batch_size, gt_steps, 7].
    ground_truth_trajectory = tf.concat(self._ground_truth_trajectory, 0)
    # [batch_size, gt_steps].
    ground_truth_is_valid = tf.concat(self._ground_truth_is_valid, 0)
    # [batch_size].
    object_type = tf.cast(tf.concat(self._object_type, 0), tf.int64)

    # The model predicts at track_steps_per_second, but the eval code only
    # needs prediction_steps_per_second. Subsample to the evaluation rate.
    interval = (
        self._metrics_config.track_steps_per_second //
        self._metrics_config.prediction_steps_per_second)
    prediction_trajectory = prediction_trajectory[:, :,
                                                  (interval - 1)::interval]
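    # Worked example with the default config: interval = 10 // 2 = 5, so from
    # 80 predicted steps at 10 Hz we keep indices 4, 9, ..., 79 (16 steps,
    # one every 0.5 s), matching the 2 Hz evaluation rate.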

    # Shapes expected by py_metrics_ops.motion_metrics:
    # prediction_trajectory: [B, M, K, N, TP, 2]. Predicted trajectories; the
    #   innermost dimensions are [x, y].
    # prediction_score: [B, M, K]. Score per joint prediction.
    # ground_truth_trajectory: [B, A, TG, 7]. Ground-truth trajectories; the
    #   innermost dimensions are [x, y, length, width, heading, velocity_x,
    #   velocity_y].
    # ground_truth_is_valid: [B, A, TG]. Whether a time step is valid per
    #   trajectory. If all time steps of a trajectory are invalid, the
    #   trajectory is treated as invalid.
    # prediction_ground_truth_indices: [B, M, N]. Indices that gather the
    #   predictions of shape [B, M, ?, N] from the ground truth of shape
    #   [B, A]; values must lie in [0, A).
    # prediction_ground_truth_indices_mask: [B, M, N]. Validity mask for
    #   `prediction_ground_truth_indices`.
    # object_type: [B, A]. Object type per trajectory.
    # object_id: [B, A]. Object IDs per trajectory.
    # scenario_id: [B]. Scenario IDs of all ground-truth trajectories.

    # Prepare these into the shapes expected by the metrics computation: here
    # each scenario contributes a single marginal prediction (M = 1) over a
    # single agent (N = 1).
    # [batch_size, K, steps, 2] -> [B, M, K, N, TP, 2].
    prediction_trajectory = prediction_trajectory[:, tf.newaxis, :, tf.newaxis]
    # [batch_size, K] -> [B, M, K].
    prediction_score = prediction_score[:, tf.newaxis]
    # [batch_size, gt_steps, 7] -> [B, A, TG, 7].
    ground_truth_trajectory = ground_truth_trajectory[:, tf.newaxis]
    # [batch_size, gt_steps] -> [B, A, TG].
    ground_truth_is_valid = ground_truth_is_valid[:, tf.newaxis]
    # [batch_size] -> [B, A].
    object_type = object_type[:, tf.newaxis]

    batch_size = tf.shape(object_type)[0]
    # With M = N = A = 1, every prediction maps to ground-truth agent 0.
    return py_metrics_ops.motion_metrics(
        config=self._metrics_config.SerializeToString(),
        prediction_trajectory=prediction_trajectory,
        prediction_score=prediction_score,
        ground_truth_trajectory=ground_truth_trajectory,
        ground_truth_is_valid=ground_truth_is_valid,
        prediction_ground_truth_indices=tf.zeros(
            [batch_size, 1, 1], dtype=tf.int64),
        prediction_ground_truth_indices_mask=tf.ones(
            [batch_size, 1, 1], dtype=tf.bool),
        object_type=object_type)
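

if __name__ == '__main__':
  # Minimal smoke test with random placeholder tensors (a sketch, not part of
  # the original module). Shapes follow the comments in `result` above,
  # assuming the default config: 80 future steps predicted at 10 Hz and
  # 91 ground-truth steps (10 history + current + 80 future).
  metrics = MotionMetrics()
  batch_size, num_preds, steps, gt_steps = 2, 6, 80, 91
  metrics.update_state(
      prediction_trajectory=tf.random.normal(
          [batch_size, num_preds, steps, 2]),
      prediction_score=tf.random.uniform([batch_size, num_preds]),
      ground_truth_trajectory=tf.random.normal([batch_size, gt_steps, 7]),
      ground_truth_is_valid=tf.ones([batch_size, gt_steps], dtype=tf.bool),
      object_type=tf.ones([batch_size], dtype=tf.int64))  # 1 = TYPE_VEHICLE.
  print(metrics.result())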