39 changes: 16 additions & 23 deletions src/feature_extraction/base_features/centroid_velocity.py
@@ -42,20 +42,19 @@ def per_frame(self, identity: int) -> np.ndarray:
# get centroids for all frames where this identity is present
centroids = [convex_hulls[i].centroid for i in indexes]

# convert to numpy array of x,y points of the centroids
points = np.asarray([[p.x, p.y] for p in centroids])
# get centroids for all frames where this identity is present
centroid_centers = np.full([self._poses.num_frames, 2], np.nan, dtype=np.float32)
for i in indexes:
centroid_centers[i, :] = np.asarray(convex_hulls[i].centroid.xy).squeeze()

if points.shape[0] > 1:
# compute x,y velocities
# pass indexes so numpy can figure out spacing
v = np.gradient(points, indexes, axis=0)
v = np.gradient(centroid_centers, axis=0)

# compute direction of velocities
d = np.degrees(np.arctan2(v[:, 1], v[:, 0]))
# compute direction of velocities
d = np.degrees(np.arctan2(v[:, 1], v[:, 0]))

# subtract animal bearing from orientation
# convert angle to range -180 to 180
values[indexes] = (((d - bearings[indexes]) + 360) % 360) - 180
# subtract animal bearing from orientation
# convert angle to range -180 to 180
values = (((d - bearings) + 180) % 360) - 180

return {'centroid_velocity_dir': values}

@@ -92,18 +91,12 @@ def per_frame(self, identity: int) -> np.ndarray:
indexes = np.arange(self._poses.num_frames)[frame_valid == 1]

# get centroids for all frames where this identity is present
centroids = [convex_hulls[i].centroid for i in indexes]

# convert to numpy array of x,y points of the centroids
points = np.asarray([[p.x, p.y] for p in centroids])

if points.shape[0] > 1:
# compute x,y velocities
# pass indexes so numpy can figure out spacing
v = np.gradient(points, indexes, axis=0)
centroid_centers = np.full([self._poses.num_frames, 2], np.nan, dtype=np.float32)
for i in indexes:
centroid_centers[i, :] = np.asarray(convex_hulls[i].centroid.xy).squeeze()

# compute magnitude of velocities
values[indexes] = np.sqrt(
np.square(v[:, 0]) + np.square(v[:, 1])) * fps
# get change over frames
v = np.gradient(centroid_centers, axis=0)
values = np.linalg.norm(v, axis=-1) * fps * self._pixel_scale

return {'centroid_velocity_mag': values}
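For context, a minimal standalone sketch of the pattern the new centroid-velocity code adopts: pre-allocate a NaN-filled (num_frames, 2) array, fill only the frames where the identity is present, and let np.gradient / np.linalg.norm propagate NaN through the missing frames. The frame count, fps, and pixel scale below are invented for illustration.

```python
import numpy as np

num_frames, fps, pixel_scale = 6, 30.0, 0.08  # hypothetical values

# NaN-padded centroid positions; frames 2-3 are "missing"
centroid_centers = np.full((num_frames, 2), np.nan, dtype=np.float32)
centroid_centers[[0, 1, 4, 5]] = [[10, 10], [12, 11], [20, 15], [23, 17]]

# frame-to-frame velocity components
v = np.gradient(centroid_centers, axis=0)

# direction of travel in degrees, and magnitude converted to cm/s
direction = np.degrees(np.arctan2(v[:, 1], v[:, 0]))
speed = np.linalg.norm(v, axis=-1) * fps * pixel_scale

print(direction)  # NaN wherever a neighbouring centroid was missing
print(speed)
```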
13 changes: 4 additions & 9 deletions src/feature_extraction/base_features/point_speeds.py
@@ -25,15 +25,10 @@ def per_frame(self, identity: int) -> np.ndarray:
speeds = {}

# calculate velocities for each point
xy_deltas = np.gradient(poses, axis=0)
point_velocities = np.linalg.norm(xy_deltas, axis=-1) * fps

for keypoint in PoseEstimation.KeypointIndex:
# grab all of the values for this point
points = np.ma.array(poses[:, keypoint, :], mask=np.stack([~point_masks[:, keypoint], ~point_masks[:, keypoint]]), dtype=np.float32)
point_velocities = np.gradient(points, axis=0)
point_velocities.fill_value = 0
speeds[f"{keypoint.name} speed"] = point_velocities

# convert the velocities to speed and convert units
for key, val in speeds.items():
speeds[key] = np.linalg.norm(val, axis=-1) * fps
speeds[f"{keypoint.name} speed"] = point_velocities[:, keypoint.value]

return speeds
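A small shape-focused sketch of the vectorized replacement in point_speeds.py: one np.gradient call over the full (frames, keypoints, 2) pose array, then a norm over the last axis, instead of a masked-array loop per keypoint. The array sizes, fps, and keypoint index are illustrative only.

```python
import numpy as np

fps = 30.0
poses = np.random.default_rng(0).normal(size=(100, 12, 2))  # (frames, keypoints, x/y)

# frame-to-frame displacement for every keypoint at once
xy_deltas = np.gradient(poses, axis=0)                     # (100, 12, 2)

# speed per keypoint per frame, converted to units per second
point_speeds = np.linalg.norm(xy_deltas, axis=-1) * fps    # (100, 12)

nose_speed = point_speeds[:, 0]  # one column per keypoint index
```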
12 changes: 3 additions & 9 deletions src/feature_extraction/base_features/point_velocities.py
@@ -33,17 +33,11 @@ def per_frame(self, identity: int) -> np.ndarray:
bearings = self._poses.compute_all_bearings(identity)

directions = {}
xy_deltas = np.gradient(poses, axis=0)
angles = np.degrees(np.arctan2(xy_deltas[:, :, 1], xy_deltas[:, :, 0]))

for keypoint in PoseEstimation.KeypointIndex:
# compute x,y velocities
# pass indexes so numpy can figure out spacing
points = np.ma.array(poses[:, keypoint, :], mask=np.stack([~point_masks[:, keypoint], ~point_masks[:, keypoint]]), dtype=np.float32)
point_velocities = np.gradient(points, axis=0)

# compute the orientation, and adjust based on the animal's bearing
adjusted_angle = (((np.degrees(np.arctan2(point_velocities[:, 1], point_velocities[:, 0])) - bearings) + 360) % 360) - 180
adjusted_angle.fill_value = np.nan
directions[f"{keypoint.name} velocity direction"] = adjusted_angle.filled()
directions[f"{keypoint.name} velocity direction"] = ((angles[:, keypoint.value] - bearings + 360) % 360) - 180

return directions

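All of the velocity-direction features reduce to the same wrap-into-[-180, 180) arithmetic. A tiny worked sketch, with arbitrarily chosen values:

```python
import numpy as np

def wrap_degrees(delta):
    """Map any angle difference in degrees into the range [-180, 180)."""
    return ((delta + 180) % 360) - 180

# e.g. a point moving at 10 deg while the animal bears 350 deg
print(wrap_degrees(10 - 350))   # 20, not -340
print(wrap_degrees(np.array([0.0, 180.0, 359.0, -270.0])))  # [0., -180., -1., 90.]
```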
2 changes: 1 addition & 1 deletion src/feature_extraction/features.py
@@ -16,7 +16,7 @@
from .segmentation_features import SegmentationFeatureGroup


FEATURE_VERSION = 9
FEATURE_VERSION = 10

_FEATURE_MODULES = [
BaseFeatureGroup,
12 changes: 4 additions & 8 deletions src/feature_extraction/landmark_features/corner.py
@@ -1,4 +1,3 @@
import math
import typing

import numpy as np
@@ -51,9 +50,6 @@ def cache_features(self, identity: int):
closest_corners = np.full(self._poses.num_frames, -1, dtype=np.int8)
corners = self._poses.static_objects['corners']

# points and convex hulls are in y,x
# corners are x,y so flip them to match points and convex hulls
corners = np.flip(corners, axis=-1)
arena_center_np = np.mean(corners, axis=0)
arena_center = Point(arena_center_np[0], arena_center_np[1])

@@ -88,7 +84,7 @@ def cache_features(self, identity: int):

center_dist = self_shape.distance(arena_center)
# Note that self_shape.xy stores a [2,1] point data, but cv2 needs shape [2]
wall_dist = cv2.pointPolygonTest(corners.astype(np.float32), np.asarray(self_shape.centroid.xy).squeeze(), True)
wall_dist = cv2.pointPolygonTest(corners.astype(np.float32), np.asarray(self_shape.centroid.xy).squeeze() * self._pixel_scale, True)

corner_distances[frame] = distance * self._pixel_scale
center_distances[frame] = center_dist * self._pixel_scale
@@ -152,9 +148,9 @@ def compute_angle(a, b, c):

# most of the point types are unsigned short integers
# cast to signed types to avoid underflow issues during subtraction
angle = math.degrees(
math.atan2(int(c[1]) - int(b[1]), int(c[0]) - int(b[0])) -
math.atan2(int(a[1]) - int(b[1]), int(a[0]) - int(b[0]))
angle = np.degrees(
np.arctan2(c[1] - b[1], c[0] - b[0]) -
np.arctan2(a[1] - b[1], a[0] - b[0])
)
return ((angle + 180) % 360) - 180

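A hedged sketch of what the math.atan2-to-np.arctan2 swap buys in compute_angle: the same signed angle at vertex B between rays B-to-C and B-to-A, but accepting float (possibly NaN) coordinates without the int casts. The test points are made up.

```python
import numpy as np

def compute_angle(a, b, c):
    """Signed angle in degrees at vertex b, wrapped into [-180, 180)."""
    angle = np.degrees(
        np.arctan2(c[1] - b[1], c[0] - b[0]) -
        np.arctan2(a[1] - b[1], a[0] - b[0])
    )
    return ((angle + 180) % 360) - 180

a = np.array([1.0, 0.0])
b = np.array([0.0, 0.0])
c = np.array([0.0, 1.0])
print(compute_angle(a, b, c))  # 90.0

# NaN coordinates simply propagate instead of raising on int()
print(compute_angle(np.array([np.nan, 0.0]), b, c))  # nan
```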
7 changes: 3 additions & 4 deletions src/feature_extraction/landmark_features/food_hopper.py
@@ -27,8 +27,8 @@ def per_frame(self, identity: int) -> np.ndarray:
if self._pixel_scale is not None:
hopper = hopper * self._pixel_scale

# swap the point x,y values and change dtype to float32 for open cv
hopper_pts = hopper[:, [1, 0]].astype(np.float32)
# change dtype to float32 for open cv
hopper_pts = hopper.astype(np.float32)

points, _ = self._poses.get_identity_poses(identity, self._pixel_scale)

@@ -42,8 +42,7 @@ def per_frame(self, identity: int) -> np.ndarray:
if key_point in _EXCLUDED_POINTS:
continue

# swap our x,y to match the opencv coordinate space
pts = points[:, key_point.value, [1, 0]]
pts = points[:, key_point.value, :]

distance = np.asarray([cv2.pointPolygonTest(hopper_pts, (p[0], p[1]), True) for p in pts])
distance[np.isnan(pts[:, 0])] = np.nan
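For reference, a small sketch of the cv2.pointPolygonTest call the hopper feature relies on: the polygon is passed as float32 x,y vertices, the query point as a plain tuple, and with measureDist=True the return value is a signed distance (positive inside, negative outside). The square and test points below are fabricated.

```python
import cv2
import numpy as np

# hypothetical food-hopper corners, already in x,y order
hopper_pts = np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.float32)

inside = cv2.pointPolygonTest(hopper_pts, (5.0, 5.0), True)    # +5.0 (5 px inside)
outside = cv2.pointPolygonTest(hopper_pts, (15.0, 5.0), True)  # -5.0 (5 px outside)
print(inside, outside)
```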
@@ -85,8 +85,8 @@ def per_frame(self, identity: int) -> np.ndarray:
hole_area_ratio[frame] = (hole_areas * self._pixel_scale**2)/self._moment_cache.get_moment(frame, 'm00')

# Calculate the centroid speeds
centroid_speeds = np.hypot(np.gradient(x), np.gradient(y))
centroid_speeds = np.hypot(np.gradient(x), np.gradient(y)) * self._poses.fps

values = {}
values['centroid_speed'] = centroid_speeds
values['ellipse_w'] = ellipse_w
12 changes: 5 additions & 7 deletions src/feature_extraction/social_features/social_distance.py
@@ -38,7 +38,7 @@ def __init__(self, poses: PoseEstimation, identity: int,
continue

# Find the distance and identity of the closest animal at each
# frame, as well as the distance, identity and angle of the closes
# frame, as well as the distance, identity and angle of the closest
# animal in field of view. In order to calculate this we require
# that both animals have a valid convex hull and the self
# identity has a valid nose point and base neck point (which is
@@ -59,7 +59,7 @@

self_base_neck_point = points[idx.BASE_NECK, :]
self_nose_point = points[idx.NOSE, :]
other_centroid = np.array(other_shape.centroid.coords[0])
other_centroid = np.array(other_shape.centroid.xy).squeeze() * self._pixel_scale

view_angle = self.compute_angle(
self_nose_point,
@@ -95,11 +95,9 @@ def compute_angle(a, b, c):
:return: angle between AB and BC with range [-180, 180)
"""

# point types in the pose files are typically unsigned 16 bit integers,
# cast to signed types to avoid underflow during subtraction
angle = math.degrees(
math.atan2(int(c[1]) - int(b[1]), int(c[0]) - int(b[0])) -
math.atan2(int(a[1]) - int(b[1]), int(a[0]) - int(b[0]))
angle = np.degrees(
np.arctan2(c[1] - b[1], c[0] - b[0]) -
np.arctan2(a[1] - b[1], a[0] - b[0])
)
return ((angle + 180) % 360) - 180

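A brief sketch of the centroid-extraction pattern the social-distance change standardizes on: shapely's .centroid.xy returns a pair of coordinate arrays, so np.asarray(...).squeeze() yields a flat [x, y] vector that can then be scaled from pixels to cm. The polygon and scale here are illustrative.

```python
import numpy as np
from shapely.geometry import Polygon

pixel_scale = 0.08  # hypothetical cm per pixel
other_shape = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])

# .xy gives ([x], [y]); squeeze to a flat [x, y] and convert to cm
other_centroid = np.asarray(other_shape.centroid.xy).squeeze() * pixel_scale
print(other_centroid)  # [0.16 0.08]
```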
2 changes: 1 addition & 1 deletion src/pose_estimation/__init__.py
@@ -4,7 +4,7 @@

import h5py

from .pose_est import PoseEstimation, PoseHashException
from .pose_est import PoseEstimation, PoseHashException, MINIMUM_CONFIDENCE
from .pose_est_v2 import PoseEstimationV2
from .pose_est_v3 import PoseEstimationV3
from .pose_est_v4 import PoseEstimationV4
7 changes: 4 additions & 3 deletions src/pose_estimation/pose_est.py
@@ -10,6 +10,7 @@

from src.utils import hash_file

MINIMUM_CONFIDENCE = 0.3

class PoseHashException(Exception):
pass
@@ -155,8 +156,8 @@ def get_identity_convex_hulls(self, identity):
The convex hulls are calculated using all valid points except for the
middle of tail and tip of tail points.
:param identity: identity to return points for
:return: the convex hulls (array elements will be None if there is no
valid convex hull for that frame)
:return: the convex hulls in pixel units (array elements will be None
if there is no valid convex hull for that frame)
"""

if identity in self._convex_hull_cache:
@@ -208,7 +209,7 @@ def compute_bearing(self, points):
angle_rad = np.arctan2(base_neck_offset_xy[1],
base_neck_offset_xy[0])

return angle_rad * (180 / np.pi)
return np.degrees(angle_rad)

def compute_all_bearings(self, identity):
bearings = np.full(self.num_frames, np.nan, dtype=np.float32)
7 changes: 4 additions & 3 deletions src/pose_estimation/pose_est_v2.py
@@ -4,7 +4,7 @@
import h5py
import numpy as np

from .pose_est import PoseEstimation
from .pose_est import PoseEstimation, MINIMUM_CONFIDENCE


class PoseEstimationV2(PoseEstimation):
@@ -39,9 +39,10 @@ def __init__(self, file_path: Path,
pose_grp = pose_h5['poseest']

# load contents
self._points = pose_grp['points'][:].astype(np.float64)
# keypoints are stored as (y,x)
self._points = np.flip(pose_grp['points'][:].astype(np.float64), axis=-1)
self._point_mask = np.zeros(self._points.shape[:-1], dtype=np.uint16)
self._point_mask[:] = pose_grp['confidence'][:] > 0.3
self._point_mask[:] = pose_grp['confidence'][:] > MINIMUM_CONFIDENCE

# get pixel size
self._cm_per_pixel = pose_grp.attrs.get('cm_per_pixel', None)
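A minimal sketch, with fabricated data, of the two v2 loader changes: flipping the stored (y, x) keypoints to (x, y) on the last axis, and building the point mask against the shared MINIMUM_CONFIDENCE constant instead of a hard-coded 0.3.

```python
import numpy as np

MINIMUM_CONFIDENCE = 0.3

# pretend these came from pose_grp['points'] / pose_grp['confidence']
raw_points = np.array([[[5, 10], [7, 12]]], dtype=np.float64)   # (frames, keypoints, y/x)
confidence = np.array([[0.9, 0.1]])

points = np.flip(raw_points, axis=-1)          # now (x, y): [[10, 5], [12, 7]]
point_mask = confidence > MINIMUM_CONFIDENCE   # [[ True, False]]
```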
7 changes: 4 additions & 3 deletions src/pose_estimation/pose_est_v3.py
@@ -5,7 +5,7 @@
import h5py
import numpy as np

from .pose_est import PoseEstimation, PoseHashException
from .pose_est import PoseEstimation, PoseHashException, MINIMUM_CONFIDENCE


class _CacheFileVersion(Exception):
@@ -85,7 +85,8 @@ def __init__(self, file_path: Path,
assert major_version == 3

# load contents
all_points = pose_grp['points'][:]
# keypoints are stored as (y,x)
all_points = np.flip(pose_grp['points'][:], axis=-1)
all_confidence = pose_grp['confidence'][:]
all_instance_count = pose_grp['instance_count'][:]
all_track_id = pose_grp['instance_track_id'][:]
@@ -218,7 +219,7 @@ def get_identity_point_mask(self, identity):
def _build_track_dict(self, all_points, all_confidence, all_instance_count,
all_track_id):
""" iterate through frames and build track dict """
all_points_mask = all_confidence > 0
all_points_mask = all_confidence > MINIMUM_CONFIDENCE
track_dict = {}

for frame_index in range(self.num_frames):
19 changes: 13 additions & 6 deletions src/pose_estimation/pose_est_v4.py
@@ -4,7 +4,7 @@
import h5py
import numpy as np

from .pose_est import PoseEstimation, PoseHashException
from .pose_est import PoseEstimation, PoseHashException, MINIMUM_CONFIDENCE


class _CacheFileVersion(Exception):
@@ -60,7 +60,8 @@ def __init__(self, file_path: Path,
#assert major_version == 4

# load contents
all_points = pose_grp['points'][:]
# keypoints are stored as (y,x)
all_points = np.flip(pose_grp['points'][:], axis=-1)
all_confidence = pose_grp['confidence'][:]
id_mask = pose_grp['id_mask'][:]
instance_embed_id = pose_grp['instance_embed_id'][:]
@@ -84,19 +85,25 @@
instance_embed_id[id_mask == 0] - 1, :, :] = all_points[
id_mask == 0, :, :]

# then transpose to make the first index the "identity" rather
# than frame
# transpose to make the first index the "identity" rather than frame
# indexes before transpose: [frame][ident][point idx][pt axis]
# indexes after transpose: [ident][frame][point idx][pt axis]
self._points = np.transpose(points_tmp, [1, 0, 2, 3])
points_tmp = np.transpose(points_tmp, [1, 0, 2, 3])

# transform confidence values for mask as well
confidence_by_id_tmp = np.zeros(tmp_shape[:3], dtype=all_confidence.dtype)
confidence_by_id_tmp[np.where(id_mask == 0)[0],
instance_embed_id[id_mask == 0] - 1, :] = all_confidence[
id_mask == 0, :]
confidence_by_id = np.transpose(confidence_by_id_tmp, [1, 0, 2])

self._point_mask = confidence_by_id > 0
# enforce partial poses get nan values
points_tmp[confidence_by_id <= MINIMUM_CONFIDENCE] = np.nan

# copy data into object
self._points = points_tmp

self._point_mask = confidence_by_id > MINIMUM_CONFIDENCE

# build a mask for each identity that indicates if it exists or not
# in the frame
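For the v4 reshaping, a condensed sketch (shapes and threshold only, data fabricated): group points and confidences by embedded identity, transpose to [identity, frame, keypoint, axis], and blank low-confidence keypoints to NaN before storing them on the object.

```python
import numpy as np

MINIMUM_CONFIDENCE = 0.3
rng = np.random.default_rng(1)

n_frames, n_ident, n_kp = 4, 2, 3
points_tmp = rng.normal(size=(n_frames, n_ident, n_kp, 2))   # [frame][ident][kp][xy]
confidence_tmp = rng.uniform(size=(n_frames, n_ident, n_kp))

# identity becomes the leading axis
points_by_id = np.transpose(points_tmp, [1, 0, 2, 3])        # [ident][frame][kp][xy]
confidence_by_id = np.transpose(confidence_tmp, [1, 0, 2])   # [ident][frame][kp]

# low-confidence keypoints become NaN so downstream features skip them
points_by_id[confidence_by_id <= MINIMUM_CONFIDENCE] = np.nan
point_mask = confidence_by_id > MINIMUM_CONFIDENCE
```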
19 changes: 11 additions & 8 deletions src/pose_estimation/pose_est_v5.py
@@ -1,10 +1,14 @@
import typing
from pathlib import Path

import numpy as np
import h5py

from .pose_est_v4 import PoseEstimationV4

OBJECTS_STORED_YX = [
'lixit',
'food_hopper',
]

[review comment] good note to have

class PoseEstimationV5(PoseEstimationV4):
def __init__(self, file_path: Path,
@@ -38,15 +42,14 @@ def __init__(self, file_path: Path,
# 'static_objects'. Currently anything else is ignored
if g == 'static_objects':
for d in pose_h5['static_objects']:
self._static_objects[d] = pose_h5['static_objects'][d][:]
static_object_data = pose_h5['static_objects'][d][:]
if d in OBJECTS_STORED_YX:
static_object_data = np.flip(static_object_data, axis=-1)
self._static_objects[d] = static_object_data

# drop "lixit" from the static objects if it is an empty array
try:
if self._static_objects['lixit'].shape[0] == 0:
del self._static_objects['lixit']
except KeyError:
# lixit was not in static objects, ignore
pass
if 'lixit' in self._static_objects and self._static_objects['lixit'].shape[0] == 0:
del self._static_objects['lixit']

@property
def format_major_version(self) -> int:
4 changes: 1 addition & 3 deletions src/ui/central_widget.py
@@ -627,9 +627,7 @@ def _pixmap_clicked(self, event):
on one of the mice, make that one active
"""
if self._pose_est is not None:
# since convex hulls are represented as y, x we need to maintain
# this ordering
pt = Point(event['y'], event['x'])
pt = Point(event['x'], event['y'])
for i, ident in enumerate(self._pose_est.identities):
c_hulls = self._pose_est.get_identity_convex_hulls(ident)
curr_c_hull = c_hulls[self._curr_frame_index]
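Finally, a toy sketch of the click test after the coordinate fix: the clicked pixel becomes a shapely Point(x, y) and is checked against an identity's convex hull, which now lives in the same x,y space. The hull vertices and click location are invented.

```python
from shapely.geometry import Point, MultiPoint

# hypothetical convex hull for one identity, in x,y pixel coordinates
curr_c_hull = MultiPoint([(10, 10), (40, 12), (38, 45), (12, 42)]).convex_hull

event = {'x': 25, 'y': 30}
pt = Point(event['x'], event['y'])
print(curr_c_hull.contains(pt))  # True: the click lands on this identity
```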