Multi anchor crop #41

Merged
merged 34 commits into main from aadi/multi-anchor-crop on May 14, 2024
Changes from 25 commits

Commits (34)
a6da501
shuffle instance order if dataset is in training mode.
aaprasad May 3, 2024
3c62687
add anchor and centroid properties
aaprasad May 3, 2024
55daaf9
simplify pose centroid computation
aaprasad May 3, 2024
035e630
abstract `MLP` into a separate `mlp` module for use in other areas of…
aaprasad May 6, 2024
1147ceb
generalize positional embeddings to work on arbitrary number of bound…
aaprasad May 6, 2024
fa52443
change logic in `Instance` and `Frame` to work with multiple bounding…
aaprasad May 6, 2024
599e66b
adapt to work with multiple boxes
aaprasad May 7, 2024
787ac15
add functionality to use multi-anchor crops
aaprasad May 7, 2024
9c0433f
lint, update tests and configs
aaprasad May 7, 2024
583aa64
add logs/checkpoints to gitignore
aaprasad May 7, 2024
a660393
abstract `MLP` into a separate `mlp` module for use in other areas of…
aaprasad May 6, 2024
e9f755a
generalize positional embeddings to work on arbitrary number of bound…
aaprasad May 6, 2024
4a8c728
change logic in `Instance` and `Frame` to work with multiple bounding…
aaprasad May 6, 2024
8ada90e
adapt to work with multiple boxes
aaprasad May 7, 2024
a8d01a3
add functionality to use multi-anchor crops
aaprasad May 7, 2024
0105c1a
lint, update tests and configs
aaprasad May 7, 2024
e5c5509
add logs/checkpoints to gitignore
aaprasad May 7, 2024
611f008
Merge branch 'main' into aadi/multi-anchor-crop
aaprasad May 7, 2024
390a564
Merge branch 'aadi/multi-anchor-crop' of https://github.com/talmolab/…
aaprasad May 7, 2024
9edc517
fix errors from merge
aaprasad May 7, 2024
2566aaa
add timm as a dependency
aaprasad May 8, 2024
6002f30
remove mutable defaults
aaprasad May 8, 2024
beaa4fc
convert grayscale to rgb and int to float
aaprasad May 8, 2024
08781ba
use `timm` for feature extractor to generalize to any # of channels
aaprasad May 8, 2024
4578eeb
lint
aaprasad May 8, 2024
68915be
adapt to multiple boxes
aaprasad May 8, 2024
12fee29
account for edge case where iou is none (ie when a box is missing in …
aaprasad May 13, 2024
45955ea
revert to old implementation of mlp
aaprasad May 13, 2024
a8f1c6f
remove use of `nn.LazyLinear`
aaprasad May 13, 2024
67292a4
fix typo, untrack checkpoint files only
aaprasad May 13, 2024
bd806e6
adjust tests
aaprasad May 13, 2024
68cf717
remove duplicates
aaprasad May 13, 2024
72af375
turn off mlp if `num_layers <=0`
aaprasad May 14, 2024
c772453
add torchvision as visual feature extractor backend
aaprasad May 14, 2024
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,8 @@ dmypy.json
# lightning
lightning_logs
outputs
logs
models

# vscode
.vscode
Expand Down
84 changes: 51 additions & 33 deletions biogtr/data_structures.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ def __init__(
self,
gt_track_id: int = -1,
pred_track_id: int = -1,
bbox: ArrayLike = torch.empty((0, 4)),
crop: ArrayLike = torch.tensor([]),
bbox: ArrayLike = None,
crop: ArrayLike = None,
centroid: dict[str, ArrayLike] = None,
features: ArrayLike = torch.tensor([]),
features: ArrayLike = None,
track_score: float = -1.0,
point_scores: ArrayLike = None,
instance_score: float = -1.0,
Expand Down Expand Up @@ -56,46 +56,58 @@ def __init__(
else:
self._skeleton = skeleton

if not isinstance(bbox, torch.Tensor):
if bbox is None:
self._bbox = torch.empty(1, 0, 4)

elif not isinstance(bbox, torch.Tensor):
self._bbox = torch.tensor(bbox)

else:
self._bbox = bbox

if self._bbox.shape[0] and len(self._bbox.shape) == 1:
self._bbox = self._bbox.unsqueeze(0) # (n_anchors, 4)

if self._bbox.shape[1] and len(self._bbox.shape) == 2:
self._bbox = self._bbox.unsqueeze(0) # (1, n_anchors, 4)

if centroid is not None:
self._centroid = centroid
elif self.bbox.shape[0]:
y1, x1, y2, x2 = self.bbox.squeeze()

elif self.bbox.shape[1]:
y1, x1, y2, x2 = self.bbox.squeeze(dim=0).nanmean(dim=0)
self._centroid = {"centroid": np.array([(x1 + x2) / 2, (y1 + y2) / 2])}

else:
self._centroid = {}

if self._bbox.shape[0] and len(self._bbox.shape) == 1:
self._bbox = self._bbox.unsqueeze(0)

if not isinstance(crop, torch.Tensor):
if crop is None:
self._crop = torch.tensor([])
elif not isinstance(crop, torch.Tensor):
self._crop = torch.tensor(crop)
else:
self._crop = crop

if len(self._crop.shape) == 2:
self._crop = self._crop.unsqueeze(0).unsqueeze(0)
elif len(self._crop.shape) == 3:
self._crop = self._crop.unsqueeze(0)
if len(self._crop.shape) == 2: # (h, w)
self._crop = self._crop.unsqueeze(0) # (c, h, w)
if len(self._crop.shape) == 3:
self._crop = self._crop.unsqueeze(0) # (1, c, h, w)

if not isinstance(features, torch.Tensor):
if features is None:
self._features = torch.tensor([])
elif not isinstance(features, torch.Tensor):
self._features = torch.tensor(features)
else:
self._features = features

if self._features.shape[0] and len(self._features.shape) == 1:
self._features = self._features.unsqueeze(0)
if self._features.shape[0] and len(self._features.shape) == 1: # (d,)
self._features = self._features.unsqueeze(0) # (1, d)

if pose is not None:
self._pose = pose

elif self.bbox.shape[0]:

y1, x1, y2, x2 = self.bbox.squeeze()
elif self.bbox.shape[1]:
y1, x1, y2, x2 = self.bbox.squeeze(dim=0).mean(dim=0)
self._pose = {"centroid": np.array([(x1 + x2) / 2, (y1 + y2) / 2])}

else:
Expand Down Expand Up @@ -287,14 +299,16 @@ def bbox(self, bbox: ArrayLike) -> None:

if self._bbox.shape[0] and len(self._bbox.shape) == 1:
self._bbox = self._bbox.unsqueeze(0)
if self._bbox.shape[1] and len(self._bbox.shape) == 2:
self._bbox = self._bbox.unsqueeze(0)

def has_bbox(self) -> bool:
"""Determine if the instance has a bbox.

Returns:
True if the instance has a bounding box, false otherwise.
"""
if self._bbox.shape[0] == 0:
if self._bbox.shape[1] == 0:
return False
else:
return True
Expand All @@ -318,14 +332,15 @@ def centroid(self, centroid: dict[str, ArrayLike]) -> None:
self._centroid = centroid

@property
def anchor(self) -> str:
def anchor(self) -> list[str]:
"""The anchor node name around which the crop was formed.

Returns:
the node name of the anchor around which the crop was formed
the list of anchors around which each crop was formed
"""
if self.centroid:
return list(self.centroid.keys())[0]
return list(self.centroid.keys())
return ""

@property
Expand Down Expand Up @@ -353,8 +368,8 @@ def crop(self, crop: ArrayLike) -> None:
self._crop = crop

if len(self._crop.shape) == 2:
self._crop = self._crop.unsqueeze(0).unsqueeze(0)
elif len(self._crop.shape) == 3:
self._crop = self._crop.unsqueeze(0)
if len(self._crop.shape) == 3:
self._crop = self._crop.unsqueeze(0)

def has_crop(self) -> bool:
Expand Down Expand Up @@ -528,8 +543,8 @@ def __init__(
video_id: int,
frame_id: int,
vid_file: str = "",
img_shape: ArrayLike = [0, 0, 0],
instances: List[Instance] = [],
img_shape: ArrayLike = None,
instances: List[Instance] = None,
asso_output: ArrayLike = None,
matches: tuple = None,
traj_score: Union[ArrayLike, dict] = None,
Expand Down Expand Up @@ -559,13 +574,16 @@ def __init__(
self._video = sio.Video(vid_file)
except ValueError:
self._video = vid_file

if isinstance(img_shape, torch.Tensor):
if img_shape is None:
self._img_shape = torch.tensor([0, 0, 0])
elif isinstance(img_shape, torch.Tensor):
self._img_shape = img_shape
else:
self._img_shape = torch.tensor([img_shape])

self._instances = instances
if instances is None:
self._instances = []
else:
self._instances = instances

self._asso_output = asso_output
self._matches = matches
Expand Down Expand Up @@ -631,7 +649,7 @@ def to(self, map_location: str):
return self

def to_slp(
self, track_lookup: dict[int : sio.Track] = {}
self, track_lookup: dict[int, sio.Track] = {}
) -> tuple[sio.LabeledFrame, dict[int, sio.Track]]:
"""Convert Frame to sleap_io.LabeledFrame object.

Expand Down
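
For reference, a minimal sketch of the new multi-anchor `Instance` semantics (editorial example, not part of the diff; field names follow the constructor above, values and node names are hypothetical):

import numpy as np
import torch

from biogtr.data_structures import Instance

# Bboxes are stored as (1, n_anchors, 4) and crops as (n_anchors, c, h, w)
# after the constructor normalizes shapes.
bboxes = torch.tensor([[[10.0, 20.0, 74.0, 84.0],
                        [30.0, 40.0, 94.0, 104.0]]])  # (1, 2, 4)
crops = torch.rand(2, 3, 64, 64)  # one crop per anchor
centroids = {
    "thorax": np.array([52.0, 42.0]),  # anchor node name -> (x, y)
    "head": np.array([72.0, 62.0]),
}

instance = Instance(gt_track_id=0, crop=crops, bbox=bboxes, centroid=centroids)

print(instance.anchor)      # ["thorax", "head"]: now a list of anchor names
print(instance.has_bbox())  # True: checks bbox.shape[1] (n_anchors) in the new layout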
132 changes: 105 additions & 27 deletions biogtr/datasets/sleap_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def __init__(
video_files: list[str],
padding: int = 5,
crop_size: int = 128,
anchor: str = "",
anchors: Union[int, list[str], str] = "",
chunk: bool = True,
clip_length: int = 500,
mode: str = "train",
Expand All @@ -39,7 +39,14 @@ def __init__(
video_files: a list of paths to video files
padding: amount of padding around object crops
crop_size: the size of the object crops
anchor: the name of the anchor keypoint to be used as centroid for cropping.
anchors: One of:
* a string indicating a single node to center crops around
* a list of skeleton node names to be used as the center of crops
* an int indicating the number of anchors to randomly select
anchors: One of:
* a string indicating a single node to center crops around
* a list of skeleton node names to be used as the center of crops
* an int indicating the number of anchors to randomly select
Remove duplicate documentation and code for anchors parameter.

-            anchors: One of:
-                        * a string indicating a single node to center crops around
-                        * a list of skeleton node names to be used as the center of crops
-                        * an int indicating the number of anchors to randomly select


If an anchor is unavailable, crop around the midpoint of all visible nodes.
chunk: whether or not to chunk the dataset into batches
clip_length: the number of frames in each chunk
Expand Down Expand Up @@ -78,7 +85,31 @@ def __init__(
self.mode = mode.lower()
self.n_chunks = n_chunks
self.seed = seed
self.anchor = anchor.lower()

if isinstance(anchors, int):
self.anchors = anchors
elif isinstance(anchors, str):
self.anchors = [anchors.lower()]
else:
self.anchors = [anchor.lower() for anchor in anchors]

if (
isinstance(self.anchors, list) and len(self.anchors) == 0
) or self.anchors == 0:
raise ValueError(f"Must provide at least one anchor but got {self.anchors}")

if isinstance(anchors, int):
self.anchors = anchors
elif isinstance(anchors, str):
self.anchors = [anchors.lower()]
else:
self.anchors = [anchor.lower() for anchor in anchors]

if (
isinstance(self.anchors, list) and len(self.anchors) == 0
) or self.anchors == 0:
raise ValueError(f"Must provide at least one anchor but got {self.anchors}")
Comment on lines +85 to +95
Remove duplicated logic for setting self.anchors.

-        if isinstance(anchors, int):
-            self.anchors = anchors
-        elif isinstance(anchors, str):
-            self.anchors = [anchors.lower()]
-        else:
-            self.anchors = [anchor.lower() for anchor in anchors]
-        if (
-            isinstance(self.anchors, list) and len(self.anchors) == 0
-        ) or self.anchors == 0:
-            raise ValueError(f"Must provide at least one anchor but got {self.anchors}")

Comment on lines +85 to +95
Refactor the handling of anchors to simplify the logic.

-        if isinstance(anchors, int):
-            self.anchors = anchors
-        elif isinstance(anchors, str):
-            self.anchors = [anchors.lower()]
-        else:
-            self.anchors = [anchor.lower() for anchor in anchors]
-        if (
-            isinstance(self.anchors, list) and len(self.anchors) == 0
-        ) or self.anchors == 0:
-            raise ValueError(f"Must provide at least one anchor but got {self.anchors}")
+        # Simplified handling of anchors
+        self.anchors = [anchors.lower()] if isinstance(anchors, str) else anchors
+        if not self.anchors:
+            raise ValueError("Must provide at least one anchor.")



self.verbose = verbose

# if self.seed is not None:
Expand Down Expand Up @@ -165,6 +196,18 @@ def get_instances(self, label_idx: List[int], frame_idx: List[int]) -> list[dict
print(f"Could not read frame {frame_ind} from {video_name} due to {e}")
continue

if len(img.shape) == 2:
img = np.expand_dims(img, -1)
h, w, c = img.shape

if c == 1:
img = np.concatenate(
[img, img, img], axis=-1
) # convert grayscale to RGB

Comment on lines +187 to +191
Optimize the conversion of grayscale images to RGB.

-                img = np.concatenate(
-                    [img, img, img], axis=-1
-                )  # convert to grayscale to rgb
+                img = np.stack([img] * 3, axis=-1)  # convert grayscale to RGB
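
For reference, the image normalization above as a standalone sketch (mirrors the diff; not the library's exact code):

import numpy as np

def normalize_img(img: np.ndarray) -> np.ndarray:
    """Return an HWC float RGB image in [0, 1], as the dataset now expects."""
    if img.ndim == 2:  # (h, w) -> (h, w, 1)
        img = np.expand_dims(img, -1)
    if img.shape[-1] == 1:  # grayscale -> RGB
        img = np.concatenate([img, img, img], axis=-1)
    if np.issubdtype(img.dtype, np.integer):  # int -> float
        img = img.astype(np.float32) / 255
    return img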


if np.issubdtype(img.dtype, np.integer): # convert int to float
img = img.astype(np.float32) / 255

for instance in lf:
if instance.track is not None:
gt_track_id = video.tracks.index(instance.track)
Expand Down Expand Up @@ -247,41 +290,76 @@ def get_instances(self, label_idx: List[int], frame_idx: List[int]) -> list[dict
pose = shown_poses[j]

"""Check for anchor"""
if self.anchor == "random":
anchors = list(pose.keys()) + ["midpoint"]
anchor = np.random.choice(anchors)
elif self.anchor in pose:
anchor = self.anchor
crops = []
boxes = []
centroids = {}

if isinstance(self.anchors, int):
anchors_to_choose = list(pose.keys()) + ["midpoint"]
anchors = np.random.choice(anchors_to_choose, self.anchors)
else:
if self.verbose:
warnings.warn(
f"{self.anchor} not in {[key for key in pose.keys()]}! Defaulting to midpoint"
)
anchor = "midpoint"
anchors = self.anchors

for anchor in anchors:
if anchor == "midpoint" or anchor == "centroid":
centroid = np.nanmean(np.array(list(pose.values())), axis=0)

if anchor != "midpoint":
centroid = pose[anchor]
elif anchor in pose:
centroid = np.array(pose[anchor])
if np.isnan(centroid).any():
centroid = np.array([np.nan, np.nan])

if np.isnan(centroid).any():
elif anchor not in pose and len(anchors) == 1:
anchor = "midpoint"
centroid = np.nanmean(np.array(list(pose.values())), axis=0)
else:
# print(f'{self.anchor} not an available option amongst {pose.keys()}. Using midpoint')
centroid = np.nanmean(np.array(list(pose.values())), axis=0)

bbox = data_utils.pad_bbox(
data_utils.get_bbox(centroid, self.crop_size),
padding=self.padding,
)
elif anchor in pose:
centroid = np.array(pose[anchor])
if np.isnan(centroid).any():
centroid = np.array([np.nan, np.nan])

elif anchor not in pose and len(anchors) == 1:
anchor = "midpoint"
centroid = np.nanmean(np.array(list(pose.values())), axis=0)

else:
centroid = np.array([np.nan, np.nan])

if np.isnan(centroid).all():
bbox = torch.tensor([np.nan, np.nan, np.nan, np.nan])

else:
bbox = data_utils.pad_bbox(
data_utils.get_bbox(centroid, self.crop_size),
padding=self.padding,
)

if bbox.isnan().all():
crop = torch.zeros(
c,
self.crop_size + 2 * self.padding,
self.crop_size + 2 * self.padding,
dtype=img.dtype,
)
else:
crop = data_utils.crop_bbox(img, bbox)

crops.append(crop)
centroids[anchor] = centroid
boxes.append(bbox)

if len(crops) > 0:
crops = torch.concat(crops, dim=0)

crop = data_utils.crop_bbox(img, bbox)
if len(boxes) > 0:
boxes = torch.stack(boxes, dim=0)
Refactor the handling of anchor points to simplify the logic.
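
One possible shape for such a refactor (editorial sketch only, not the reviewer's or the author's code):

import numpy as np

def select_centroid(anchor: str, pose: dict, single_anchor: bool) -> np.ndarray:
    """Resolve an anchor name to an (x, y) centroid, falling back to the pose midpoint."""
    if anchor in ("midpoint", "centroid") or (anchor not in pose and single_anchor):
        return np.nanmean(np.array(list(pose.values())), axis=0)
    if anchor not in pose:
        return np.array([np.nan, np.nan])
    centroid = np.array(pose[anchor])
    return centroid if not np.isnan(centroid).any() else np.array([np.nan, np.nan])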


instance = Instance(
gt_track_id=gt_track_ids[j],
pred_track_id=-1,
crop=crop,
centroid={anchor: centroid},
bbox=bbox,
crop=crops,
centroid=centroids,
bbox=boxes,
skeleton=skeleton,
pose=poses[j],
point_scores=point_scores[j],
Expand Down
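
For reference, a condensed sketch of the per-anchor cropping flow added above (editorial example; the `data_utils` calls match the diff, but the import path, image layout, and pose values are assumptions):

import numpy as np
import torch

from biogtr.datasets import data_utils  # assumed module path

pose = {"head": np.array([40.0, 50.0]), "thorax": np.array([60.0, 70.0])}
crop_size, padding = 128, 5
img = np.random.rand(512, 512, 3).astype(np.float32)  # stand-in for a video frame

crops, boxes, centroids = [], [], {}
for anchor in ["thorax", "midpoint"]:
    if anchor == "midpoint":
        centroid = np.nanmean(np.array(list(pose.values())), axis=0)
    else:
        centroid = np.array(pose[anchor])
    bbox = data_utils.pad_bbox(
        data_utils.get_bbox(centroid, crop_size), padding=padding
    )
    crops.append(data_utils.crop_bbox(img, bbox))  # (1, c, h, w) per the diff
    centroids[anchor] = centroid
    boxes.append(bbox)

crops = torch.concat(crops, dim=0)  # (n_anchors, c, h, w)
boxes = torch.stack(boxes, dim=0)   # (n_anchors, 4)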