
init release

Guanghan committed May 9, 2019
1 parent 2163c60 commit 2ed72ed7fffbe39ab58f29a42c81be180225b2aa
Showing with 64,876 additions and 0 deletions.
  1. +113 −0 HPE/AllJoints_COCO.py
  2. +176 −0 HPE/AllJoints_PoseTrack.py
  3. +177 −0 HPE/cal_posetrack_means.py
  4. +114 −0 HPE/config.py
  5. +115 −0 HPE/config_train.py
  6. +271 −0 HPE/dataset.py
  7. +26 −0 HPE/merge_PoseTrack_jsons.py
  8. +193 −0 HPE/train_PoseTrack_COCO_17_CPN_res101.py
  9. +226 −0 HPE/train_PoseTrack_COCO_17_MSRA152.py
  10. +160 −0 HPE/train_PoseTrack_COCO_17_mobile_deconv.py
  11. +402 −0 README.md
  12. +2 −0 data/demo/download_demo_video.sh
  13. +72 −0 data/download_coco.sh
  14. +64 −0 data/download_coco_test.sh
  15. +7 −0 data/download_dets.sh
  16. +14 −0 data/download_posetrack17.sh
  17. +14 −0 data/download_posetrack18.sh
  18. BIN demo/10F.gif
  19. BIN demo/10F_mp.gif
  20. BIN demo/1stF.gif
  21. BIN demo/demo.gif
  22. BIN demo/demo2.gif
  23. BIN demo/demo_big.gif
  24. BIN demo/logo.png
  25. BIN demo/overview.png
  26. +851 −0 demo_video_mobile.py
  27. +206 −0 detector/config/yolov3-tiny.cfg
  28. +788 −0 detector/config/yolov3.cfg
  29. +366 −0 detector/detector_utils.py
  30. +94 −0 detector/detector_yolov3.py
  31. +358 −0 detector/models.py
  32. +36 −0 detector/parse_config.py
  33. +131 −0 environment.yml
  34. +14 −0 graph/config/inference.yaml
  35. +32 −0 graph/config/train.yaml
  36. +32 −0 graph/config/train_naive.yaml
  37. +29 −0 graph/config/train_triplet.yaml
  38. +122 −0 graph/gcn_utils/contrastive.py
  39. +91 −0 graph/gcn_utils/feeder.py
  40. +134 −0 graph/gcn_utils/feeder_random_negative.py
  41. +92 −0 graph/gcn_utils/feeder_triplet.py
  42. +75 −0 graph/gcn_utils/gcn_block.py
  43. +179 −0 graph/gcn_utils/gcn_model.py
  44. +142 −0 graph/gcn_utils/graph.py
  45. +121 −0 graph/gcn_utils/io.py
  46. +170 −0 graph/gcn_utils/keypoints_to_graph.py
  47. +188 −0 graph/gcn_utils/keypoints_to_graph_hard_neg.py
  48. +213 −0 graph/gcn_utils/keypoints_to_graph_hard_pos.py
  49. +144 −0 graph/gcn_utils/keypoints_to_graph_negative.py
  50. +190 −0 graph/gcn_utils/keypoints_to_graph_triplet.py
  51. +56 −0 graph/gcn_utils/naive_model.py
  52. +191 −0 graph/gcn_utils/processor_base.py
  53. +213 −0 graph/gcn_utils/processor_siamese_gcn.py
  54. +204 −0 graph/gcn_utils/tools.py
  55. +32 −0 graph/main.py
  56. +8 −0 graph/torchlight/setup.py
  57. +8 −0 graph/torchlight/torchlight/__init__.py
  58. +35 −0 graph/torchlight/torchlight/gpu.py
  59. +203 −0 graph/torchlight/torchlight/io.py
  60. +2 −0 graph/unit_test/__init__.py
  61. +41 −0 graph/unit_test/test_graph.py
  62. +48 −0 graph/unit_test/test_keypoints_to_graph.py
  63. +48 −0 graph/unit_test/test_keypoints_to_graph_hard_neg.py
  64. +48 −0 graph/unit_test/test_keypoints_to_graph_hard_pos.py
  65. +48 −0 graph/unit_test/test_keypoints_to_graph_negative.py
  66. +48 −0 graph/unit_test/test_keypoints_to_graph_triplet.py
  67. +304 −0 graph/visualize_pose_matching.py
  68. +293 −0 graph/visualize_pose_matching_diff_persons.py
  69. +250 −0 jsonformat_std_to_posetrack18.py
  70. +7 −0 lib/Makefile
  71. +3 −0 lib/__init__.py
  72. +5 −0 lib/lib_kernel/lib_nms/__init__.py
  73. +6 −0 lib/lib_kernel/lib_nms/compile.sh
  74. +8 −0 lib/lib_kernel/lib_nms/nms/__init__.py
  75. +9,481 −0 lib/lib_kernel/lib_nms/nms/cpu_nms.c
  76. +163 −0 lib/lib_kernel/lib_nms/nms/cpu_nms.pyx
  77. +6,838 −0 lib/lib_kernel/lib_nms/nms/gpu_nms.cpp
  78. +2 −0 lib/lib_kernel/lib_nms/nms/gpu_nms.hpp
  79. +31 −0 lib/lib_kernel/lib_nms/nms/gpu_nms.pyx
  80. +144 −0 lib/lib_kernel/lib_nms/nms/gpu_nms_kernel.cu
  81. +93 −0 lib/lib_kernel/lib_nms/nms_op.cc
  82. +99 −0 lib/lib_kernel/lib_nms/nms_op.cu.cc
  83. +33 −0 lib/lib_kernel/lib_nms/nms_op.h
  84. +40 −0 lib/lib_kernel/lib_nms/nms_opr.py
  85. +149 −0 lib/lib_kernel/lib_nms/setup.py
  86. 0 lib/nets/__init__.py
  87. +172 −0 lib/nets/basemodel.py
  88. +484 −0 lib/nets/mobilenet_v1.py
  89. +286 −0 lib/nets/resnet_utils.py
  90. +330 −0 lib/nets/resnet_v1.py
  91. 0 lib/nms/.gitignore
  92. 0 lib/nms/__init__.py
  93. +6,869 −0 lib/nms/cpu_nms.c
  94. +68 −0 lib/nms/cpu_nms.pyx
  95. +6,391 −0 lib/nms/gpu_nms.cpp
  96. +2 −0 lib/nms/gpu_nms.hpp
  97. +31 −0 lib/nms/gpu_nms.pyx
  98. +144 −0 lib/nms/nms_kernel.cu
  99. +38 −0 lib/nms/py_cpu_nms.py
  100. +150 −0 lib/setup.py
  101. 0 lib/tfflat/__init__.py
  102. +429 −0 lib/tfflat/base.py
  103. +25 −0 lib/tfflat/config.py
  104. +372 −0 lib/tfflat/data_provider.py
  105. +38 −0 lib/tfflat/dpflow.py
  106. +49 −0 lib/tfflat/logger.py
  107. +119 −0 lib/tfflat/mp_utils.py
  108. +118 −0 lib/tfflat/net_utils.py
  109. +59 −0 lib/tfflat/saver.py
  110. +78 −0 lib/tfflat/serialize.py
  111. +38 −0 lib/tfflat/timer.py
  112. +52 −0 lib/tfflat/utils.py
  113. +3 −0 lib/utils/__init__.py
  114. +8,258 −0 lib/utils/bbox.c
  115. +144 −0 lib/utils/bbox.pyx
  116. +47 −0 lib/utils/blob.py
  117. +73 −0 lib/utils/boxes_grid.py
  118. +40 −0 lib/utils/mask.py
  119. +8,493 −0 lib/utils/nms.c
  120. +37 −0 lib/utils/nms.py
  121. +123 −0 lib/utils/nms.pyx
  122. +38 −0 lib/utils/timer.py
  123. +150 −0 lib/utils/visualize.py
  124. +180 −0 network_CPN101.py
  125. +150 −0 network_MSRA152.py
  126. +148 −0 network_mobile_deconv.py
  127. +815 −0 process_posetrack18_with_lighttrack_CPN101.py
  128. +816 −0 process_posetrack18_with_lighttrack_MSRA152.py
  129. +36 −0 unit_test/test_all.py
  130. +27 −0 unit_test/test_detection_visualizer.py
  131. +27 −0 unit_test/test_keypoint_visualizer.py
  132. +54 −0 unit_test/test_poly_visualizer.py
  133. +50 −0 unit_test/test_temp.py
  134. +46 −0 unit_test/test_utils_convert_heatmap.py
  135. +51 −0 unit_test/test_utils_io_file.py
  136. +119 −0 unit_test/test_utils_io_folder.py
  137. +182 −0 unit_test/test_utils_json.py
  138. +32 −0 unit_test/test_utils_natural_sort.py
  139. +71 −0 unit_test/test_utils_nms.py
  140. +90 −0 unit_test/test_visualizer.py
  141. +74 −0 utils/standard_classes.py
  142. +211 −0 utils/utils_convert_heatmap.py
  143. +375 −0 utils/utils_heatmap.py
  144. +115 −0 utils/utils_io_file.py
  145. +54 −0 utils/utils_io_folder.py
  146. +34 −0 utils/utils_json.py
  147. +18 −0 utils/utils_natural_sort.py
  148. +227 −0 utils/utils_nms.py
  149. +377 −0 utils/utils_pose.py
  150. +149 −0 visualizer/detection_visualizer.py
  151. +184 −0 visualizer/keypoint_visualizer.py
  152. +153 −0 visualizer/poly_visualizer.py
  153. +118 −0 visualizer/visualizer.py
  154. +19 −0 weights/download_weights.sh
HPE/AllJoints_COCO.py
@@ -0,0 +1,113 @@
#!/usr/bin/python3
# coding=utf-8

'''
Author: Guanghan Ning
E-mail: guanghan.ning@jd.com
Adapted from: https://github.com/chenyilun95/tf-cpn/blob/master/data/COCO/COCOAllJoints.py
'''
import os
import os.path as osp
import numpy as np
import cv2

import sys
cur_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join('../data/COCO', 'MSCOCO', 'PythonAPI'))

from pycocotools.coco import COCO


class PoseTrackJoints_COCO(object):
    def __init__(self):
        self.kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder',
                         'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist',
                         'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']
        self.max_num_joints = 17
        self.color = np.random.randint(0, 256, (self.max_num_joints, 3))

        self.mpi = []
        self.test_mpi = []
        for mpi, stage in zip([self.mpi, self.test_mpi], ['train', 'val']):
            if stage == 'train':
                self._train_gt_path = os.path.join('../data/COCO', 'MSCOCO', 'annotations',
                                                   'person_keypoints_trainvalminusminival2014.json')
                coco = COCO(self._train_gt_path)
            else:
                self._val_gt_path = os.path.join('../data/COCO', 'MSCOCO', 'annotations',
                                                 'person_keypoints_minival2014.json')
                coco = COCO(self._val_gt_path)

            if stage == 'train':
                for aid in coco.anns.keys():
                    ann = coco.anns[aid]
                    if ann['image_id'] not in coco.imgs or ann['image_id'] == '366379':
                        continue
                    imgname = coco.imgs[ann['image_id']]['file_name']
                    prefix_head = "../data/COCO/MSCOCO/images/"
                    prefix = 'val' if 'val' in imgname else 'train'
                    rect = np.array([0, 0, 1, 1], np.int32)
                    if ann['iscrowd']:
                        continue
                    joints = ann['keypoints']

                    ''' change the COCO order into PoseTrack order '''
                    joints = change_order_COCO_to_PoseTrack(joints)

                    bbox = ann['bbox']
                    if np.sum(joints[2::3]) == 0 or ann['num_keypoints'] == 0:
                        continue
                    imgname = prefix_head + prefix + '2014/' + 'COCO_' + prefix + '2014' + '_' + str(ann['image_id']).zfill(12) + '.jpg'
                    humanData = dict(aid=aid, joints=joints, imgpath=imgname, headRect=rect,
                                     bbox=bbox, imgid=ann['image_id'], segmentation=ann['segmentation'])
                    mpi.append(humanData)
            elif stage == 'val':
                files = [(img_id, coco.imgs[img_id]) for img_id in coco.imgs]
                for img_id, img_info in files:
                    imgname = stage + '2014/' + img_info['file_name']
                    humanData = dict(imgid=img_id, imgpath=imgname)
                    mpi.append(humanData)
            else:
                print('PoseTrack_COCO data error, please check')
                embed()

    def load_data(self, min_kps=1):
        mpi = [i for i in self.mpi if np.sum(np.array(i['joints'], copy=False)[2::3] > 0) >= min_kps]
        return mpi, self.test_mpi


def change_order_COCO_to_PoseTrack(pose_keypoints_2d_COCO):
    # COCO:      {0-nose 1-Leye 2-Reye 3-Lear 4-Rear 5-Lsho 6-Rsho 7-Lelb 8-Relb 9-Lwri 10-Rwri 11-Lhip 12-Rhip 13-Lkne 14-Rkne 15-Lank 16-Rank}
    # PoseTrack: {0-Rank 1-Rkne 2-Rhip 3-Lhip 4-Lkne 5-Lank 6-Rwri 7-Relb 8-Rsho 9-Lsho 10-Lelb 11-Lwri 12-neck 13-nose 14-TopHead}

    order_mapping = {0: 13, 1: 14, 2: 14, 3: 14, 4: 14, 5: 9, 7: 10, 9: 11, 6: 8, 8: 7,
                     10: 6, 11: 3, 13: 4, 15: 5, 12: 2, 14: 1, 16: 0}

    num_keypoints_COCO = int(len(pose_keypoints_2d_COCO) / 3)
    pose_keypoints_2d_PoseTrack = 15 * 3 * [0]

    for index_COCO in range(num_keypoints_COCO):
        x = pose_keypoints_2d_COCO[3 * index_COCO]
        y = pose_keypoints_2d_COCO[3 * index_COCO + 1]
        score = pose_keypoints_2d_COCO[3 * index_COCO + 2]
        index_PoseTrack = order_mapping[index_COCO]

        # neck (12) and head top (14) have no direct COCO counterpart; they are synthesized below
        if index_PoseTrack == 12:
            continue
        elif index_PoseTrack == 14:
            continue
        else:
            pose_keypoints_2d_PoseTrack[3 * index_PoseTrack] = x
            pose_keypoints_2d_PoseTrack[3 * index_PoseTrack + 1] = y
            pose_keypoints_2d_PoseTrack[3 * index_PoseTrack + 2] = score

    # neck (12): midpoint of the two shoulders (COCO 5 and 6)
    pose_keypoints_2d_PoseTrack[3 * 12] = (pose_keypoints_2d_COCO[3 * 5] + pose_keypoints_2d_COCO[3 * 6]) / 2
    pose_keypoints_2d_PoseTrack[3 * 12 + 1] = (pose_keypoints_2d_COCO[3 * 5 + 1] + pose_keypoints_2d_COCO[3 * 6 + 1]) / 2
    pose_keypoints_2d_PoseTrack[3 * 12 + 2] = (pose_keypoints_2d_COCO[3 * 5 + 2] + pose_keypoints_2d_COCO[3 * 6 + 2]) / 2

    # head top (14): x and score from the eye midpoint, y extrapolated from nose (13) and neck (12)
    pose_keypoints_2d_PoseTrack[3 * 14] = (pose_keypoints_2d_COCO[3 * 1] + pose_keypoints_2d_COCO[3 * 2]) / 2
    pose_keypoints_2d_PoseTrack[3 * 14 + 1] = 2 * pose_keypoints_2d_PoseTrack[3 * 13 + 1] - pose_keypoints_2d_PoseTrack[3 * 12 + 1]
    pose_keypoints_2d_PoseTrack[3 * 14 + 2] = (pose_keypoints_2d_COCO[3 * 1 + 2] + pose_keypoints_2d_COCO[3 * 2 + 2]) / 2
    return pose_keypoints_2d_PoseTrack


if __name__ == '__main__':
    coco_joints = PoseTrackJoints_COCO()
    train, test = coco_joints.load_data(min_kps=1)
    from IPython import embed; embed()
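
The COCO-to-PoseTrack reordering above is easier to follow with a toy input. The sketch below is not part of the commit: it assumes it is run from inside the HPE/ directory with pycocotools installed (so that AllJoints_COCO imports cleanly), builds a 17-keypoint COCO-style flat list of [x, y, score] triples, and prints where each joint lands in the 15-joint PoseTrack layout. The joint-name labels are only for readability.

# Minimal usage sketch (not part of this commit). Assumes HPE/ as the working
# directory and pycocotools installed, so the module above imports without error.
from AllJoints_COCO import change_order_COCO_to_PoseTrack

# Toy COCO keypoints: joint i sits at (i, 100 + i) with score 2 ("visible").
coco_kps = []
for i in range(17):
    coco_kps += [float(i), 100.0 + i, 2.0]

pt_kps = change_order_COCO_to_PoseTrack(coco_kps)

posetrack_names = ['r_ankle', 'r_knee', 'r_hip', 'l_hip', 'l_knee', 'l_ankle',
                   'r_wrist', 'r_elbow', 'r_shoulder', 'l_shoulder', 'l_elbow',
                   'l_wrist', 'neck', 'nose', 'head_top']
for j, name in enumerate(posetrack_names):
    x, y, s = pt_kps[3 * j:3 * j + 3]
    print('%2d %-10s x=%5.1f y=%5.1f score=%.1f' % (j, name, x, y, s))

# With this input, neck (12) comes out as the shoulder midpoint (x = (5 + 6) / 2 = 5.5),
# and head_top (14) takes its x from the eye midpoint and its y from 2 * nose_y - neck_y.

The printout makes the two synthesized joints obvious; the remaining thirteen joints are pure reorderings of the original COCO triples.
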
HPE/AllJoints_PoseTrack.py
@@ -0,0 +1,176 @@
#!/usr/bin/python3
# coding=utf-8

'''
Author: Guanghan Ning
E-mail: guanghan.ning@jd.com
'''
import os
import os.path as osp
import numpy as np
import cv2

import sys
cur_dir = os.path.dirname(__file__)

from utils_json import read_json_from_file


class PoseTrackJoints(object):
    def __init__(self):
        # {0-Rank 1-Rkne 2-Rhip 3-Lhip 4-Lkne 5-Lank 6-Rwri 7-Relb 8-Rsho 9-Lsho 10-Lelb 11-Lwri 12-neck 13-nose 14-TopHead}
        self.kp_names = ['right_ankle', 'right_knee', 'right_pelvis',
                         'left_pelvis', 'left_knee', 'left_ankle',
                         'right_wrist', 'right_elbow', 'right_shoulder',
                         'left_shoulder', 'left_elbow', 'left_wrist',
                         'upper_neck', 'nose', 'head']
        self.max_num_joints = 15
        self.color = np.random.randint(0, 256, (self.max_num_joints, 3))

        self.posetrack = []
        self.test_posetrack = []
        for posetrack, stage in zip([self.posetrack, self.test_posetrack], ['train', 'val']):
            if stage == 'train':
                self._train_gt_path = "posetrack_merged_train.json"
                gt_python_data = read_json_from_file(self._train_gt_path)
                anns = gt_python_data["annolist"]
            else:
                self._val_gt_path = "posetrack_merged_val.json"
                gt_python_data = read_json_from_file(self._val_gt_path)
                anns = gt_python_data["annolist"]

            if stage == 'train':
                for aid, ann in enumerate(anns):
                    if ann["is_labeled"][0] == 0: continue
                    if not ann["annorect"]:  # if it is empty
                        continue

                    num_candidates = len(ann["annorect"])
                    for candidate_id in range(0, num_candidates):
                        if not ann["annorect"][candidate_id]["annopoints"]: continue  # list is empty

                        # (1) bbox
                        bbox = get_bbox_from_keypoints(ann["annorect"][candidate_id]["annopoints"][0]["point"])
                        bbox = x1y1x2y2_to_xywh(bbox)
                        if bbox == [0, 0, 2, 2]: continue

                        # (2) imgpath
                        imgname = ann["image"][0]["name"]
                        prefix = '../data/posetrack_data/'
                        imgpath = os.path.join(prefix, imgname)

                        # (3) joints
                        joints = get_joints_from_ann(ann["annorect"][candidate_id]["annopoints"][0]["point"])
                        num_points = len(ann["annorect"][candidate_id]["annopoints"][0]["point"])
                        if np.sum(joints[2::3]) == 0 or num_points == 0:
                            continue

                        # (4) head_rect: useless
                        rect = np.array([0, 0, 1, 1], np.int32)

                        ''' This [humanData] is what [load_data] will provide '''
                        humanData = dict(aid=aid, joints=joints, imgpath=imgpath, headRect=rect,
                                         bbox=bbox, imgid=ann['imgnum'][0])
                        posetrack.append(humanData)
            elif stage == 'val':
                for aid, ann in enumerate(anns):
                    if ann["is_labeled"][0] == 0: continue
                    if not ann["annorect"]:  # if it is empty
                        continue

                    num_candidates = len(ann["annorect"])
                    for candidate_id in range(0, num_candidates):
                        if not ann["annorect"][candidate_id]["annopoints"]: continue  # list is empty

                        imgname = ann["image"][0]["name"]
                        prefix = '../data/posetrack_data/'
                        imgpath = os.path.join(prefix, imgname)

                        humanData = dict(imgid=aid, imgpath=imgpath)
                        posetrack.append(humanData)
            else:
                print('PoseTrack data error, please check')
                embed()

    def load_data(self, min_kps=1):
        posetrack = [i for i in self.posetrack if np.sum(np.array(i['joints'], copy=False)[2::3] > 0) >= min_kps]
        return posetrack, self.test_posetrack


def get_joints_from_ann(keypoints_python_data):
    num_keypoints = len(keypoints_python_data)
    keypoints_dict = {}
    for pid in range(num_keypoints):
        keypoint_id = keypoints_python_data[pid]["id"][0]
        x = int(keypoints_python_data[pid]["x"][0])
        y = int(keypoints_python_data[pid]["y"][0])
        vis = int(keypoints_python_data[pid]["is_visible"][0]) + 1

        keypoints_dict[keypoint_id] = [x, y, vis]

    for i in range(15):
        if i not in keypoints_dict.keys():
            keypoints_dict[i] = [0, 0, 0]  # Should we set them to zero? Yes! COCO dataset did this too.

    keypoints_list = []
    for i in range(15):
        keypoints = keypoints_dict[i]
        keypoints_list.append(keypoints[0])
        keypoints_list.append(keypoints[1])
        keypoints_list.append(keypoints[2])
    return keypoints_list


def x1y1x2y2_to_xywh(det):
    x1, y1, x2, y2 = det
    w, h = int(x2) - int(x1), int(y2) - int(y1)
    return [x1, y1, w, h]


def get_bbox_from_keypoints(keypoints_python_data):
    num_keypoints = len(keypoints_python_data)
    x_list = []
    y_list = []
    for keypoint_id in range(num_keypoints):
        x = keypoints_python_data[keypoint_id]["x"][0]
        y = keypoints_python_data[keypoint_id]["y"][0]
        x_list.append(x)
        y_list.append(y)
    min_x = min(x_list)
    min_y = min(y_list)
    max_x = max(x_list)
    max_y = max(y_list)

    scale = 0.2  # enlarge bbox by 20% with same center position
    bbox = enlarge_bbox([min_x, min_y, max_x, max_y], scale)
    return bbox


def enlarge_bbox(bbox, scale):
    assert(scale > 0)
    min_x, min_y, max_x, max_y = bbox
    margin_x = int(0.5 * scale * (max_x - min_x))
    margin_y = int(0.5 * scale * (max_y - min_y))
    if margin_x < 0: margin_x = 2
    if margin_y < 0: margin_y = 2

    min_x -= margin_x
    max_x += margin_x
    min_y -= margin_y
    max_y += margin_y

    width = max_x - min_x
    height = max_y - min_y
    if max_y < 0 or max_x < 0 or width <= 0 or height <= 0 or width > 2000 or height > 2000:
        min_x = 0
        max_x = 2
        min_y = 0
        max_y = 2

    bbox_enlarged = [min_x, min_y, max_x, max_y]
    return bbox_enlarged


if __name__ == '__main__':
    joints = PoseTrackJoints()
    train, test = joints.load_data(min_kps=1)
    from IPython import embed; embed()
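
Since the PoseTrack person boxes are derived from the keypoints rather than read from the annotation, the 20% enlargement in enlarge_bbox is worth checking with concrete numbers. The snippet below is a usage sketch rather than part of the commit: it assumes it runs from HPE/ with utils/ on sys.path (for utils_json, which the module imports at load time), and the box coordinates are purely illustrative.

# Usage sketch (not part of this commit). Assumes HPE/ as the working directory
# and utils/ on sys.path so that AllJoints_PoseTrack imports cleanly.
from AllJoints_PoseTrack import enlarge_bbox, x1y1x2y2_to_xywh

tight = [100, 50, 200, 250]             # x1, y1, x2, y2 spanned by the keypoints
loose = enlarge_bbox(tight, 0.2)        # 10% margin on each side, same center
print(loose)                            # [90, 30, 210, 270]
print(x1y1x2y2_to_xywh(loose))          # [90, 30, 120, 240], the xywh form stored in humanData

Note that the "if bbox == [0, 0, 2, 2]: continue" filter in __init__ catches exactly the degenerate boxes that enlarge_bbox resets to [0, 0, 2, 2] when the keypoint extent is empty, negative, or implausibly large.
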
