-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
executable file
·178 lines (144 loc) · 6.15 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
import argparse
import torch
import torchvision.transforms as transforms
import pathlib
import os
import json
from Panoptic_configs.config import config
from Panoptic_configs.config import update_config
from Panoptic_dataset_class.panoptic import Panoptic
from Panoptic_dataset_class.utils.vis import save_origin_img, save_annotations_vis_img
'''
ANNOTATION_BUILDER_LIST = [
'160422_ultimatum1',
'160224_haggling1',
'160226_haggling1',
'161202_haggling1',
'160906_ian1',
'160906_ian2',
'160906_ian3',
'160906_band1',
'160906_band2',
'160906_band3',
'160906_pizza1',
'160422_haggling1',
'160906_ian5',
'160906_band4'
]
'''
def parse_args(argv=None):
    """Parse command-line arguments for the Panoptic annotations builder.

    Args:
        argv: Optional list of argument strings to parse; ``None`` (the
            default) falls back to ``sys.argv[1:]``, preserving the original
            call pattern ``parse_args()``.

    Returns:
        argparse.Namespace with ``panoptic_config_file_path`` set.
    """
    # NOTE(review): the original description read 'Yolo-x model
    # train/inference' — a copy-paste error; this script builds Panoptic
    # ground-truth annotations, not a YOLO trainer.
    parser = argparse.ArgumentParser(
        description='Panoptic ground-truth annotations builder')
    # Path
    parser.add_argument(
        '--panoptic_config_file_path',
        help='Panoptic config file path',
        default='./Panoptic_configs/Panoptic_annotations_builder_config.yaml')
    args = parser.parse_args(argv)
    return args
def main():
    """Export Panoptic ground-truth annotations and camera calibrations.

    For every sequence listed in the YAML config, iterates the dataset one
    batch at a time and writes, per view/frame:
      - a ``<view>_<frame>_gt.json`` annotation file (3D/2D keypoints,
        bounding boxes),
      - the original input image,
      - optionally a visualization image (``config.TEST.SAVE_VIS_IMAGE``),
    all under ``config.OUTPUT_DIR/<seq>/<view>/``, plus one copy of the
    sequence's camera calibration JSON.
    """
    # Args parsing
    args = parse_args()
    # Update config — mutates the module-global `config` in place from YAML.
    update_config(args.panoptic_config_file_path)
    # ImageNet mean/std — the standard torchvision normalization constants.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # e.g. GPUS='0,1' -> [0, 1]; only used below to scale the batch size.
    gpus = [int(i) for i in config.GPUS.split(',')]
    ANNOTATION_BUILDER_LIST = config.DATASET.SEQ_LIST
    cam_list = config.DATASET.CAMERA_LIST
    for seq in ANNOTATION_BUILDER_LIST:
        # Load Panoptic
        panoptic_dataset = Panoptic( \
            config, config.DATASET.TEST_SUBSET, False, seq, \
            cam_list = cam_list, \
            interval = 1, \
            transform = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        )
        # Batch loader — shuffle off so output files follow frame order.
        loader = torch.utils.data.DataLoader(
            panoptic_dataset,
            batch_size=config.TEST.BATCH_SIZE * len(gpus),
            shuffle=False,
            num_workers=config.WORKERS,
            pin_memory=True)
        # Save data — no gradients needed; this is pure data export.
        with torch.no_grad():
            for b, batch_data in enumerate(loader):
                inputs, targets_2d, weights_2d, targets_3d, metas, input_heatmap = batch_data
                # `metas`/`inputs` appear to be parallel per-view lists.
                # NOTE(review): the [0] indexing throughout assumes an
                # effective batch size of 1 per view — TODO confirm.
                for view_num, (meta, input) in enumerate(zip(metas, inputs)):
                    # Annotation payload for this single view/frame.
                    data = {}
                    data['bodies'] = []
                    # Path components for the output files.
                    seq_name = meta['seq'][0]
                    # Frame id from a filename like '..._<frame>.jpg':
                    # take the last '_'-separated chunk, drop the 4-char
                    # extension. Assumes a 3-letter extension — TODO confirm.
                    image_id = meta['image'][0].split('_')[-1][:-4]
                    view_id = meta['view_id'][0]
                    file_name = view_id+'_'+str(image_id)
                    dir_path = os.path.join('.', config.OUTPUT_DIR, seq_name, view_id)
                    # Make dir — idempotent thanks to exist_ok=True.
                    pathlib.Path(os.path.join(dir_path, 'annotations')).mkdir(parents=True, exist_ok=True)
                    pathlib.Path(os.path.join(dir_path, 'origin_images')).mkdir(parents=True, exist_ok=True)
                    if config.TEST.SAVE_VIS_IMAGE:
                        pathlib.Path(os.path.join(dir_path, 'vis_images')).mkdir(parents=True, exist_ok=True)
                    # One entry per person id present in this frame.
                    for id_num in meta['id']:
                        id_num = int(id_num.cpu().numpy())
                        # Data list — flat float lists, JSON-serializable.
                        transformed_joints_3d = []
                        transformed_joints_3d_vis = []
                        projected_joints_2d = []
                        projected_joints_2d_vis = []
                        bb = []
                        bb_clip = []
                        vis_bb = []
                        vis_bb_clip = []
                        # 3D keypoints — flattened as [x, y, z, x, y, z, ...].
                        for (j, vis) in zip(meta['joints_3d'][0][id_num], meta['joints_3d_vis'][0][id_num]):
                            transformed_joints_3d += [float(j[0].cpu().numpy()), float(j[1].cpu().numpy()), float(j[2].cpu().numpy())]
                            transformed_joints_3d_vis += [float(vis[0].cpu().numpy())]
                        # 2D keypoints — flattened as [x, y, x, y, ...].
                        for (j, vis) in zip(meta['joints'][0][id_num], meta['joints_vis'][0][id_num]):
                            projected_joints_2d += [float(j[0].cpu().numpy()), float(j[1].cpu().numpy())]
                            projected_joints_2d_vis += [float(vis[0].cpu().numpy())]
                        # 2D bounding boxes — 4 floats each; 'clip' variants are
                        # presumably clipped to image bounds — TODO confirm.
                        box = meta['bounding_boxes'][0][id_num]
                        bb += [float(box[0].cpu().numpy()), float(box[1].cpu().numpy()), float(box[2].cpu().numpy()), float(box[3].cpu().numpy())]
                        box_clip = meta['bounding_boxes_clip'][0][id_num]
                        bb_clip += [float(box_clip[0].cpu().numpy()), float(box_clip[1].cpu().numpy()), float(box_clip[2].cpu().numpy()), float(box_clip[3].cpu().numpy())]
                        vis_box = meta['vis_bounding_boxes'][0][id_num]
                        vis_bb += [float(vis_box[0].cpu().numpy()), float(vis_box[1].cpu().numpy()), float(vis_box[2].cpu().numpy()), float(vis_box[3].cpu().numpy())]
                        vis_box_clip = meta['vis_bounding_boxes_clip'][0][id_num]
                        vis_bb_clip += [float(vis_box_clip[0].cpu().numpy()), float(vis_box_clip[1].cpu().numpy()), float(vis_box_clip[2].cpu().numpy()), float(vis_box_clip[3].cpu().numpy())]
                        data['bodies'].append({
                            "view_id": view_id,
                            "id": int(id_num),
                            "num_person": int(meta['num_person'].cpu().numpy()),
                            # Input tensor is indexed as (..., H, W) — last two
                            # dims are height/width of the network input.
                            "input_width": input.cpu().numpy().shape[-1],
                            "input_height": input.cpu().numpy().shape[-2],
                            "transformed_joints_3d": transformed_joints_3d,
                            "transformed_joints_3d_vis": transformed_joints_3d_vis,
                            "projected_joints_2d": projected_joints_2d,
                            "projected_joints_2d_vis": projected_joints_2d_vis,
                            "bbox": bb,
                            "bbox_clip": bb_clip,
                            "vis_bbox": vis_bb,
                            "vis_bbox_clip": vis_bb_clip
                        })
                    # Save annotations
                    with open(os.path.join(dir_path, 'annotations', '{}_gt.json'.format(file_name)), 'w') as outfile:
                        json.dump(data, outfile)
                    #print('Save anno file:', os.path.join(dir_path, 'annotations', '{}_gt.json'.format(file_name)))
                    # Save GT visualization img
                    save_origin_img(dir_path, file_name, input)
                    if config.TEST.SAVE_VIS_IMAGE:
                        save_annotations_vis_img(dir_path, file_name, input, meta)
                # Progress log every 100 batches; `meta` is the last view's
                # metadata from the loop above.
                if len(metas) > 0 and b % 100 == 0:
                    print(f"{seq}: {meta['seq'][0]}, idx: {b}, image_id: {meta['image'][0]}")
        # Save camera — copy the sequence's calibration JSON next to the
        # exported annotations so the output dir is self-contained.
        cam_file = os.path.join('.', config.DATASET.ROOT, seq, 'calibration_{:s}.json'.format(seq))
        with open(cam_file) as cfile:
            calib = json.load(cfile)
        with open(os.path.join('.', config.OUTPUT_DIR, seq, 'calibration_{:s}.json'.format(seq)), 'w') as outfile:
            json.dump(calib, outfile)
        print('Save camera json file:', os.path.join('.', config.OUTPUT_DIR, seq, 'calibration_{:s}.json'.format(seq)))
# Script entry point — run the annotation export when invoked directly.
if __name__ == '__main__':
    main()