Optimize the example of yolov5.
raul.rao committed Aug 23, 2021
1 parent a11f836 commit 39d2824
Showing 5 changed files with 22 additions and 62 deletions.
12 changes: 3 additions & 9 deletions examples/onnx/yolov5/README.md
@@ -8,6 +8,8 @@

`python export.py --weight yolov5s.pt`

Note: the yolov5 project can only be exported correctly with PyTorch 1.8.0 or 1.9.0; keep the default opset version for the export.

3. Copy the exported ONNX model into this demo directory; running the command will draw two detection result windows.

`python test.py`
@@ -18,15 +20,7 @@

1. When switching to a model you trained yourself, make sure the anchors and other post-processing parameters are aligned; otherwise the post-processing will decode the results incorrectly.

2. The results produced by the latest yolov5 models consist of two parts:

Part A: results that have already been post-processed inside the model, shown in the 'direct result' window.

Part B: raw results without in-model post-processing, shown in the 'full post process result' window.

3. Without quantization, either part can be used to obtain correct results.

4. With **quantization**, the coordinates in part A lie in the range [0, img_size] while the confidences lie in [0, 1]. Because the confidence values are concatenated into the same tensor as the coordinates, their much smaller scale causes them to lose precision during quantization, so the part A results cannot be used directly after quantization; **you must run your own post-processing on the part B results to obtain correct values** (see the first sketch after this list). <u>**This behavior is inherent to quantization itself; be aware that placing data of very different scales in the same tensor leads to severe precision loss when that tensor is quantized.**</u>
2. When converting the model, the test code specifies the output nodes ['396', '458', '520'], which are the 2nd, 3rd and 4th outputs of the original model with the trailing transpose operators removed; this is done for compatibility with the examples in rknpu/rknn/rknn_api/examples. Handle this however suits your actual deployment code; this demo only shows one possible approach, not the only one. For other models such as yolov5m or yolov5l, look up the corresponding output node names yourself; the visualization tool netron is recommended (see the second sketch below for a scripted alternative).
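
Below is a small numeric sketch of the precision-loss effect described in item 4 above. It is not part of the demo and assumes a simple asymmetric 8-bit per-tensor quantization scheme; RKNN's quantizer differs in detail, but the scale mismatch it illustrates is the same:

```python
import numpy as np

def fake_quant(x, n_levels=256):
    # Quantize and dequantize the whole tensor with a single per-tensor scale.
    scale = (x.max() - x.min()) / (n_levels - 1)
    q = np.round((x - x.min()) / scale)
    return q * scale + x.min()

coords = np.array([12.7, 305.4, 633.9])   # box coordinates, range ~[0, img_size]
confs = np.array([0.91, 0.55, 0.08])      # confidences, range [0, 1]
mixed = np.concatenate([coords, confs])   # part A stores both in one tensor

deq = fake_quant(mixed)
print(deq[:3])   # coordinates survive with only a small error
print(deq[3:])   # all confidences collapse onto the same quantization level
```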



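For locating the output node names without netron, here is a minimal sketch, assuming the `onnx` Python package is installed (the demo itself does not use it), that prints the output tensor name of every Conv node in the exported model; in a stock yolov5s export the three detection heads ('396', '458', '520') are typically the last three Conv nodes:

```python
import onnx

# List every Conv node and its output tensor name so the detection-head
# outputs can be located for other variants such as yolov5m or yolov5l.
model = onnx.load('yolov5s.onnx')
for node in model.graph.node:
    if node.op_type == 'Conv':
        print(node.name, '->', node.output[0])
```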
Binary file added examples/onnx/yolov5/bus.jpg
2 changes: 1 addition & 1 deletion examples/onnx/yolov5/dataset.txt
@@ -1 +1 @@
dog_bike_car_416x416.jpg
bus.jpg
Binary file removed examples/onnx/yolov5/dog_bike_car_416x416.jpg
Binary file not shown.
70 changes: 18 additions & 52 deletions examples/onnx/yolov5/test.py
@@ -9,10 +9,12 @@


ONNX_MODEL = 'yolov5s.onnx'
RKNN_MODEL = 'yolov5.rknn'
IMG_PATH = './dog_bike_car_416x416.jpg'
RKNN_MODEL = 'yolov5s.rknn'
IMG_PATH = './bus.jpg'
DATASET = './dataset.txt'

QUANTIZE_ON = True

OBJ_THRESH = 0.5
NMS_THRESH = 0.6
IMG_SIZE = 640
@@ -126,38 +128,8 @@ def nms_boxes(boxes, scores):
    keep = np.array(keep)
    return keep

def yolov5_post_process_simple(prediction):
    nc = prediction.shape[2] - 5
    xc = prediction[..., 4] > OBJ_THRESH
    valid_object = prediction[xc]
    valid_object[:,5:] *= valid_object[:,4:5]

    boxes = xywh2xyxy(valid_object[:,:4])
    best_score_class = np.max(valid_object[:,5:],axis=-1)
    box_classes = np.argmax(valid_object[:,5:], axis=-1)

    nboxes, nclasses, nscores = [], [], []
    for c in set(box_classes):
        inds = np.where(box_classes == c)
        b = boxes[inds]
        c = box_classes[inds]
        s = best_score_class[inds]

        keep = nms_boxes(b, s)
        nboxes.append(b[keep])
        nclasses.append(c[keep])
        nscores.append(s[keep])

    if not nclasses and not nscores:
        return None, None, None

    boxes = np.concatenate(nboxes)
    classes = np.concatenate(nclasses)
    scores = np.concatenate(nscores)

    return boxes, classes, scores

def yolov5_post_process_full(input_data):
def yolov5_post_process(input_data):
    masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
               [59, 119], [116, 90], [156, 198], [373, 326]]
@@ -262,20 +234,22 @@ def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)):
    rknn.config(reorder_channel='0 1 2',
                mean_values=[[0, 0, 0]],
                std_values=[[255, 255, 255]],
                optimization_level=3)
                optimization_level=3,
                target_platform = 'rk1808',
                output_optimize=1)
    print('done')

    # Load ONNX model
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL)
    ret = rknn.load_onnx(model=ONNX_MODEL,outputs=['396', '458', '520'])
    if ret != 0:
        print('Load yolov5 failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=False, dataset=DATASET)
    ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET)
    if ret != 0:
        print('Build yolov5 failed!')
        exit(ret)
@@ -291,8 +265,8 @@ def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)):

    # init runtime environment
    print('--> Init runtime environment')
    # ret = rknn.init_runtime()
    ret = rknn.init_runtime('rk1808')
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime('rk1808', device_id='1808')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
@@ -306,19 +280,11 @@ def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)):
    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])

    # simple post process
    boxes, classes, scores = yolov5_post_process_simple(outputs[0])

    img_0 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    if boxes is not None:
        draw(img_0, boxes, scores, classes)
    cv2.imshow("direct result", img_0)

    # full post process
    input0_data = outputs[1].transpose(0,1,4,2,3)
    input1_data = outputs[2].transpose(0,1,4,2,3)
    input2_data = outputs[3].transpose(0,1,4,2,3)
    # post process
    input0_data = outputs[0]
    input1_data = outputs[1]
    input2_data = outputs[2]

    input0_data = input0_data.reshape(*input0_data.shape[1:])
    input1_data = input1_data.reshape(*input1_data.shape[1:])
@@ -329,12 +295,12 @@ def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)):
    input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))

    boxes, classes, scores = yolov5_post_process_full(input_data)
    boxes, classes, scores = yolov5_post_process(input_data)

    img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    if boxes is not None:
        draw(img_1, boxes, scores, classes)
    cv2.imshow("full post process result", img_1)
    cv2.imshow("post process result", img_1)
    cv2.waitKeyEx(0)

    rknn.release()
