The shown result is wrong #4
Comments
It seems the model aims to detect the background rather than the object. How is your mAP result? Is it also strange? FYI, I've tested the model on the COCO dataset and the result is good. #1 (comment)
Tips: Maybe you can load the official .pth model to check the results.
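A minimal sanity check along those lines, assuming an mmdet 2.x environment (the config and checkpoint paths below are placeholders, not files from this repo):

from mmdet.apis import inference_detector, init_detector

# Placeholder paths: point these at the official SOLOv2 config and the
# officially released checkpoint rather than your self-trained .pth.
config = 'configs/solov2/solov2_r50_fpn_3x_coco.py'
checkpoint = 'checkpoints/solov2_r50_fpn_3x_coco_official.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'imgs/30208.jpg')
# If the official weights also highlight the background, suspect the test
# code; if they look fine, suspect the training run instead.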
Yes, just delete it. I was testing a low-data dataset at that moment.
Again, it would be useful if you could provide your training logs, because the demo you trained on the official project (WXlong/SOLO) is also strange: it contains overlapping parts, yet Matrix NMS should have avoided this situation.
I think the trained model is fine. The problem is that there is no Matrix NMS in the test phase when running tools_det/demo.py, so all the boxes are drawn on the image without NMS. Why does the result you showed above contain only one box? Can you show your test code?
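For context, the Matrix NMS being discussed decays the scores of overlapping masks instead of hard-discarding them. A condensed, class-agnostic sketch of the Gaussian-kernel variant from the SOLOv2 paper (an illustrative re-derivation, not this repo's exact code):

import torch

def matrix_nms_scores(masks, scores, sigma=2.0):
    """Decay the scores of overlapping masks (Gaussian kernel, class-agnostic).

    masks: (N, H, W) binary tensor, sorted by score in descending order,
           each mask assumed non-empty; scores: (N,) confidences.
    """
    n = masks.size(0)
    flat = masks.reshape(n, -1).float()
    inter = flat @ flat.t()                    # pairwise intersection areas
    areas = flat.sum(dim=1)
    union = areas[:, None] + areas[None, :] - inter
    iou = (inter / union).triu(diagonal=1)     # IoU of j with higher-scored i
    # For each mask i, its largest overlap with any higher-scored mask,
    # used to compensate the decay it imposes on lower-scored masks.
    compensate = iou.max(dim=0).values[:, None].expand(n, n)
    decay = torch.exp(-sigma * (iou ** 2 - compensate ** 2)).min(dim=0).values
    return scores * decay

Thresholding the decayed scores (e.g. with --score-thr) is what removes the duplicated, overlapping predictions.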
A personal question: do you speak Chinese? We can talk in Chinese.
Demo code
The demo.py comes directly from the official mmdetection project.
'''
Author: your name
Date: 2021-12-20 16:47:41
LastEditTime: 2022-04-21 15:48:09
LastEditors: Please set LastEditors
Description: Open koroFileHeader to view and adjust the configuration: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: /research_workspace/tools_det/demo.py
'''
from argparse import ArgumentParser
import cv2
from mmdet.apis import (async_inference_detector, inference_detector,
                        init_detector, show_result_pyplot)
def parse_args():
    parser = ArgumentParser()
    parser.add_argument('img', help='Input image file')
    parser.add_argument('out', help='Output image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    args = parser.parse_args()
    return args
# Shadows the imported show_result_pyplot: renders the result without
# displaying it and writes it to out_file instead.
def show_result_pyplot(model,
                       img,
                       result,
                       out_file,  # out_file added; delete this comment when running
                       score_thr=0.5):
    if hasattr(model, 'module'):
        model = model.module
    res_img = model.show_result(
        img,
        result,
        score_thr=score_thr,
        show=False)
    cv2.imwrite(out_file, res_img)
def main(args):
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_detector(model, args.img)
    # save the visualized results
    show_result_pyplot(model, args.img, result, args.out, score_thr=args.score_thr)
if __name__ == '__main__':
    args = parse_args()
    main(args)

Pretrain model conversion code
This code can help you convert an official SOLOv2 .pth checkpoint to our mmdet format.

# disclaimer: inspired by the MoCo official repo and the PyContrast repo
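# In brief: the loop below walks the checkpoint's state dict and renames keys
# from the training-time scheme (backbone.layerX, neck.lateral_convs /
# neck.fpn_convs, roi_head.bbox_head.*) to the target scheme
# (backbone.bottom_up.resX or backbone.stem, backbone.fpn_lateralX /
# backbone.fpn_outputX, roi_heads.box_head.*), e.g.
#   backbone.layer1.0.bn1.weight -> backbone.bottom_up.res2.0.conv1.norm.weight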
import argparse
import pickle as pkl

import torch

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert Models')
    parser.add_argument('input', metavar='I',
                        default="/disk1/lihao/model_zoo/InstanceLoc/insloc_c4_400ep.pkl",
                        help='input model path')
    parser.add_argument('output', metavar='O',
                        default="/disk1/lihao/model_zoo/InstanceLoc/insloc_c4_400ep.pth",
                        help='output path')
    parser.add_argument('--c4', action='store_true',
                        help='convert a C4 backbone instead of an FPN one')
    args = parser.parse_args()
    print('=========================')
    print(f'converting {args.input}')
    print('=========================')
    torch_weight = torch.load(args.input, encoding='ascii')
    # torch_weight = torch.load(args.input)['state_dict']
    new_state_dict = {}
    for k, v in torch_weight.items():
        # C4 backbone treats C5 layers as the shared head
        if 'roi_head.shared_head' in k:
            old_k = k
            k = k.replace('roi_head.shared_head', 'backbone')
            print(old_k, '---->', k)
        if not args.c4 and 'roi_head.bbox_head.shared_fcs' in k:
            old_k = k
            k = k.replace('roi_head.bbox_head.shared_fcs.0.weight',
                          'roi_heads.box_head.fc1.weight')
            k = k.replace('roi_head.bbox_head.shared_fcs.0.bias',
                          'roi_heads.box_head.fc1.bias')
            print(old_k, '---->', k)
            new_state_dict[k] = v.numpy()
            continue
        if 'backbone' in k and 'layer' not in k and 'backbone_k' not in k:
            old_k = k
            if args.c4:
                # C4 stem
                k = k.replace('backbone',
                              'backbone.stem').replace('bn1', 'conv1.norm')
            else:
                # FPN stem
                k = k.replace('backbone',
                              'backbone.bottom_up.stem').replace('bn1', 'conv1.norm')
            print(old_k, '---->', k)
        elif 'backbone' in k and 'backbone_k' not in k and 'layer' in k:
            if args.c4:
                # C4 backbone: res2-res4 stay in the backbone; res5 (layer4)
                # moves into the shared RoI head
                old_k = k
                if 'layer4' not in k:
                    k = k.replace("layer1", "res2")
                    k = k.replace("layer2", "res3")
                    k = k.replace("layer3", "res4")
                else:
                    k = k.replace("backbone", "roi_heads")
                    k = k.replace("layer4", "res5")
                k = k.replace("bn1", "conv1.norm")
                k = k.replace("bn2", "conv2.norm")
                k = k.replace("bn3", "conv3.norm")
                k = k.replace("downsample.0", "shortcut")
                k = k.replace("downsample.1", "shortcut.norm")
            else:
                # FPN backbone
                old_k = k
                k = k.replace('backbone', 'backbone.bottom_up')
                k = k.replace("layer1", "res2")
                k = k.replace("layer2", "res3")
                k = k.replace("layer3", "res4")
                k = k.replace("layer4", "res5")
                k = k.replace("bn1", "conv1.norm")
                k = k.replace("bn2", "conv2.norm")
                k = k.replace("bn3", "conv3.norm")
                k = k.replace("downsample.0", "shortcut")
                k = k.replace("downsample.1", "shortcut.norm")
            print(old_k, '--->', k)
        elif 'neck' in k and 'neck_k' not in k:
            # FPN neck
            old_k = k
            # replace lateral convs
            k = k.replace('neck.lateral_convs.0.bn', 'backbone.fpn_lateral2.norm')
            k = k.replace('neck.lateral_convs.1.bn', 'backbone.fpn_lateral3.norm')
            k = k.replace('neck.lateral_convs.2.bn', 'backbone.fpn_lateral4.norm')
            k = k.replace('neck.lateral_convs.3.bn', 'backbone.fpn_lateral5.norm')
            k = k.replace('neck.lateral_convs.0.conv', 'backbone.fpn_lateral2')
            k = k.replace('neck.lateral_convs.1.conv', 'backbone.fpn_lateral3')
            k = k.replace('neck.lateral_convs.2.conv', 'backbone.fpn_lateral4')
            k = k.replace('neck.lateral_convs.3.conv', 'backbone.fpn_lateral5')
            # replace fpn convs
            k = k.replace('neck.fpn_convs.0.bn', 'backbone.fpn_output2.norm')
            k = k.replace('neck.fpn_convs.1.bn', 'backbone.fpn_output3.norm')
            k = k.replace('neck.fpn_convs.2.bn', 'backbone.fpn_output4.norm')
            k = k.replace('neck.fpn_convs.3.bn', 'backbone.fpn_output5.norm')
            k = k.replace('neck.fpn_convs.0.conv', 'backbone.fpn_output2')
            k = k.replace('neck.fpn_convs.1.conv', 'backbone.fpn_output3')
            k = k.replace('neck.fpn_convs.2.conv', 'backbone.fpn_output4')
            k = k.replace('neck.fpn_convs.3.conv', 'backbone.fpn_output5')
            print(old_k, '--->', k)
        elif 'roi_head.bbox_head.shared_convs' in k:
            # 4conv in det head
            old_k = k
            conv_idx = int(k.split('.')[3])
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.conv.weight',
                f'roi_heads.box_head.conv{conv_idx + 1}.weight')
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.bn.weight',
                f'roi_heads.box_head.conv{conv_idx + 1}.norm.weight')
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.bn.bias',
                f'roi_heads.box_head.conv{conv_idx + 1}.norm.bias')
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.bn.running_mean',
                f'roi_heads.box_head.conv{conv_idx + 1}.norm.running_mean')
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.bn.running_var',
                f'roi_heads.box_head.conv{conv_idx + 1}.norm.running_var')
            k = k.replace(
                f'roi_head.bbox_head.shared_convs.{conv_idx}.bn.num_batches_tracked',
                f'roi_heads.box_head.conv{conv_idx + 1}.norm.num_batches_tracked')
            print(old_k, '--->', k)
        else:
            continue
        new_state_dict[k] = v.numpy()
    res = {
        "model": new_state_dict,
        "__author__": "Ceyuan",
        "matching_heuristics": True
    }
    with open(args.output, "wb") as f:
        pkl.dump(res, f)
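A hypothetical invocation, assuming the script above is saved as convert_solov2.py (the script name and file paths are placeholders):

python convert_solov2.py SOLO_R101_3x.pth solov2_converted.pkl

and a quick check that the renaming produced the expected keys:

import pickle as pkl

with open('solov2_converted.pkl', 'rb') as f:
    ckpt = pkl.load(f)
# Spot-check a few converted names and array shapes.
for name in list(ckpt['model'])[:5]:
    print(name, ckpt['model'][name].shape)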
Hi, thanks for your work.
I am using mmdet 2.23.0 to train SOLOv2 and successfully obtained a .pth model.
However, some problems occur when I test the model with demo.py.
When I run "python demo.py ./imgs/30208.jpg ./res.jpg ./configs/solo/solo_r101_fpn_3x_coco.py ./SOLO_R101_3x.pth",
the resulting image looks strange, and other models show the same result. Why does this happen?