diff --git a/4-Object_Detection/YOLOV3/core/backbone_fnet.py b/4-Object_Detection/YOLOV3/core/backbone_fnet.py
new file mode 100644
index 00000000..c56d4ce7
--- /dev/null
+++ b/4-Object_Detection/YOLOV3/core/backbone_fnet.py
@@ -0,0 +1,50 @@
+#! /usr/bin/env python
+# coding=utf-8
+#================================================================
+#   Copyright (C) 2019 * Ltd. All rights reserved.
+#
+#   Editor      : VIM
+#   File name   : backbone_fnet.py
+#   Author      : YunYang1994
+#   Created date: 2019-07-11 23:37:51
+#   Description :
+#
+#================================================================
+
+import tensorflow as tf
+import core.common as common
+
+
+def grid_eye_net_18(input_data):
+
+    input_data = common.convolutional(input_data, (3, 3, 3, 32))
+    input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)
+
+    for i in range(1):
+        input_data = common.residual_block(input_data, 64, 32, 64)
+
+    input_data = common.convolutional(input_data, (3, 3, 64, 128), downsample=True)
+
+    for i in range(1):
+        input_data = common.residual_block(input_data, 128, 64, 128)
+
+    input_data = common.convolutional(input_data, (3, 3, 128, 256), downsample=True)
+
+    for i in range(2):
+        input_data = common.residual_block(input_data, 256, 128, 256)
+
+    route_1 = input_data
+    input_data = common.convolutional(input_data, (3, 3, 256, 512), downsample=True)
+
+    for i in range(2):
+        input_data = common.residual_block(input_data, 512, 256, 512)
+
+    route_2 = input_data
+    input_data = common.convolutional(input_data, (3, 3, 512, 1024), downsample=True)
+
+    for i in range(1):
+        input_data = common.residual_block(input_data, 1024, 512, 1024)  # output channels must equal the 1024 input channels so the residual addition is shape-compatible
+
+    return route_1, route_2, input_data
+
+
diff --git a/4-Object_Detection/YOLOV3/core/yolov3_fnet.py b/4-Object_Detection/YOLOV3/core/yolov3_fnet.py
new file mode 100644
index 00000000..3066cb0e
--- /dev/null
+++ b/4-Object_Detection/YOLOV3/core/yolov3_fnet.py
@@ -0,0 +1,195 @@
+#! /usr/bin/env python
+# coding=utf-8
+#================================================================
+#   Copyright (C) 2019 * Ltd. All rights reserved.
+#
+#   Editor      : VIM
+#   File name   : yolov3_fnet.py
+#   Author      : YunYang1994
+#   Created date: 2019-07-12 13:47:10
+#   Description :
+#
+#================================================================
+
+import numpy as np
+import tensorflow as tf
+import core.utils as utils
+import core.common as common
+import core.backbone_fnet as backbone  # the lighter backbone introduced alongside this file
+from core.config import cfg
+
+
+NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
+ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)
+STRIDES = np.array(cfg.YOLO.STRIDES)
+IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
+
+def YOLOv3(input_layer):
+    route_1, route_2, conv = backbone.grid_eye_net_18(input_layer)
+
+    conv = common.convolutional(conv, (1, 1, 512, 256))
+    conv = common.convolutional(conv, (3, 3, 256, 512))
+    conv = common.convolutional(conv, (1, 1, 512, 256))
+
+    conv_lobj_branch = common.convolutional(conv, (3, 3, 256, 512))
+    conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 512, 3*(NUM_CLASS + 5)), activate=False, bn=False)
+
+    conv = common.convolutional(conv, (1, 1, 256, 128))
+    conv = common.upsample(conv)
+
+    conv = tf.concat([conv, route_2], axis=-1)
+
+    conv = common.convolutional(conv, (1, 1, 768, 256))#512+256
+    conv = common.convolutional(conv, (3, 3, 256, 512))
+    conv = common.convolutional(conv, (1, 1, 512, 256))
+
+    conv_mobj_branch = common.convolutional(conv, (3, 3, 256, 512))
+    conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3*(NUM_CLASS + 5)), activate=False, bn=False)
+
+    conv = common.convolutional(conv, (1, 1, 256, 128))
+    conv = common.upsample(conv)
+
+    conv = tf.concat([conv, route_1], axis=-1)
+
+    conv = common.convolutional(conv, (1, 1, 512, 128))#256+256
+    conv = common.convolutional(conv, (3, 3, 128, 256))
+    conv = common.convolutional(conv, (1, 1, 256, 128))
+
+    conv_sobj_branch = common.convolutional(conv, (3, 3, 128, 256))
+    conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3*(NUM_CLASS + 5)), activate=False, bn=False)
+
+    return [conv_sbbox, conv_mbbox, conv_lbbox]
+
+def decode(conv_output, i=0):
+    """
+    return tensor of shape [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes]
+            contains (x, y, w, h, score, probability)
+    """
+
+    conv_shape = tf.shape(conv_output)
+    batch_size = conv_shape[0]
+    output_size = conv_shape[1]
+
+    conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))
+
+    conv_raw_dxdy = conv_output[:, :, :, :, 0:2]
+    conv_raw_dwdh = conv_output[:, :, :, :, 2:4]
+    conv_raw_conf = conv_output[:, :, :, :, 4:5]
+    conv_raw_prob = conv_output[:, :, :, :, 5: ]
+
+    y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])
+    x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])
+
+    xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
+    xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, 3, 1])
+    xy_grid = tf.cast(xy_grid, tf.float32)
+
+    pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * STRIDES[i]
+    pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) * STRIDES[i]
+    pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
+
+    pred_conf = tf.sigmoid(conv_raw_conf)
+    pred_prob = tf.sigmoid(conv_raw_prob)
+
+    return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
+
+def bbox_iou(boxes1, boxes2):
+
+    boxes1_area = boxes1[..., 2] * boxes1[..., 3]
+    boxes2_area = boxes2[..., 2] * boxes2[..., 3]
+
+    boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
+                        boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
+    boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
+                        boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
+
+    left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
+    right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
+
+    inter_section = tf.maximum(right_down - left_up, 0.0)
+    inter_area = inter_section[..., 0] * inter_section[..., 1]
+    union_area = boxes1_area + boxes2_area - inter_area
+
+    return 1.0 * inter_area / union_area
+
+def bbox_giou(boxes1, boxes2):
+
+    boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
+                        boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
+    boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
+                        boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
+
+    boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
+                        tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)
+    boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
+                        tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)
+
+    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
+    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
+
+    left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
+    right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
+
+    inter_section = tf.maximum(right_down - left_up, 0.0)
+    inter_area = inter_section[..., 0] * inter_section[..., 1]
+    union_area = boxes1_area + boxes2_area - inter_area
+    iou = inter_area / union_area
+
+    enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
+    enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
+    enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
+    enclose_area = enclose[..., 0] * enclose[..., 1]
+    giou = iou - 1.0 * (enclose_area - union_area) / enclose_area
+
+    return giou
+
+
+def compute_loss(pred, conv, label, bboxes, i=0):
+
+    conv_shape = tf.shape(conv)
+    batch_size = conv_shape[0]
+    output_size = conv_shape[1]
+    input_size = STRIDES[i] * output_size
+    conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))
+
+    conv_raw_conf = conv[:, :, :, :, 4:5]
+    conv_raw_prob = conv[:, :, :, :, 5:]
+
+    pred_xywh = pred[:, :, :, :, 0:4]
+    pred_conf = pred[:, :, :, :, 4:5]
+
+    label_xywh = label[:, :, :, :, 0:4]
+    respond_bbox = label[:, :, :, :, 4:5]
+    label_prob = label[:, :, :, :, 5:]
+
+    giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis=-1)
+    input_size = tf.cast(input_size, tf.float32)
+
+    bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
+    giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)
+
+    iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
+    max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
+
+    respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < IOU_LOSS_THRESH, tf.float32)
+
+    conf_focal = tf.pow(respond_bbox - pred_conf, 2)
+
+    conf_loss = conf_focal * (
+            respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+            +
+            respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+    )
+
+    prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
+
+    giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
+    conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
+    prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
+
+    return giou_loss, conf_loss, prob_loss
+
+
+
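
Usage note (not part of the diff): a minimal sketch of how the pieces above would typically be wired together for training, assuming the surrounding repo's conventions. The input size (416) and the target[i] = (label, bboxes) layout produced by the repo's Dataset class are assumptions here, not defined in this change.

import tensorflow as tf
from core.yolov3_fnet import YOLOv3, decode, compute_loss

# Build the model: raw conv outputs plus decoded predictions for the 3 scales.
input_tensor = tf.keras.layers.Input([416, 416, 3])   # 416 assumed; any multiple of 32 works
conv_tensors = YOLOv3(input_tensor)                    # [conv_sbbox, conv_mbbox, conv_lbbox]

output_tensors = []
for i, conv_tensor in enumerate(conv_tensors):
    pred_tensor = decode(conv_tensor, i)               # (x, y, w, h) in input-image pixels + conf/probs
    output_tensors.append(conv_tensor)
    output_tensors.append(pred_tensor)

model = tf.keras.Model(input_tensor, output_tensors)

def train_step(image_data, target, optimizer):
    # target[i] = (label, bboxes) for scale i (assumed Dataset output format)
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0.0
        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, *target[i], i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]
        total_loss = giou_loss + conf_loss + prob_loss
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return total_loss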