@@ -4,11 +4,10 @@
 * Created on: Dec 19, 2016
 * Author: Marko Bjelonic
 * Institute: ETH Zurich, Robotic Systems Lab
 */

#include <iostream>

// yolo object detector
#include "darknet_ros/YoloObjectDetector.h"
#include "darknet_ros/YoloObjectDetector.hpp"

// Check for xServer
#include <X11/Xlib.h>
@@ -21,26 +20,20 @@ std::string darknetFilePath_ = DARKNET_FILE_PATH;

namespace darknet_ros {


// File-scope configuration buffers handed to the darknet C API
// (filled in init() from ROS parameters; darknet expects mutable char*).
char *cfg;
char *weights;
char *data;
char **detectionNames;

// NOTE(review): this file-scope copy of the latest camera frame and the free
// function below are pre-refactor leftovers; the class now has its own
// camImageCopy_ member and getIplImage() method — confirm and remove one.
cv::Mat camImageCopy_;
// Wraps the cached frame in a heap-allocated IplImage header.
// NOTE(review): the returned header is never deleted by visible callers and
// aliases camImageCopy_'s pixel data — ownership needs confirming.
IplImage* get_ipl_image() {
IplImage* ROS_img = new IplImage(camImageCopy_);
return ROS_img;
}

YoloObjectDetector::YoloObjectDetector(ros::NodeHandle nh):
nodeHandle_(nh),
imageTransport_(nodeHandle_),
numClasses_(0),
classLabels_(0),
rosBoxes_(0),
rosBoxCounter_(0),
rosBoxColors_(0),
opencvWindow_("YOLO V2 object detection")
YoloObjectDetector::YoloObjectDetector(ros::NodeHandle nh)
: nodeHandle_(nh),
imageTransport_(nodeHandle_),
numClasses_(0),
classLabels_(0),
rosBoxes_(0),
rosBoxCounter_(0),
robot_ID(0)
{
ROS_INFO("[YoloObjectDetector] Node started.");

@@ -52,46 +45,45 @@ YoloObjectDetector::YoloObjectDetector(ros::NodeHandle nh):
init();
}

bool YoloObjectDetector::readParameters() {
YoloObjectDetector::~YoloObjectDetector()
{
{
boost::unique_lock<boost::shared_mutex> lockNodeStatus(mutexNodeStatus_);
isNodeRunning_ = false;
}
yoloThread_.join();
}

/*
 * Reads node parameters and sizes the per-class bookkeeping vectors.
 * Always returns true.
 * (Fix: the block contained interleaved old+new duplicates of the
 * XOpenDisplay check and of two nodeHandle_.param calls — merge residue
 * producing duplicate statements; resolved to a single copy each.)
 */
bool YoloObjectDetector::readParameters()
{
  // Load common parameters.
  nodeHandle_.param("image_view/enable_opencv", viewImage_, true);
  nodeHandle_.param("image_view/use_darknet", darknetImageViewer_, false);
  nodeHandle_.param("image_view/wait_key_delay", waitKeyDelay_, 3);
  nodeHandle_.param("image_view/enable_console_output", enableConsoleOutput_, false);

  // Check if Xserver is running on Linux; without a display, never
  // try to open OpenCV windows.
  if (XOpenDisplay(NULL)) {
    ROS_INFO("[YoloObjectDetector] Xserver is running.");
  } else {
    ROS_INFO("[YoloObjectDetector] Xserver is not running.");
    viewImage_ = false;
  }

  // The darknet-internal viewer also needs a display.
  if (!viewImage_) {
    darknetImageViewer_ = false;
  }

  // Set vector sizes from the configured class label list.
  nodeHandle_.param("yolo_model/detection_classes/names", classLabels_,
                    std::vector<std::string>(0));
  numClasses_ = classLabels_.size();
  rosBoxes_ = std::vector<std::vector<RosBox_> >(numClasses_);
  rosBoxCounter_ = std::vector<int>(numClasses_);
  rosBoxColors_ = std::vector<cv::Scalar>(numClasses_);

  return true;
}

void YoloObjectDetector::init() {
void YoloObjectDetector::init()
{
ROS_INFO("[YoloObjectDetector] init().");

// Initialize color of bounding boxes of different object classes.
int incr = floor(255/numClasses_);
for (int i = 0; i < numClasses_; i++) {
rosBoxColors_[i] = cv::Scalar(255 - incr*i, 0 + incr*i, 255 - incr*i);
}

// Initialize deep network of darknet.
std::string weightsPath;
std::string configPath;
@@ -101,10 +93,11 @@ void YoloObjectDetector::init() {

// Threshold of object detection.
float thresh;
nodeHandle_.param("yolo_model/threshold/value", thresh, (float)0.3);
nodeHandle_.param("yolo_model/threshold/value", thresh, (float) 0.3);

// Path to weights file.
nodeHandle_.param("yolo_model/weight_file/name", weightsModel, std::string("tiny-yolo-voc.weights"));
nodeHandle_.param("yolo_model/weight_file/name", weightsModel,
std::string("tiny-yolo-voc.weights"));
nodeHandle_.param("weights_path", weightsPath, std::string("/default"));
weightsPath += "/" + weightsModel;
weights = new char[weightsPath.length() + 1];
@@ -124,21 +117,16 @@ void YoloObjectDetector::init() {
strcpy(data, dataPath.c_str());

// Get classes.
detectionNames = (char**)realloc((void*)detectionNames, (numClasses_ + 1) * sizeof(char*));
detectionNames = (char**) realloc((void*) detectionNames, (numClasses_ + 1) * sizeof(char*));
for (int i = 0; i < numClasses_; i++) {
detectionNames[i] = new char[classLabels_[i].length() + 1];
strcpy(detectionNames[i], classLabels_[i].c_str());
}

// Load network.
load_network_demo(cfg, weights, data,
thresh,
detectionNames, numClasses_,
darknetImageViewer_, waitKeyDelay_,
0,
0.5,
0, 0, 0, 0,
enableConsoleOutput_);
setupNetwork(cfg, weights, data, thresh, detectionNames, numClasses_,
0, 0, 1, 0.5, 0, 0, 0, 0);
yoloThread_ = std::thread(&YoloObjectDetector::yolo, this);

// Initialize publisher and subscriber.
std::string cameraTopicName;
@@ -153,6 +141,7 @@ void YoloObjectDetector::init() {
int detectionImageQueueSize;
bool detectionImageLatch;


std::string camOneTopicName;
std::string camTwoTopicName;
std::string camThreeTopicName;
@@ -177,7 +166,7 @@ void YoloObjectDetector::init() {

std::string compiledMessageTopicName4;
int compiledMessageTopicSize4;
bool compiledMessageTopicLatch4;
bool compiledMessageTopicLatch4;

nodeHandle_.param("subscribers/robot_one/topic", camOneTopicName, std::string("/webcam1/image_raw"));
nodeHandle_.param("subscribers/robot_two/topic", camTwoTopicName, std::string("/webcam2/image_raw"));
@@ -215,9 +204,9 @@ void YoloObjectDetector::init() {

nodeHandle_.param("publishers/compiled_message4/topic", compiledMessageTopicName4, std::string("/detection/compiled_ros_msg4"));
nodeHandle_.param("publishers/compiled_message4/queue_size", compiledMessageTopicSize4, 1000);
nodeHandle_.param("publishers/compiled_message4/latch", compiledMessageTopicLatch4, false);
nodeHandle_.param("publishers/compiled_message4/latch", compiledMessageTopicLatch4, false);


//imageSubscriber_ = imageTransport_.subscribe(cameraTopicName, cameraQueueSize, &YoloObjectDetector::cameraCallback,this); //*********************************************************
camOneSubscriber_ = imageTransport_.subscribe(camOneTopicName, camOneTopicSize, &YoloObjectDetector::cameraOneCallback, this);
camTwoSubscriber_ = imageTransport_.subscribe(camTwoTopicName, camTwoTopicSize, &YoloObjectDetector::cameraTwoCallback, this);
camThreeSubscriber_ = imageTransport_.subscribe(camThreeTopicName, camThreeTopicSize, &YoloObjectDetector::cameraThreeCallback, this);
@@ -232,141 +221,30 @@ void YoloObjectDetector::init() {
compiledMessagePublisher3_ = nodeHandle_.advertise<detection_msgs::CompiledMessage>(compiledMessageTopicName3, compiledMessageTopicSize3, compiledMessageTopicLatch3);
compiledMessagePublisher4_ = nodeHandle_.advertise<detection_msgs::CompiledMessage>(compiledMessageTopicName4, compiledMessageTopicSize4, compiledMessageTopicLatch4);

// imageSubscriber_ = imageTransport_.subscribe(cameraTopicName, cameraQueueSize,
// &YoloObjectDetector::cameraCallback, this);
// objectPublisher_ = nodeHandle_.advertise<std_msgs::Int8>(objectDetectorTopicName,
// objectDetectorQueueSize,
// objectDetectorLatch);
// boundingBoxesPublisher_ = nodeHandle_.advertise<darknet_ros_msgs::BoundingBoxes>(
// boundingBoxesTopicName, boundingBoxesQueueSize, boundingBoxesLatch);
// detectionImagePublisher_ = nodeHandle_.advertise<sensor_msgs::Image>(detectionImageTopicName,
// detectionImageQueueSize,
// detectionImageLatch);

// Action servers.
std::string checkForObjectsActionName;
nodeHandle_.param("actions/camera_reading/topic", checkForObjectsActionName, std::string("check_for_objects"));
nodeHandle_.param("actions/camera_reading/topic", checkForObjectsActionName,
std::string("check_for_objects"));
checkForObjectsActionServer_.reset(
new CheckForObjectsActionServer(
nodeHandle_, checkForObjectsActionName,
false));
new CheckForObjectsActionServer(nodeHandle_, checkForObjectsActionName, false));
checkForObjectsActionServer_->registerGoalCallback(
boost::bind(&YoloObjectDetector::checkForObjectsActionGoalCB, this));
checkForObjectsActionServer_->registerPreemptCallback(
boost::bind(&YoloObjectDetector::checkForObjectsActionPreemptCB, this));
checkForObjectsActionServer_->start();

// OpenCv image view.
if(viewImage_ && !darknetImageViewer_) {
cv::namedWindow(opencvWindow_, cv::WINDOW_NORMAL);
cv::moveWindow(opencvWindow_, 0, 0);
cv::resizeWindow(opencvWindow_, 500, 500);
}
}

// NOTE(review): second (pre-refactor) destructor definition left behind by a
// botched merge — it conflicts with the thread-joining destructor defined
// earlier in this file; one of the two must be removed before this
// translation unit compiles.
YoloObjectDetector::~YoloObjectDetector() {
// Close the OpenCV preview window if this instance opened one in init().
if(viewImage_ && !darknetImageViewer_) {
cv::destroyWindow(opencvWindow_);
}
}

/*
 * Draws the detections of one class onto inputFrame and appends the
 * corresponding BoundingBox messages to boundingBoxesResults_.
 * Box coordinates arrive normalized (center x/y, width, height in [0,1])
 * and are scaled by the cached frame dimensions.
 * Only "person" detections are kept; every other label is skipped.
 */
void YoloObjectDetector::drawBoxes(cv::Mat &inputFrame, std::vector<RosBox_> &rosBoxes, int &numberOfObjects,
cv::Scalar &rosBoxColor, const std::string &objectLabel) {
darknet_ros_msgs::BoundingBox boundingBox;

for (int i = 0; i < numberOfObjects; i++) {
// Convert normalized center/size to pixel corner coordinates.
int xmin = (rosBoxes[i].x - rosBoxes[i].w/2)*frameWidth_;
int ymin = (rosBoxes[i].y - rosBoxes[i].h/2)*frameHeight_;
int xmax = (rosBoxes[i].x + rosBoxes[i].w/2)*frameWidth_;
int ymax = (rosBoxes[i].y + rosBoxes[i].h/2)*frameHeight_;

// Filter: publish/draw only human ("person") detections.
if(objectLabel != "person"){
continue;
}
boundingBox.Class = objectLabel;
boundingBox.probability = rosBoxes[i].prob;
boundingBox.xmin = xmin;
boundingBox.ymin = ymin;
boundingBox.xmax = xmax;
boundingBox.ymax = ymax;
boundingBoxesResults_.boundingBoxes.push_back(boundingBox);

// Draw this detection's rectangle and probability caption.
// NOTE(review): cv::rectangle is called with 2 in the color parameter
// position (a Scalar(2) — near-black); rosBoxColor looks intended here.
cv::Point topLeftCorner = cv::Point(xmin, ymin); //xmin, ymin
cv::Point botRightCorner = cv::Point(xmax, ymax);
cv::rectangle(inputFrame, topLeftCorner, botRightCorner, 2);
//ROS_INFO_STREAM(xmin >> " ">> ymin >>" ">> xmax >> " " >> ymax );
std::ostringstream probability;
probability << rosBoxes[i].prob*100;
cv::putText(inputFrame, objectLabel + " (" + probability.str() + "%)", cv::Point(xmin, ymax+15), cv::FONT_HERSHEY_PLAIN,
1.0, rosBoxColor, 2.0);
}
}

/*
 * Runs one detection pass over fullFrame (pre-refactor, synchronous path):
 * calls demo_yolo(), buckets the returned boxes per class, draws them,
 * publishes detection flag / bounding boxes / compiled message, and
 * answers a pending CheckForObjects action goal if one is active.
 * robotId tags the outgoing CompiledMessage; id tags the action result.
 */
void YoloObjectDetector::runYolo(cv::Mat &fullFrame, int robotId, int id) {
if(enableConsoleOutput_) {
ROS_INFO("[YoloObjectDetector] runYolo().");
}

detection_msgs::CompiledMessage outmsg;
outmsg.robotId = robotId;

cv::Mat inputFrame = fullFrame.clone();
cv::Mat inputFrame_empty = fullFrame.clone(); // Cloning live video to publish without bounding boxes

// run yolo and get bounding boxes for objects
boxes_ = demo_yolo();

// get the number of bounding boxes found; by convention the count is
// stored in element 0 of the returned array
int num = boxes_[0].num;

// if at least one BoundingBox found, draw box
if (num > 0 && num <= 100) {
if(!darknetImageViewer_ && enableConsoleOutput_) {
std::cout << "# Objects: " << num << std::endl;
}
// split bounding boxes by class
for (int i = 0; i < num; i++) {
for (int j = 0; j < numClasses_; j++) {
if (boxes_[i].Class == j) {
rosBoxes_[j].push_back(boxes_[i]);
rosBoxCounter_[j]++;
if(!darknetImageViewer_ && enableConsoleOutput_) {
std::cout << classLabels_[boxes_[i].Class] << " (" << boxes_[i].prob*100 << "%)" << std::endl;
}
}
}
}

// send message that an object has been detected
std_msgs::Int8 msg;
msg.data = 1;
objectPublisher_.publish(msg);

// draw every class that collected at least one box
for (int i = 0; i < numClasses_; i++) {
if (rosBoxCounter_[i] > 0) drawBoxes(inputFrame, rosBoxes_[i],
rosBoxCounter_[i], rosBoxColors_[i], classLabels_[i]);
}
outmsg.boxes = boundingBoxesResults_;
boundingBoxesPublisher_.publish(boundingBoxesResults_);
boundingBoxesResults_.boundingBoxes.clear();

// reset the per-class buckets for the next frame
for (int i = 0; i < numClasses_; i++) {
rosBoxes_[i].clear();
rosBoxCounter_[i] = 0;
}

if(viewImage_ && !darknetImageViewer_) {
cv::imshow(opencvWindow_, inputFrame); //inputFrame
cv::waitKey(waitKeyDelay_);
}

// Publish the unannotated frame inside the compiled message.
if (!publishDetectionImage(inputFrame_empty, outmsg)) ROS_DEBUG("Detection image has not been broadcasted.");
}
else {
std_msgs::Int8 msg;
msg.data = 0;
objectPublisher_.publish(msg);
}
// Answer a pending CheckForObjects action goal, if any.
// NOTE(review): boundingBoxesResults_ was cleared above, so the action
// result boxes are empty here — confirm whether that is intended.
if (isCheckingForObjects()) {
ROS_DEBUG("[YoloObjectDetector] check for objects in image.");
darknet_ros_msgs::CheckForObjectsResult objectsActionResult;
objectsActionResult.id = id;
objectsActionResult.boundingBoxes = boundingBoxesResults_;
checkForObjectsActionServer_->setSucceeded(objectsActionResult,"Send bounding boxes.");
}
}

void YoloObjectDetector::cameraOneCallback(const sensor_msgs::ImageConstPtr& msg) {
YoloObjectDetector::cameraCallback(msg, 1);
@@ -384,93 +262,488 @@ void YoloObjectDetector::cameraFourCallback(const sensor_msgs::ImageConstPtr& ms
YoloObjectDetector::cameraCallback(msg, 4);
}

/*
 * Camera image callback shared by the four per-robot subscribers.
 * Converts the ROS image to BGR8, caches a copy for the detector thread
 * (under mutexImageCallback_), marks a frame as available, and records
 * which robot the frame came from.
 * (Fix: the block contained both the pre- and post-refactor definitions
 * interleaved by a bad merge — duplicate signature, duplicate catch —
 * resolved to the locking version.)
 */
void YoloObjectDetector::cameraCallback(const sensor_msgs::ImageConstPtr& msg, int robotNum)
{
  ROS_DEBUG("[YoloObjectDetector] USB image received.");

  cv_bridge::CvImagePtr cam_image;
  // Remember which robot produced this frame for the compiled message.
  this->robot_ID = robotNum;

  try {
    cam_image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
  } catch (cv_bridge::Exception& e) {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  if (cam_image) {
    {
      // Publish the frame to the detector thread.
      boost::unique_lock<boost::shared_mutex> lockImageCallback(mutexImageCallback_);
      camImageCopy_ = cam_image->image.clone();
    }
    {
      boost::unique_lock<boost::shared_mutex> lockImageStatus(mutexImageStatus_);
      imageStatus_ = true;
    }
    frameWidth_ = cam_image->image.size().width;
    frameHeight_ = cam_image->image.size().height;
    // NOTE(review): synchronous runYolo call kept from SOURCE; confirm it
    // still exists alongside the yolo() worker thread.
    runYolo(cam_image->image, robotNum);
  }
  return;
}

void YoloObjectDetector::checkForObjectsActionGoalCB() {
if(enableConsoleOutput_) {
ROS_INFO("[YoloObjectDetector] Start check for objects action.");
}
void YoloObjectDetector::checkForObjectsActionGoalCB()
{
ROS_DEBUG("[YoloObjectDetector] Start check for objects action.");

boost::shared_ptr<const darknet_ros_msgs::CheckForObjectsGoal> imageActionPtr = checkForObjectsActionServer_->acceptNewGoal();
boost::shared_ptr<const darknet_ros_msgs::CheckForObjectsGoal> imageActionPtr =
checkForObjectsActionServer_->acceptNewGoal();
sensor_msgs::Image imageAction = imageActionPtr->image;

cv_bridge::CvImagePtr cam_image;

try {
cam_image = cv_bridge::toCvCopy(imageAction, sensor_msgs::image_encodings::BGR8);
}
catch (cv_bridge::Exception& e) {
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
} catch (cv_bridge::Exception& e) {
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}

if (cam_image) {
camImageCopy_ = cam_image->image.clone();
{
boost::unique_lock<boost::shared_mutex> lockImageCallback(mutexImageCallback_);
camImageCopy_ = cam_image->image.clone();
}
{
boost::unique_lock<boost::shared_mutex> lockImageCallback(mutexActionStatus_);
actionId_ = imageActionPtr->id;
}
{
boost::unique_lock<boost::shared_mutex> lockImageStatus(mutexImageStatus_);
imageStatus_ = true;
}
frameWidth_ = cam_image->image.size().width;
frameHeight_ = cam_image->image.size().height;
runYolo(cam_image->image, imageActionPtr->id);
}
return;
}

void YoloObjectDetector::checkForObjectsActionPreemptCB() {
ROS_INFO("[YoloObjectDetector] Preempt check for objects action.");
void YoloObjectDetector::checkForObjectsActionPreemptCB()
{
ROS_DEBUG("[YoloObjectDetector] Preempt check for objects action.");
checkForObjectsActionServer_->setPreempted();
}

/*
 * True while a CheckForObjects goal is active and not preempted (and ROS
 * itself is still up).
 * (Fix: removed the duplicated pre-refactor definition left by a merge.)
 */
bool YoloObjectDetector::isCheckingForObjects() const
{
  return (ros::ok() && checkForObjectsActionServer_->isActive()
      && !checkForObjectsActionServer_->isPreemptRequested());
}

/*
 * Publishes detectionImage on the detection-image topic and, stamped into
 * `message`, on the compiled-message topic matching message.robotId
 * (1..3 explicit; anything else falls through to publisher 4).
 * Returns false without publishing when nobody subscribes to the
 * detection-image topic.
 * (Fix: merge residue removed — two interleaved signatures and a stray `}`
 * after the brace-less subscriber check broke the syntax.)
 */
bool YoloObjectDetector::publishDetectionImage(const cv::Mat& detectionImage, detection_msgs::CompiledMessage message)
{
  if (detectionImagePublisher_.getNumSubscribers() < 1)
    return false;

  cv_bridge::CvImage cvImage;
  cvImage.header.stamp = ros::Time::now();
  cvImage.header.frame_id = "detection_image";
  cvImage.encoding = sensor_msgs::image_encodings::BGR8;
  cvImage.image = detectionImage;

  message.image = *cvImage.toImageMsg();
  detectionImagePublisher_.publish(*cvImage.toImageMsg());

  // Route the compiled message to the per-robot publisher.
  if (message.robotId == 1)
    compiledMessagePublisher_.publish(message);
  else if (message.robotId == 2)
    compiledMessagePublisher2_.publish(message);
  else if (message.robotId == 3)
    compiledMessagePublisher3_.publish(message);
  else
    compiledMessagePublisher4_.publish(message);

  ROS_DEBUG("Detection image has been published.");
  return true;
}

/*
 * Returns the current wall-clock time in seconds (with microsecond
 * resolution), or 0 if gettimeofday() fails.
 */
double YoloObjectDetector::getWallTime()
{
  struct timeval now;
  if (gettimeofday(&now, NULL) != 0) {
    return 0;
  }
  return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}

/*
 * Fetch stage of the detection pipeline: copies the latest cached camera
 * frame into the current slot of the triple buffer, tags it with the
 * current action id, converts RGB<->BGR channel order, and letterboxes it
 * to the network input size.
 * Always returns 0 (thread-function signature).
 */
void *YoloObjectDetector::fetchInThread()
{
// NOTE(review): ROS_img is heap-allocated by getIplImage() and never
// deleted here — leaks one IplImage header per fetched frame.
IplImage* ROS_img = getIplImage();
ipl_into_image(ROS_img, buff_[buffIndex_]);
{
// NOTE(review): actionId_ is read under mutexImageCallback_ here but
// written under mutexActionStatus_ in the goal callback — confirm which
// mutex is meant to guard it.
boost::shared_lock<boost::shared_mutex> lock(mutexImageCallback_);
buffId_[buffIndex_] = actionId_;
}
rgbgr_image(buff_[buffIndex_]);
letterbox_image_into(buff_[buffIndex_], net_.w, net_.h, buffLetter_[buffIndex_]);
return 0;
}

/*
 * Detect stage of the pipeline: runs the network on the previously fetched
 * letterboxed frame, averages predictions over demoFrame_ frames, extracts
 * detection/region boxes, applies NMS, draws them on the display buffer,
 * and copies boxes above the size threshold into roiBoxes_ for publishing.
 * The detection count is stored in roiBoxes_[0].num. Always returns 0.
 */
void *YoloObjectDetector::detectInThread()
{
running_ = 1;
float nms = .4;

// Last layer of the network produces the detections.
layer l = net_.layers[net_.n - 1];
// Run inference on the frame fetched two iterations ago (pipeline offset).
float *X = buffLetter_[(buffIndex_ + 2) % 3].data;
float *prediction = network_predict(net_, X);

// Rolling average of the last demoFrame_ predictions smooths the output.
memcpy(predictions_[demoIndex_], prediction, l.outputs * sizeof(float));
mean_arrays(predictions_, demoFrame_, l.outputs, avg_);
l.output = lastAvg2_;
if (demoDelay_ == 0)
l.output = avg_;
if (l.type == DETECTION) {
get_detection_boxes(l, 1, 1, demoThresh_, probs_, boxes_, 0);
} else if (l.type == REGION) {
get_region_boxes(l, buff_[0].w, buff_[0].h, net_.w, net_.h, demoThresh_, probs_, boxes_, 0, 0,
demoHier_, 1);
} else {
error("Last layer must produce detections\n");
}
// Non-maximum suppression removes overlapping duplicates.
if (nms > 0)
do_nms_obj(boxes_, probs_, l.w * l.h * l.n, l.classes, nms);

if (enableConsoleOutput_) {
printf("\nFPS:%.1f\n", fps_);
printf("Objects:\n\n");
}
// Annotate the frame that will be shown/published next.
image display = buff_[(buffIndex_ + 2) % 3];
draw_detections(display, demoDetections_, demoThresh_, boxes_, probs_, demoNames_, demoAlphabet_,
demoClasses_);

// extract the bounding boxes and send them to ROS
int total = l.w * l.h * l.n;
int i, j;
int count = 0;
for (i = 0; i < total; ++i) {
float xmin = boxes_[i].x - boxes_[i].w / 2.;
float xmax = boxes_[i].x + boxes_[i].w / 2.;
float ymin = boxes_[i].y - boxes_[i].h / 2.;
float ymax = boxes_[i].y + boxes_[i].h / 2.;

// Clamp to the normalized [0,1] image area.
if (xmin < 0)
xmin = 0;
if (ymin < 0)
ymin = 0;
if (xmax > 1)
xmax = 1;
if (ymax > 1)
ymax = 1;

// iterate through possible boxes and collect the bounding boxes
for (j = 0; j < l.classes; ++j) {
if (probs_[i][j]) {
float x_center = (xmin + xmax) / 2;
float y_center = (ymin + ymax) / 2;
float BoundingBox_width = xmax - xmin;
float BoundingBox_height = ymax - ymin;

// define bounding box
// BoundingBox must be 1% size of frame (3.2x2.4 pixels)
if (BoundingBox_width > 0.01 && BoundingBox_height > 0.01) {
roiBoxes_[count].x = x_center;
roiBoxes_[count].y = y_center;
roiBoxes_[count].w = BoundingBox_width;
roiBoxes_[count].h = BoundingBox_height;
roiBoxes_[count].Class = j;
roiBoxes_[count].prob = probs_[i][j];
count++;
}
}
}
}

// create array to store found bounding boxes
// if no object detected, make sure that ROS knows that num = 0
if (count == 0) {
roiBoxes_[0].num = 0;
} else {
roiBoxes_[0].num = count;
}

// Advance the rolling-average slot and mark the stage idle.
demoIndex_ = (demoIndex_ + 1) % demoFrame_;
running_ = 0;
return 0;
}

/*
 * Display stage: shows the most recently annotated frame in the "Demo"
 * window and handles keyboard input. Returns 0 (thread-function signature).
 *
 * Keys: Enter cycles the averaging delay (0 -> 60 -> 5 -> 0), Esc stops the
 * demo loop, Up/Down adjust the detection threshold (floor 0.02),
 * Right/Left adjust the hierarchy threshold (floor 0).
 */
void *YoloObjectDetector::displayInThread(void *ptr)
{
  show_image_cv(buff_[(buffIndex_ + 1) % 3], "Demo", ipl_);
  int key = cvWaitKey(waitKeyDelay_);
  if (key != -1) {
    key = key % 256;
  }
  switch (key) {
    case 10:  // Enter
      if (demoDelay_ == 0) {
        demoDelay_ = 60;
      } else if (demoDelay_ == 5) {
        demoDelay_ = 0;
      } else if (demoDelay_ == 60) {
        demoDelay_ = 5;
      } else {
        demoDelay_ = 0;
      }
      break;
    case 27:  // Esc
      demoDone_ = 1;
      return 0;
    case 82:  // Up arrow
      demoThresh_ += .02;
      break;
    case 84:  // Down arrow
      demoThresh_ -= .02;
      if (demoThresh_ <= .02) {
        demoThresh_ = .02;
      }
      break;
    case 83:  // Right arrow
      demoHier_ += .02;
      break;
    case 81:  // Left arrow
      demoHier_ -= .02;
      if (demoHier_ <= .0) {
        demoHier_ = .0;
      }
      break;
    default:
      break;
  }
  return 0;
}

/*
 * Runs the display stage forever; never returns.
 */
void *YoloObjectDetector::displayLoop(void *ptr)
{
  for (;;) {
    displayInThread(0);
  }
}

/*
 * Runs the detect stage forever; never returns.
 */
void *YoloObjectDetector::detectLoop(void *ptr)
{
  for (;;) {
    detectInThread();
  }
}

/*
 * Stores the demo configuration, allocates the prediction-averaging
 * buffers, loads the label alphabet, parses the network configuration and
 * (if given) loads the trained weights, then fixes the batch size to 1.
 * Note: w, h and frames are currently unused.
 */
void YoloObjectDetector::setupNetwork(char *cfgfile, char *weightfile, char *datafile, float thresh,
                                      char **names, int classes,
                                      int delay, char *prefix, int avg_frames, float hier, int w, int h,
                                      int frames, int fullscreen)
{
  // Cache the demo configuration in the corresponding members.
  demoPrefix_ = prefix;
  demoDelay_ = delay;
  demoFrame_ = avg_frames;
  demoNames_ = names;
  demoClasses_ = classes;
  demoThresh_ = thresh;
  demoHier_ = hier;
  fullScreen_ = fullscreen;

  // One prediction buffer pointer per averaged frame (rows allocated in yolo()).
  predictions_ = (float **) calloc(demoFrame_, sizeof(float*));

  // Label glyphs used by draw_detections().
  demoAlphabet_ = load_alphabet_with_file(datafile);

  printf("YOLO_V2\n");
  net_ = parse_network_cfg(cfgfile);
  if (weightfile) {
    load_weights(&net_, weightfile);
  }
  set_batch_network(&net_, 1);
}

/*
 * Main detection thread. Waits until the first camera frame arrives, then
 * allocates the averaging/box buffers, initializes the triple frame buffer
 * from the current image, optionally opens the "Demo" window, and runs the
 * fetch/detect pipeline until demoDone_ is set or the node stops.
 */
void YoloObjectDetector::yolo()
{
// Block until a camera frame is available (or the node shuts down).
const auto wait_duration = std::chrono::milliseconds(2000);
while (!getImageStatus()) {
printf("Waiting for image.\n");
if (!isNodeRunning()) {
return;
}
std::this_thread::sleep_for(wait_duration);
}

std::thread detect_thread;
std::thread fetch_thread;

// Fixed seed: darknet uses rand() internally (e.g. for colors).
srand(2222222);

layer l = net_.layers[net_.n - 1];
demoDetections_ = l.n * l.w * l.h;
int j;

// Output-sized accumulators for the rolling prediction average.
avg_ = (float *) calloc(l.outputs, sizeof(float));
lastAvg_ = (float *) calloc(l.outputs, sizeof(float));
lastAvg2_ = (float *) calloc(l.outputs, sizeof(float));
for (j = 0; j < demoFrame_; ++j)
predictions_[j] = (float *) calloc(l.outputs, sizeof(float));

// One box / probability row per network grid cell and anchor.
boxes_ = (box *) calloc(l.w * l.h * l.n, sizeof(box));
roiBoxes_ = (darknet_ros::RosBox_ *) calloc(l.w * l.h * l.n, sizeof(darknet_ros::RosBox_));
probs_ = (float **) calloc(l.w * l.h * l.n, sizeof(float *));
for (j = 0; j < l.w * l.h * l.n; ++j)
probs_[j] = (float *) calloc(l.classes + 1, sizeof(float));

// Seed the triple buffer with the current camera frame.
// NOTE(review): ROS_img from getIplImage() is never deleted — leak.
IplImage* ROS_img = getIplImage();
buff_[0] = ipl_to_image(ROS_img);
buff_[1] = copy_image(buff_[0]);
buff_[2] = copy_image(buff_[0]);
buffLetter_[0] = letterbox_image(buff_[0], net_.w, net_.h);
buffLetter_[1] = letterbox_image(buff_[0], net_.w, net_.h);
buffLetter_[2] = letterbox_image(buff_[0], net_.w, net_.h);
ipl_ = cvCreateImage(cvSize(buff_[0].w, buff_[0].h), IPL_DEPTH_8U, buff_[0].c);

int count = 0;

// Open the display window unless frames are being saved to disk instead.
if (!demoPrefix_ && viewImage_) {
cvNamedWindow("Demo", CV_WINDOW_NORMAL);
if (fullScreen_) {
cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
} else {
cvMoveWindow("Demo", 0, 0);
cvResizeWindow("Demo", 1352, 1013);
}
}

demoTime_ = getWallTime();

// Pipeline loop: fetch and detect run concurrently each iteration.
while (!demoDone_) {
buffIndex_ = (buffIndex_ + 1) % 3;
fetch_thread = std::thread(&YoloObjectDetector::fetchInThread, this);
detect_thread = std::thread(&YoloObjectDetector::detectInThread, this);
if (!demoPrefix_) {
// Update FPS and rotate the averaged-output buffers.
if (count % (demoDelay_ + 1) == 0) {
fps_ = 1. / (getWallTime() - demoTime_);
demoTime_ = getWallTime();
float *swap = lastAvg_;
lastAvg_ = lastAvg2_;
lastAvg2_ = swap;
memcpy(lastAvg_, avg_, l.outputs * sizeof(float));
}
if (viewImage_) {
displayInThread(0);
}
publishInThread();
} else {
// Headless mode: dump each frame to "<prefix>_<count>" instead.
char name[256];
sprintf(name, "%s_%08d", demoPrefix_, count);
save_image(buff_[(buffIndex_ + 1) % 3], name);
}
fetch_thread.join();
detect_thread.join();
++count;
if (!isNodeRunning()) {
demoDone_ = true;
}
}

}

/*
 * Wraps the cached camera frame in a new heap-allocated IplImage header,
 * taken under a reader lock on mutexImageCallback_.
 * NOTE(review): the caller owns the returned header but visible callers
 * never delete it (leak); the header also aliases camImageCopy_'s pixel
 * data, which may be replaced by the camera callback after the lock is
 * released — confirm the intended lifetime.
 */
IplImage* YoloObjectDetector::getIplImage()
{
boost::shared_lock<boost::shared_mutex> lock(mutexImageCallback_);
IplImage* ROS_img = new IplImage(camImageCopy_);
return ROS_img;
}

bool YoloObjectDetector::getImageStatus(void)
{
boost::shared_lock<boost::shared_mutex> lock(mutexImageStatus_);
return imageStatus_;
}

bool YoloObjectDetector::isNodeRunning(void)
{
boost::shared_lock<boost::shared_mutex> lock(mutexNodeStatus_);
return isNodeRunning_;
}

void *YoloObjectDetector::publishInThread()
{
detection_msgs::CompiledMessage outmsg;


//message.image = *cvImage.toImageMsg();
outmsg.robotId = this->robot_ID;

// Publish image.
cv::Mat cvImage = cv::cvarrToMat(ipl_);
cv::Mat clone_image = cvImage;

cv_bridge::CvImage cvImage1;
cvImage1.header.stamp = ros::Time::now();
cvImage1.header.frame_id = "detection_image";
cvImage1.encoding = sensor_msgs::image_encodings::BGR8;
cvImage1.image = cvImage;
outmsg.image = *cvImage1.toImageMsg();



if (!publishDetectionImage(cv::Mat(cvImage), outmsg)) {
ROS_DEBUG("Detection image has not been broadcasted.");
}

// Publish bounding boxes and detection result.
int num = roiBoxes_[0].num;
if (num > 0 && num <= 100) {
for (int i = 0; i < num; i++) {
for (int j = 0; j < numClasses_; j++) {
if (roiBoxes_[i].Class == j) {
rosBoxes_[j].push_back(roiBoxes_[i]);
rosBoxCounter_[j]++;
}
}
}

std_msgs::Int8 msg;
msg.data = num;
objectPublisher_.publish(msg);

for (int i = 0; i < numClasses_; i++) {
if (rosBoxCounter_[i] > 0) {
darknet_ros_msgs::BoundingBox boundingBox;

for (int j = 0; j < rosBoxCounter_[i]; j++) {
if(classLabels_[j] == "person"){
int xmin = (rosBoxes_[i][j].x - rosBoxes_[i][j].w / 2) * frameWidth_;
int ymin = (rosBoxes_[i][j].y - rosBoxes_[i][j].h / 2) * frameHeight_;
int xmax = (rosBoxes_[i][j].x + rosBoxes_[i][j].w / 2) * frameWidth_;
int ymax = (rosBoxes_[i][j].y + rosBoxes_[i][j].h / 2) * frameHeight_;
std::cout<<"hereeeeeeeeeeeeeeeeeeee";
boundingBox.Class = classLabels_[i];
boundingBox.probability = rosBoxes_[i][j].prob;
boundingBox.xmin = xmin;
boundingBox.ymin = ymin;
boundingBox.xmax = xmax;
boundingBox.ymax = ymax;
boundingBoxesResults_.boundingBoxes.push_back(boundingBox);
}
}
}
}
boundingBoxesResults_.header.stamp = ros::Time::now();
boundingBoxesResults_.header.frame_id = "detection";
outmsg.boxes = boundingBoxesResults_;
boundingBoxesPublisher_.publish(boundingBoxesResults_);

if(outmsg.robotId == 1)
compiledMessagePublisher_.publish(outmsg);
else if(outmsg.robotId == 2)
compiledMessagePublisher2_.publish(outmsg);
else if(outmsg.robotId == 3)
compiledMessagePublisher3_.publish(outmsg);
else if(outmsg.robotId == 4)
compiledMessagePublisher4_.publish(outmsg);


} else {
std_msgs::Int8 msg;
msg.data = 0;
objectPublisher_.publish(msg);
}
if (isCheckingForObjects()) {
ROS_DEBUG("[YoloObjectDetector] check for objects in image.");
darknet_ros_msgs::CheckForObjectsResult objectsActionResult;
objectsActionResult.id = buffId_[0];
objectsActionResult.boundingBoxes = boundingBoxesResults_;
checkForObjectsActionServer_->setSucceeded(objectsActionResult, "Send bounding boxes.");
}
boundingBoxesResults_.boundingBoxes.clear();
for (int i = 0; i < numClasses_; i++) {
rosBoxes_[i].clear();
rosBoxCounter_[i] = 0;
}

return 0;
}


} /* namespace darknet_ros*/