@@ -0,0 +1,58 @@

#SHELL = sh -xv

# GPU/cuDNN/OpenCV build of the darknet-cpp based YOLOProc (Jetson TX2, aarch64).
FLAGS := -DCOMPILE_GPU=1 -DGPU -DOPENCV -DCUDNN -fPIC

# Library search path for the darknet-cpp shared library.
YOLO_LIBS := -L$(HOME)/darknet-cpp/lib

LDIRS := $(YOLO_LIBS) -L/usr/local/lib -L/usr/local/cuda/lib -L/usr/local/cuda/targets/aarch64-linux/lib

WPILIBS := -lntcore -lCameraServer -lcscore
CVLIBS := -lopencv_highgui -lopencv_core -lopencv_calib3d -lopencv_contrib -lntcore -lopencv_imgproc -lopencv_video -lopencv_videoio
CUDALIBS := -lcudart -lopencv_cudaimgproc -lcudnn -lcublas -lcurand
CAFFELIBS := -lglog -lgflags -lprotobuf -lboost_system -lboost_filesystem -lboost_regex -lm -lhdf5_serial_hl -lhdf5_serial
#YOLOLIB := -ldarknet_c -lstdc++ -pthread
YOLOLIB := -ldarknet-cpp-shared -pthread

LIBS := $(CVLIBS) $(CUDALIBS) $(WPILIBS) $(CAFFELIBS) $(YOLOLIB)

# Simple (:=) so pkg-config runs once at parse time, not on every expansion.
LDFLAGS := $(shell pkg-config --libs opencv)
CPP_SRCS := YOLOProc.cpp

OBJS := YOLOProc.o

SRCDIR := ../src

YOLO_INCLUDES := -I$(HOME)/darknet-cpp/src

INCLUDES := $(YOLO_INCLUDES) -I/usr/local/include -I/usr/local/cuda/targets/aarch64-linux/include

# Embed runtime search paths so YOLOProc finds libdarknet-cpp-shared and the
# local libs without LD_LIBRARY_PATH.  Every -rpath option AND its argument
# must each be forwarded to the linker with its own -Xlinker; the previous
# form passed an -L flag (or nothing) as the -rpath value, which ld rejects.
RPATH := -Xlinker -rpath -Xlinker $(HOME)/darknet-cpp/lib -Xlinker -rpath -Xlinker /usr/local/lib

# 'all' and 'clean' are commands, not files — always out of date.
.PHONY: all clean

# Add inputs and outputs from these tool invocations to the build variables

# All Target
all: YOLOProc

# Tool invocations
YOLOProc: $(OBJS) Makefile
	@echo 'Building target: $@'
	g++ $(RPATH) $(FLAGS) $(LDIRS) -o "YOLOProc" $(OBJS) $(LIBS)
	@echo 'Finished building target: $@'
	@echo ' '

# Other Targets
clean:
	-$(RM) $(OBJS) *.d YOLOProc
	-@echo ' '

# Each subdirectory must supply rules for building sources it contributes
%.o: $(SRCDIR)/%.cpp Makefile
	@echo 'Building file: $<'
	@echo 'Invoking: Cross G++ Compiler'
	g++ -std=c++11 -fpermissive $(INCLUDES) $(FLAGS) -O3 -Wall -c -fmessage-length=0 -o "$@" "$<"
	@echo 'Finished building: $<'
	@echo ' '

YOLOProc.o: $(SRCDIR)/YOLOProc.cpp

@@ -0,0 +1,27 @@

#!/usr/bin/env bash
# Launch YOLOProc on the Jetson against the MJPEG stream served by Ubuntu14.

# Stop any previous instances; -q suppresses the error when none are running.
killall -q YOLOProc gazebo_test.sh

APPDIR=$HOME/YOLOProc/Jetson-TX2
DATADIR=$HOME/data/yolo

# Alternative model/config pairs — uncomment exactly ONE pair.
# (Previously all four pairs were assigned in sequence, so only the last
#  one took effect; the dead assignments are now kept as comments.)
#WEIGHTS=${DATADIR}/tiny-yolo-balltote_2000.weights
#CFG=${DATADIR}/tiny-yolo-balltote.cfg

#WEIGHTS=${DATADIR}/yolo-2.weights
#CFG=${DATADIR}/yolo.2_balltote.cfg

# Active pair: two-class (ball/tote) 300x300 model.
WEIGHTS=${DATADIR}/yolo-300.weights
CFG=${DATADIR}/yolo.2_balltote_300x300.cfg

DATA=${DATADIR}/balltote.data

STREAM="http://Ubuntu14.local:5002/?action=stream?dummy=param.mjpg"
OUTPUT=Ubuntu14.local

# Quote all expansions so paths with spaces survive word splitting.
"${APPDIR}/YOLOProc" --thresh 0.3 --publish "$OUTPUT" --output "Annotated" "${DATA}" "${CFG}" "${WEIGHTS}" "${STREAM}"
@@ -1,15 +1,19 @@
CAFFE_ROOT=/usr/local/ssd-caffe
#SHELL = sh -xv
FLAGS := -DCOMPILE_GPU=1 -DGPU -DOPENCV -DCUDNN
FLAGS := -DCOMPILE_GPU=1 -DGPU -DOPENCV -DCUDNN -fPIC

#YOLO_LIBS=-L$(HOME)/AI/darknet-git
YOLO_LIBS=-L/usr/local/yolo/lib

#LDIRS := -L$(CAFFE_ROOT)/lib -L/usr/local/lib -L/usr/local/cuda/lib -L$(HOME)/wpilib/simulation/lib -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/local/yolo/lib
LDIRS := -L/usr/local/lib -L/usr/local/cuda/lib -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/local/yolo/lib -L$(HOME)/wpilib/simulation/lib
LDIRS := -L/usr/local/lib -L/usr/local/cuda/lib -L/usr/local/cuda/targets/x86_64-linux/lib -L/usr/local/yolo/lib -L$(HOME)/wpilib/simulation/lib $(YOLO_LIBS)

WPILIBS := -lntcore -lCameraServer -lcscore
CVLIBS := -lopencv_highgui -lopencv_core -lopencv_calib3d -lopencv_contrib -lntcore -lopencv_imgproc -lopencv_video -lopencv_videoio
CUDALIBS := -lcudart -lopencv_cudaimgproc -lcudnn -lcublas -lcurand
CAFFELIBS := -lglog -lgflags -lprotobuf -lboost_system -lboost_filesystem -lboost_regex -lm -lhdf5_hl -lhdf5 # -lcaffe
YOLOLIB := -ldarknet -lstdc++ -pthread
#YOLOLIB := -ldarknet_c -lstdc++ -pthread
YOLOLIB := -ldarknet -pthread

LIBS := $(CVLIBS) $(CUDALIBS) $(WPILIBS) $(CAFFELIBS) $(YOLOLIB)

@@ -20,9 +24,14 @@ OBJS := YOLOProc.o

SRCDIR=../src

INCLUDES=-I../src -I$(CAFFE_ROOT)/include -I/usr/local/include -I$(HOME)/wpilib/simulation/include -I/usr/local/cuda/targets/x86_64-linux/include -I/usr/local/yolo/include
#YOLO_INCLUDES=-I$(HOME)/AI/darknet-git/src -I$(HOME)/AI/darknet-git/include
YOLO_INCLUDES=-I/usr/local/yolo/include

#INCLUDES=-I../src -I$(CAFFE_ROOT)/include -I/usr/local/include -I$(HOME)/wpilib/simulation/include -I/usr/local/cuda/targets/x86_64-linux/include -I/usr/local/yolo/include
INCLUDES=$(YOLO_INCLUDES) -I$(CAFFE_ROOT)/include -I/usr/local/include -I$(HOME)/wpilib/simulation/include -I/usr/local/cuda/targets/x86_64-linux/include
#RPATH=-rpath $(HOME)/wpilib/simulation/lib:../build/lib
RPATH= -Xlinker -rpath $(CAFFE_ROOT)/lib -Xlinker -rpath $(HOME)/wpilib/simulation/lib -Xlinker -rpath /usr/local/yolo/lib
#RPATH= -Xlinker -rpath $(CAFFE_ROOT)/lib -Xlinker -rpath $(HOME)/wpilib/simulation/lib

# Add inputs and outputs from these tool invocations to the build variables

Binary file not shown.
@@ -8,9 +8,11 @@ cd $APPDIR
export WEIGHTS=weights/tiny-yolo-balltote_2000.weights
export CFG=cfg/tiny-yolo-2class.cfg
export DATA=cfg/balltote.data
export VIDEO="http://192.168.1.107:5002/?action=stream?dummy=param.mjpg"
#export VIDEO="http://192.168.1.107:5002/?action=stream?dummy=param.mjpg"
export VIDEO="http://Ubuntu14.local:5002/?action=stream?dummy=param.mjpg"

#export VIDEO=$HOME/data/videos/balltote3.mp4

export OUTPUT=Ubuntu14.local

Ubuntu/YOLOProc --thresh 0.4 --publish $OUTPUT --output "Annotated" --print ${DATA} ${CFG} ${WEIGHTS} ${VIDEO}
rm /tmp/target-camera/*
Ubuntu/YOLOProc --flush 3 --thresh 0.4 --publish $OUTPUT --output "Annotated" --print ${DATA} ${CFG} ${WEIGHTS} ${VIDEO}
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Run YOLOProc against a recorded video for offline testing.

# All paths below are relative to the project root; abort if the cd fails
# so we never run the binary from the wrong directory.
cd .. || exit 1

WEIGHTS=weights/tiny-yolo-balltote_2000.weights

CFG=cfg/tiny-yolo-2class.cfg
DATA=cfg/balltote.data
#FILE="http://192.168.1.107:5002/?action=stream?dummy=param.mjpg"
FILE=$HOME/data/videos/balltote3.mp4

#darknet detector demo cfg/balltote.data ${CFG} ${WEIGHTS} ${FILE} -thresh 0.2 -w 320 -h 240 -fps 20 -clear
Ubuntu/YOLOProc "${DATA}" "${CFG}" "${WEIGHTS}" "${FILE}" --thresh 0.58 --print
@@ -1,6 +1,6 @@
[net]
batch=64
subdivisions=16
batch=1
subdivisions=1
width=416
height=416
channels=3
@@ -2,7 +2,6 @@

cd ../

rm -f /tmp/target-camera/*
WEIGHTS=weights/tiny-yolo-balltote_2000.weights

CFG=cfg/tiny-yolo-2class.cfg
@@ -13,6 +13,9 @@
#include <stdlib.h>
#include <ctime>

#include <chrono>
#include <thread>

#include <network.h>
#include <parser.h>
#include <option_list.h>
@@ -36,23 +39,22 @@ extern image ipl_to_image(IplImage* src);
extern void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b);
void draw_bbox(image a, box bbox, int w, float r, float g, float b);

//#define SIMULATION
#define IMAGE_WIDTH 320
#define IMAGE_HEIGHT 240

DEFINE_double(thresh, 0.3,
DEFINE_double(thresh, 0.4,
"Only pass detections with confidence scores higher than this [default=0.3]");
DEFINE_string(publish, "",
"NetTables publish URL [no NetTables output if not specified]");
DEFINE_string(output, "Annotated",
DEFINE_string(output, "",
"Output video Name [no image output if not specified]");
DEFINE_bool(timeit, false,
"display cycle-time and FPS if true [default=false]");
DEFINE_bool(print, false,
"display bounding box detection info if true [default=false]");
DEFINE_bool(nobuffer, false,
"disable image buffer if true [default=false]");
DEFINE_bool(nodisplay, false,
"Don't sent annotated images if set");
DEFINE_int32(flush, 0,
"flush image buffer by dummy reads if set");
DEFINE_bool(usage, false,
"print usage if set");

@@ -144,23 +146,42 @@ int main(int argc, char *argv[]) {
bool publish=publish_addrs.empty()?false:true;
std::shared_ptr<NetworkTable> table;
const std::string& output_name = FLAGS_output;
const bool display = !FLAGS_nodisplay;
const bool display = output_name.empty()?false:true;
int skip_frames= FLAGS_flush;
cs::CvSource outputStream;
int classes;

int classes;
char *name_list;
char **names;

std::string camname;
cs::CvSink cvSink;

cv::Scalar color0(0, 255, 255);
cv::Scalar color1(255, 255, 0);
cv::Scalar target_color(0, 0, 255);

if(publish){
std::cout << "Starting SSDProc nettables="<<publish_addrs<<std::endl;
std::cout << "Starting YOLOProc nettables="<<publish_addrs<<std::endl;
NetworkTable::SetClientMode();
NetworkTable::SetIPAddress(llvm::StringRef(publish_addrs));
table=NetworkTable::GetTable("datatable");
}
#ifdef SIMULATION
camname="simcam";
cs::HttpCamera simcam(camname, videoAddress);
cvSink = frc::CameraServer::GetInstance()->GetVideo(simcam);
#else
camname="Logitech";

cs::UsbCamera camera1 = frc::CameraServer::GetInstance()->StartAutomaticCapture(camname,1);
std::this_thread::sleep_for(std::chrono::milliseconds(500));

camera1.SetResolution(640, 480);

//camera1.SetFPS(10);
cvSink = frc::CameraServer::GetInstance()->GetVideo(camera1);
#endif

if(display)
outputStream = frc::CameraServer::GetInstance()->PutVideo(output_name, IMAGE_WIDTH, IMAGE_HEIGHT);

@@ -177,22 +198,16 @@ int main(int argc, char *argv[]) {
set_batch_network(&net, 1);
layer l = net.layers[net.n-1];

printf("video file: %s\n", videoAddress);
printf("video stream: %s\n", videoAddress);

// cv::VideoCapture vcap;
// vcap.set(CV_CAP_PROP_BUFFERSIZE, 1); // set frame buffer depth to 1 (doesn't work on pi-3)
// if(!vcap.open(videoAddress))
// error("Couldn't connect to video\n");
// else
// std::cout << "Video Stream captured "<<videoAddress << std::endl;

#define USE_VCAP

#ifdef USE_VCAP
cv::VideoCapture vcap;
vcap.set(CV_CAP_PROP_BUFFERSIZE, 1); // set frame buffer depth to 1 (doesn't work on pi-3)
if(!vcap.open(videoAddress))
error("Couldn't connect to video\n");
else
std::cout << "Video Stream captured "<<videoAddress << std::endl;
#else
CvCapture * cap = cvCaptureFromFile(videoAddress);
if(!cap)
error("Couldn't connect to video\n");
#endif
int num=l.w*l.h*l.n;
std::cout << "net:"<<net.n<<"x"<<net.w<<"x"<<net.h<<" l:"<<l.w<<"x"<<l.h<<"x"<<l.n<<std::endl;
boxes = (box *)calloc(num, sizeof(box));
@@ -201,38 +216,24 @@ int main(int argc, char *argv[]) {
probs[j] = (float *)calloc(l.classes, sizeof(float));
int count = 0;
double before_frame = get_wall_time();
while(1){
while(true){
++count;
cv::Mat img;
cv::Mat mat;
image im;
#define VCAP_BUFFER_HACK

#define SKIP_FRAMES 3
#ifdef USE_VCAP
#ifdef VCAP_BUFFER_HACK
for(int i=0;i<SKIP_FRAMES;i++) // discard any images that were pre-loaded or arrived during previous processing
vcap.read(img);
#endif
if(!vcap.read(img))
if (cvSink.GrabFrame(img) == 0) {
continue;
im = mat_to_image(img);
#else
IplImage* src;
#ifdef VCAP_BUFFER_HACK
for(int i=0;i<SKIP_FRAMES;i++) // discard any images that were pre-loaded or arrived during previous processing
src = cvQueryFrame(cap);
#endif
src = cvQueryFrame(cap);
if(!src){
std::cout << "Stream closed"<<std::endl;
break;
}
im = ipl_to_image(src);
rgbgr_image(im);
img = cv::cvarrToMat(src);
#endif
img.copyTo(mat);

// for(int i=0;i<skip_frames;i++) // discard any images that were pre-loaded or arrived during previous processing
// vcap.read(img);
// if(!vcap.read(img)){
// std::cout << "Waiting for image"<<videoAddress << std::endl;
// continue;
// }
im = mat_to_image(img);
img.copyTo(mat);

if(!im.data){
std::cout << "Stream closed"<<std::endl;
@@ -267,7 +268,7 @@ int main(int argc, char *argv[]) {
cv::Point ctr((int)(b.x*im.w),(int)(b.y*im.h));

if(display){
int lw=prob>0.8 ? 2:1;
int lw=prob>0.8 ? 3:2;
cv::Scalar color=class1==0?color0:color1;
cv::Point tl(left,top);
cv::Point br(right,bot);
@@ -897,13 +897,13 @@
<sensor name='Sim' type='camera'>
<visualize>1</visualize>
<always_on>1</always_on>
<update_rate>50</update_rate>
<update_rate>60</update_rate>
<pose frame=''>-0.124207 0.303145 -0.122065 1.5708 -0 3.14159</pose>
<camera name='Cam'>
<horizontal_fov>0.87</horizontal_fov>
<image>
<width>320</width>
<height>240</height>
<width>640</width>
<height>480</height>
</image>
<clip>
<near>0.1</near>