diff --git a/img_processing/CMakeLists.txt b/img_processing/CMakeLists.txt
index 74cb072f..68a92d8c 100644
--- a/img_processing/CMakeLists.txt
+++ b/img_processing/CMakeLists.txt
@@ -17,6 +17,12 @@ if(SHOW_FRAMES)
     add_definitions(-DSHOW_FRAMES)
 endif()
 
+# Check if the LANE_DETECT flag is set
+option(LANE_DETECT "Enable lane detection" ON)
+if(LANE_DETECT)
+    add_definitions(-DLANE_DETECT)
+endif()
+
 # make the library with the src files
 file(GLOB SOURCES "src/*.cpp" "../logger/*.cpp" "tests/utils.cpp" "../communication/src/*.cpp" "../communication/sockets/*.cpp")
 add_library(ImageProcessingLib ${SOURCES})
@@ -26,7 +32,7 @@ add_library(CommunicationLib STATIC ../communication/src/communication.cpp ../co
 configure_file( ${CMAKE_BINARY_DIR}/config.json COPYONLY)
 
 # create test executable
-add_executable(runTests tests/main.cpp tests/test_serialize.cpp tests/test_detector.cpp tests/test_dynamic_tracker.cpp tests/test_distance.cpp tests/test_manager.cpp tests/test_velocity.cpp)
+add_executable(runTests tests/main.cpp tests/test_serialize.cpp tests/test_detector.cpp tests/test_dynamic_tracker.cpp tests/test_distance.cpp tests/test_manager.cpp tests/test_velocity.cpp tests/test_lane_detector.cpp)
 target_link_libraries(runTests ImageProcessingLib ${OpenCV_LIBS} ${GTEST_LIBRARIES} pthread CommunicationLib)
 
 #adding tests
diff --git a/img_processing/data.json b/img_processing/data.json
new file mode 100644
index 00000000..58b059a1
--- /dev/null
+++ b/img_processing/data.json
@@ -0,0 +1,12 @@
+[
+    {
+        "path": "C:/Users/user1/Desktop/day/sun/sun_in_street.mov",
+        "focal_length": 1500,
+        "is_travel": true
+    },
+    {
+        "path": "C:/Users/user1/Desktop/day/close_cars/road.mov",
+        "focal_length": 2000,
+        "is_travel": true
+    }
+]
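
Note on the new config format: each data.json entry supplies a video path, a camera focal length and an is_travel flag, and Manager::mainDemo() further down in this diff reads exactly these keys with nlohmann::json. For illustration only (the file path and the default values below are made up, this is not code added by the patch):

#include <fstream>
#include <iostream>
#include "nlohmann/json.hpp"

int main()
{
    std::ifstream file("img_processing/data.json");  // illustrative path
    nlohmann::json jsonData;
    file >> jsonData;  // parse the whole array
    for (const auto &obj : jsonData) {
        std::string path = obj.value("path", "");         // video to open
        int focalLength = obj.value("focal_length", 0);   // used by Distance
        bool isTravel = obj.value("is_travel", false);    // passed to runOnVideo()
        std::cout << path << " " << focalLength << " " << isTravel << "\n";
    }
}
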
diff --git a/img_processing/include/lane_detector.h b/img_processing/include/lane_detector.h
new file mode 100644
index 00000000..b368f4a3
--- /dev/null
+++ b/img_processing/include/lane_detector.h
@@ -0,0 +1,105 @@
+
+#ifndef __LANE_DETECTION__
+#define __LANE_DETECTION__
+
+#define _USE_MATH_DEFINES
+#include <math.h>
+#include "regression.h"
+#include "log_manager.h"
+#include <memory>
+#include <opencv2/opencv.hpp>
+
+#define CAR_IN_IMAGE 80
+
+struct LaneDrawingInfo {
+    int rightX1;  // x bottom right
+    int rightX2;  // x top right
+    int leftX1;   // x bottom left
+    int leftX2;   // x top left
+    int y1;       // y bottom right & left
+    int y2;       // y top right & left
+};
+
+class LaneDetector {
+   public:
+    void init();
+    void manageLaneDetector(std::shared_ptr<cv::Mat> frame);
+    void drawLanesOnImage(std::shared_ptr<cv::Mat> img);
+
+   private:
+    std::shared_ptr<cv::Mat> image;
+    bool first;
+    bool withoutCar;
+    int imgCols;
+    int imgRows;
+    LaneDrawingInfo drawingInfo;
+
+    /**
+     * Returns true when the image is classified as daytime.
+     * Note: this is based on the mean pixel value of the image and might not
+     * always give accurate results.
+     */
+    bool isDayTime();
+
+    /**
+     * Filter the source image so that only the white and yellow pixels remain.
+     * A gray filter is also added if the source image is classified as taken at night.
+     * One assumption for lane detection here is that lanes are either white or yellow.
+     * @param isDayTime true if the image is taken during the day, false if at night
+     * @return Mat filtered image
+     */
+    cv::Mat filterColors(bool isDayTime);
+
+    /**
+     * Apply a grayscale transform to the image.
+     * @return grayscale image
+     */
+    cv::Mat applyGrayscale(cv::Mat source);
+
+    /**
+     * Apply Gaussian blur to the image.
+     * @return blurred image
+     */
+    cv::Mat applyGaussianBlur(cv::Mat source);
+
+    /**
+     * Detect edges in the image by applying Canny edge detection.
+     * @return image with detected edges
+     */
+    cv::Mat applyCanny(cv::Mat source);
+
+    /**
+     * Apply an image mask.
+     * Only keep the part of the image defined by the
+     * polygon formed from four points. The rest of the image is set to black.
+     * @return Mat image with mask
+     */
+    cv::Mat RegionOfInterest(cv::Mat source);
+
+    /**
+     * Returns a vector with the detected Hough lines.
+     * @param canny image resulting from a Canny transform
+     * @param source image on which the Hough lines are drawn
+     * @param drawHough draw the detected lines on the source image if true.
+     * It will also show the image with the lines drawn on it, which is why
+     * it is not recommended to pass true when working with a video.
+     * @return vector containing the Hough lines
+     */
+    std::vector<cv::Vec4i> houghLines(cv::Mat canny, cv::Mat source,
+                                      bool drawHough);
+
+    /**
+     * Fits one line per side out of the detected Hough lines and stores the
+     * result in drawingInfo so it can be drawn on the source image.
+     * @param lines vector<Vec4i> holding the lines
+     * @return true if the lanes could be computed from the given lines
+     */
+    bool drawLanes(std::vector<cv::Vec4i> lines);
+
+    /**
+     * Draw the lane on the road only (cut the car's hood out of the drawing).
+     */
+    void cutCar();
+};
+
+#endif /*__LANE_DETECTION__*/
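
The public surface of the new class is small: call init() once, then manageLaneDetector() per frame and drawLanesOnImage() on the frame you want annotated. A minimal usage sketch (video path and window handling are illustrative; the GTest case at the end of this diff exercises the same sequence):

#include <memory>
#include <opencv2/opencv.hpp>
#include "lane_detector.h"

int main()
{
    cv::VideoCapture cap("road.mov");  // illustrative path
    LaneDetector laneDetector;
    laneDetector.init();

    cv::Mat frame;
    while (cap.read(frame)) {
        auto sharedFrame = std::make_shared<cv::Mat>(frame);
        laneDetector.manageLaneDetector(sharedFrame);  // detect lanes in this frame
        laneDetector.drawLanesOnImage(sharedFrame);    // overlay the current lane estimate
        cv::imshow("lanes", *sharedFrame);
        if (cv::waitKey(1) == 27) {  // ESC
            break;
        }
    }
}
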
diff --git a/img_processing/include/log_manager.h b/img_processing/include/log_manager.h
index 9c464bb4..4c5a4cdc 100644
--- a/img_processing/include/log_manager.h
+++ b/img_processing/include/log_manager.h
@@ -12,7 +12,8 @@ enum class ErrorType {
     FILE_ERROR,
     DETECTION_ERROR,
     TRACKING_ERROR,
-    MODEL_ERROR
+    MODEL_ERROR,
+    SIZE_ERROR
 };
 
 enum class InfoType {
@@ -29,7 +30,10 @@
     MODE
 };
 
-enum class DebugType { PRINT };
+enum class DebugType {
+    PRINT,
+    DRAW_PREV
+};
 
 class LogManager {
    public:
diff --git a/img_processing/include/manager.h b/img_processing/include/manager.h
index 44a93e66..b0b50889 100644
--- a/img_processing/include/manager.h
+++ b/img_processing/include/manager.h
@@ -5,10 +5,12 @@
 #include "alerter.h"
 #include "detector.h"
 #include "distance.h"
+#include "lane_detector.h"
 #include "dynamic_tracker.h"
 #include "log_manager.h"
 #include "velocity.h"
 #include "communication.h"
+
 class Manager {
    public:
     static logger imgLogger;
@@ -30,6 +32,8 @@
     Velocity velocity;
     DynamicTracker dynamicTracker;
     Alerter alerter;
+    LaneDetector laneDetector;
+    int longTime;
     int iterationCnt;
     uint32_t destID;
     uint32_t processID;
@@ -42,7 +46,7 @@
     bool isResetTracker(bool isTravel);
     bool isTrack(bool isTravel);
     void sendAlerts(std::vector> &alerts);
-    void runOnVideo(std::string videoPath);
+    void runOnVideo(std::string videoPath, bool isTravel);
     int readIdFromJson(const char *target);
 };
 #endif  //__MANAGER_H__
diff --git a/img_processing/include/regression.h b/img_processing/include/regression.h
new file mode 100644
index 00000000..7421d26e
--- /dev/null
+++ b/img_processing/include/regression.h
@@ -0,0 +1,63 @@
+#ifndef __REGRESSION__
+#define __REGRESSION__
+
+#include <math.h>
+#include <numeric>
+#include <vector>
+
+/**
+ * @brief Multiplies two vectors element by element and then sums the products.
+ * Vectors A and B must be the same size and their values must be of the same type.
+ *
+ * @param A vector.
+ * @param B vector.
+ * @return X sum of the multiplied values.
+ */
+template <class X, class Y>
+X multiplyAndSum(std::vector<Y> A, std::vector<Y> B)
+{
+    X sum;
+    std::vector<X> temp;
+    for (int i = 0; i < A.size(); i++) {
+        temp.push_back(A[i] * B[i]);
+    }
+    sum = std::accumulate(temp.begin(), temp.end(), 0.0);
+
+    return sum;
+}
+
+/**
+ * @brief Calculates the coefficients (slope and intercept) of the best fitting line
+ * given independent and dependent values. Vectors A and B must be the same size
+ * and their values must be of the same type.
+ *
+ * @param A vector (independent values).
+ * @param B vector (dependent values).
+ * @return vector where the first element is the slope (b1) and the second is the intercept (b0).
+ */
+template <class X, class Y>
+std::vector<X> estimateCoefficients(std::vector<Y> A, std::vector<Y> B)
+{
+    // Sample size
+    int N = A.size();
+
+    // Calculate the mean of A and B
+    X meanA = std::accumulate(A.begin(), A.end(), 0.0) / A.size();
+    X meanB = std::accumulate(B.begin(), B.end(), 0.0) / B.size();
+
+    // Calculate the cross-deviation and the deviation about A
+    X SSxy = multiplyAndSum<X, Y>(A, B) - (N * meanA * meanB);
+    X SSxx = multiplyAndSum<X, Y>(A, A) - (N * meanA * meanA);
+
+    // Calculate the regression coefficients
+    X slopeB1 = SSxy / SSxx;
+    X interceptB0 = meanB - (slopeB1 * meanA);
+
+    // Return vector: insert the slope first and then the intercept
+    std::vector<X> coef;
+    coef.push_back(slopeB1);
+    coef.push_back(interceptB0);
+    return coef;
+}
+
+#endif /*__REGRESSION__*/
\ No newline at end of file
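
A quick sanity check of the regression helper (illustrative only, not part of the patch, and assuming the template parameters as reconstructed above): points that already lie on y = 2x + 1 should yield a slope of 2 and an intercept of 1, which is exactly the form drawLanes() below inverts to recover the lane end points.

#include <iostream>
#include <vector>
#include "regression.h"

int main()
{
    // Points on the line y = 2x + 1
    std::vector<int> xs = {1, 2, 3, 4};
    std::vector<int> ys = {3, 5, 7, 9};

    std::vector<double> coef = estimateCoefficients<double, int>(xs, ys);
    double b1 = coef[0];  // slope, expected 2.0
    double b0 = coef[1];  // intercept, expected 1.0

    // Recover x for a given y, as drawLanes() does for the lane end points
    double x = (11.0 - b0) / b1;  // expected 5.0
    std::cout << b1 << " " << b0 << " " << x << "\n";
}
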
diff --git a/img_processing/src/lane_detector.cpp b/img_processing/src/lane_detector.cpp
new file mode 100644
index 00000000..4a19bba2
--- /dev/null
+++ b/img_processing/src/lane_detector.cpp
@@ -0,0 +1,500 @@
+#include "lane_detector.h"
+
+using namespace std;
+using namespace cv;
+
+void LaneDetector::init()
+{
+    this->first = true;
+    this->image = std::make_shared<Mat>();
+    this->withoutCar = true;
+}
+
+void LaneDetector::manageLaneDetector(std::shared_ptr<Mat> frame)
+{
+    if (frame->empty()) {
+        LogManager::logErrorMessage(ErrorType::IMAGE_ERROR, "No image");
+        throw runtime_error("No image!");
+    }
+
+    this->image = frame;
+    this->imgCols = image->cols;
+    this->imgRows = image->rows;
+
+    // Determine if the video is taken during daytime or not
+    bool isDay = isDayTime();
+
+    // Filter image
+    Mat filteredIMG = filterColors(isDay);
+
+    // Apply grayscale
+    Mat gray = applyGrayscale(filteredIMG);
+
+    // Apply Gaussian blur
+    Mat gBlur = applyGaussianBlur(gray);
+
+    // Find edges
+    Mat edges = applyCanny(gBlur);
+
+    // Create mask (Region of Interest)
+    Mat maskedIMG = RegionOfInterest(edges);
+
+    // Detect straight lines and draw the lanes if possible
+    std::vector<Vec4i> linesP = houghLines(maskedIMG, *image, false);
+
+    bool isDraw = drawLanes(linesP);
+
+    // No path was found and there is no previous path, use a default drawing
+    if (!isDraw && first) {
+        this->drawingInfo.rightX1 = 1207;
+        this->drawingInfo.rightX2 = 754;
+        this->drawingInfo.leftX1 = 81;
+        this->drawingInfo.leftX2 = 505;
+        this->drawingInfo.y1 = imgRows;
+        this->drawingInfo.y2 = imgRows * (1 - 0.4);
+        if (this->withoutCar) {
+            cutCar();
+        }
+    }
+
+    this->first = false;
+}
+
+bool LaneDetector::isDayTime()
+{
+    /* In general, daytime images/videos require different
+    color filters than nighttime images/videos. For example, in darker light it is
+    better to add a gray color filter in addition to the white and yellow ones */
+
+    Scalar s = mean(*image);  // Mean pixel values
+
+    /* Cut-off values chosen by looking at the mean pixel values of
+    multiple daytime and nighttime images */
+    if (s[0] < 30 || (s[1] < 33 && s[2] < 30)) {
+        return false;
+    }
+
+    return true;
+}
+
+Mat LaneDetector::filterColors(bool isDayTime)
+{
+    Mat hsv, whiteMask, whiteImage, yellowMask, yellowImage, whiteYellow;
+
+    // White mask
+    std::vector<int> lowerWhite = {130, 130, 130};
+    std::vector<int> upperWhite = {255, 255, 255};
+    inRange(*image, lowerWhite, upperWhite, whiteMask);
+    bitwise_and(*image, *image, whiteImage, whiteMask);
+
+    // Yellow mask
+    cvtColor(*image, hsv, COLOR_BGR2HSV);
+    std::vector<int> lowerYellow = {20, 100, 110};
+    std::vector<int> upperYellow = {30, 180, 240};
+    inRange(hsv, lowerYellow, upperYellow, yellowMask);
+    bitwise_and(*image, *image, yellowImage, yellowMask);
+
+    // Blend yellow and white together
+    addWeighted(whiteImage, 1., yellowImage, 1., 0., whiteYellow);
+
+    // Add a gray filter if the image is not taken during the day
+    if (!isDayTime) {
+        // Gray mask
+        Mat grayMask, grayImage, grayAndWhite, dst;
+        std::vector<int> lowerGray = {80, 80, 80};
+        std::vector<int> upperGray = {130, 130, 130};
+        inRange(*image, lowerGray, upperGray, grayMask);
+        bitwise_and(*image, *image, grayImage, grayMask);
+
+        // Blend gray, yellow and white together and return the result
+        addWeighted(grayImage, 1., whiteYellow, 1., 0., dst);
+        return dst;
+    }
+
+    // Return the white and yellow mask if the image is taken during the day
+    return whiteYellow;
+}
+
+Mat LaneDetector::applyGrayscale(Mat source)
+{
+    Mat dst;
+    cvtColor(source, dst, COLOR_BGR2GRAY);
+
+    return dst;
+}
+
+Mat LaneDetector::applyGaussianBlur(Mat source)
+{
+    Mat dst;
+    GaussianBlur(source, dst, Size(3, 3), 0);
+
+    return dst;
+}
+
+Mat LaneDetector::applyCanny(Mat source)
+{
+    Mat dst;
+
+    if (CAR_IN_IMAGE >= source.rows) {
+        LogManager::logErrorMessage(ErrorType::SIZE_ERROR,
+                                    "car's size is bigger than the image");
+        throw runtime_error("car's size is bigger than the image");
+    }
+
+    Mat roi = source(Rect(0, 0, source.cols, source.rows - CAR_IN_IMAGE));
+
+    Mat extendedImage(CAR_IN_IMAGE, roi.cols, roi.type(), Scalar(0, 0, 0));
+
+    vconcat(roi, extendedImage, roi);
+
+    Canny(roi, dst, 50, 150);
+
+    return dst;
+}
+
+Mat LaneDetector::RegionOfInterest(Mat source)
+{
+    /* In an ideal situation, the ROI should only contain the road lanes.
+    We want to filter out all the other stuff, including things like arrow road
+    markings. We try to achieve that by creating two trapezoid masks: one big
+    trapezoid and a smaller one. The smaller one goes inside the bigger one. The
+    pixels in the space between them will be kept and all the other pixels will be
+    masked. If it goes well, the space between the two trapezoids contains only
+    the lanes. */
+
+    // Parameters of the big trapezoid
+
+    // Width of the bottom edge of the trapezoid, expressed as a percentage of the image width
+    float trapezoidBottomWidth = 1.0;
+    // The above comment also applies here, but for the top edge of the trapezoid
+    float trapezoidTopWidth = 0.07;
+    // Height of the trapezoid, expressed as a percentage of the image height
+    float trapezoidHeight = 0.5;
+
+    // Parameters of the small trapezoid
+
+    // Will be added to trapezoidBottomWidth to create a less wide bottom edge
+    float smallBottomWidth = 0.45;
+    // Multiplies trapezoidTopWidth to create a less wide top edge
+    float smallTopWidth = 0.3;
+    // Height of the small trapezoid, expressed as a percentage of the height of the big trapezoid
+    float smallHeight = 1.0;
+    // Make the trapezoids float just above the bottom edge of the image
+    float bar = 0.97;
+
+    // Vector which holds all the points of the two trapezoids
+    std::vector<Point> pts;
+
+    // Large trapezoid
+    pts.push_back(cv::Point((source.cols * (1 - trapezoidBottomWidth)) / 2,
+                            source.rows * bar));  // Bottom left
+    pts.push_back(
+        cv::Point((source.cols * (1 - trapezoidTopWidth)) / 2,
+                  source.rows - source.rows * trapezoidHeight));  // Top left
+    pts.push_back(
+        cv::Point(source.cols - (source.cols * (1 - trapezoidTopWidth)) / 2,
+                  source.rows - source.rows * trapezoidHeight));  // Top right
+    pts.push_back(
+        cv::Point(source.cols - (source.cols * (1 - trapezoidBottomWidth)) / 2,
+                  source.rows * bar));  // Bottom right
+
+    // Small trapezoid
+    pts.push_back(cv::Point(
+        (source.cols * (1 - trapezoidBottomWidth + smallBottomWidth)) / 2,
+        source.rows * bar));  // Bottom left
+    pts.push_back(
+        cv::Point((source.cols * (1 - trapezoidTopWidth * smallTopWidth)) / 2,
+                  source.rows - source.rows * trapezoidHeight *
+                                    smallHeight));  // Top left
+    pts.push_back(cv::Point(
+        source.cols -
+            (source.cols * (1 - trapezoidTopWidth * smallTopWidth)) / 2,
+        source.rows -
+            source.rows * trapezoidHeight * smallHeight));  // Top right
+    pts.push_back(cv::Point(
+        source.cols -
+            (source.cols * (1 - trapezoidBottomWidth + smallBottomWidth)) / 2,
+        source.rows * bar));  // Bottom right
+
+    // Create the mask
+    Mat mask = Mat::zeros(source.size(), source.type());
+    fillPoly(mask, pts, Scalar(255, 255, 255));
+
+    /* Put the mask over the source image and
+    return an all-black image, except for the part where the mask
+    has nonzero pixels: all the pixels in the space between the two trapezoids */
+    Mat maskedImage;
+    bitwise_and(source, mask, maskedImage);
+
+    return maskedImage;
+}
+
+std::vector<Vec4i> LaneDetector::houghLines(Mat canny, Mat source,
+                                            bool drawHough)
+{
+    // Distance resolution in pixels of the Hough grid
+    double rho = 2;
+    // Angular resolution in radians of the Hough grid
+    double theta = 1 * M_PI / 180;
+    // Minimum number of votes (intersections in a Hough grid cell)
+    int thresh = 15;
+    // Minimum number of pixels making up a line
+    double minLineLength = 10;
+    // Maximum gap in pixels between connectable line segments
+    double maxGapLength = 20;
+
+    std::vector<Vec4i> linesP;  // Will hold the results of the detection
+    HoughLinesP(canny, linesP, rho, theta, thresh, minLineLength, maxGapLength);
+    if (drawHough) {
+        for (size_t i = 0; i < linesP.size(); i++) {
+            Vec4i l = linesP[i];
+            line(source, Point(l[0], l[1]), Point(l[2], l[3]),
+                 Scalar(0, 0, 255), 3, LINE_AA);
+        }
+        imshow("Hough Lines", source);
+        waitKey(5);
+    }
+
+    return linesP;
+}
+
+bool LaneDetector::drawLanes(std::vector<Vec4i> lines)
+{
+    if (lines.size() == 0) {
+        // There are no lines, use the previous lines
+        return false;
+    }
+
+    // Variables for the current paths - set drawing lanes to true
+    bool drawRightLane = true;
+    bool drawLeftLane = true;
+
+    // Find lines with a slope higher than the slope threshold
+    float slopeThreshold = 0.5;
+    std::vector<double> slopes;
+    std::vector<Vec4i> goodLines;
+
+    for (int i = 0; i < lines.size(); i++) {
+        /* Each line is represented by a 4-element vector (x_1, y_1, x_2, y_2),
+        where (x_1, y_1) is the line's starting point and (x_2, y_2) is the
+        ending point */
+        Vec4i l = lines[i];
+
+        double slope;
+        // Calculate the slope
+        if (l[2] - l[0] == 0) {  // Avoid division by zero
+            slope = 999;         // Practically infinite slope
+        }
+        else {
+            slope = (l[3] - l[1]) / static_cast<double>(l[2] - l[0]);
+        }
+
+        if (abs(slope) > slopeThreshold) {
+            slopes.push_back(slope);
+            goodLines.push_back(l);
+        }
+    }
+
+    /* Split the good lines into two categories: right and left.
+    The right lines have a positive slope and the left lines have a negative slope */
+
+    // Outlines of the lanes
+    std::vector<Vec4i> rightLines;
+    std::vector<Vec4i> leftLines;
+    int imgCenter = imgCols / 2;
+
+    for (int i = 0; i < slopes.size(); i++) {
+        if (slopes[i] > 0 && goodLines[i][0] > imgCenter &&
+            goodLines[i][2] > imgCenter) {
+            rightLines.push_back(goodLines[i]);
+        }
+        if (slopes[i] < 0 && goodLines[i][0] < imgCenter &&
+            goodLines[i][2] < imgCenter) {
+            leftLines.push_back(goodLines[i]);
+        }
+    }
+
+    /* Form two lane lines out of all the lines we've detected.
+    A line is defined by 2 points: a starting point and an ending point.
+    Our goal at this step is to use linear regression to find the two
+    best fitting lines */
+
+    // Define the variables here; we need them in the condition below.
+    int y1;
+    int y2;
+    int rightX1;
+    int rightX2;
+    int leftX1;
+    int leftX2;
+
+    // Right and left lanes
+    std::vector<int> rightLinesX, rightLinesY, leftLinesX, leftLinesY;
+    double rightB1, rightB0, leftB1, leftB0;  // Slope and intercept
+
+    if (rightLines.size() != 0 && leftLines.size() != 0) {
+        // We start with the right side points
+        for (int i = 0; i < rightLines.size(); i++) {
+            // X of starting point of line
+            rightLinesX.push_back(rightLines[i][0]);
+            // X of ending point of line
+            rightLinesX.push_back(rightLines[i][2]);
+            // Y of starting point of line
+            rightLinesY.push_back(rightLines[i][1]);
+            // Y of ending point of line
+            rightLinesY.push_back(rightLines[i][3]);
+        }
+
+        if (rightLinesX.size() > 0) {
+            std::vector<double> coefRight = estimateCoefficients<double, int>(
+                rightLinesX, rightLinesY);  // y = b1x + b0
+            rightB1 = coefRight[0];
+            rightB0 = coefRight[1];
+        }
+        else {
+            LogManager::logDebugMessage(DebugType::DRAW_PREV,
+                                        "drawRightLane false");
+            return false;
+        }
+
+        for (int i = 0; i < leftLines.size(); i++) {
+            // X of starting point of line
+            leftLinesX.push_back(leftLines[i][0]);
+            // X of ending point of line
+            leftLinesX.push_back(leftLines[i][2]);
+            // Y of starting point of line
+            leftLinesY.push_back(leftLines[i][1]);
+            // Y of ending point of line
+            leftLinesY.push_back(leftLines[i][3]);
+        }
+        if (leftLinesX.size() > 0) {
+            std::vector<double> coefLeft =
+                estimateCoefficients<double, int>(leftLinesX, leftLinesY);
+            leftB1 = coefLeft[0];
+            leftB0 = coefLeft[1];
+        }
+        else {
+            LogManager::logDebugMessage(DebugType::DRAW_PREV,
+                                        "drawLeftLane false");
+            return false;
+        }
+
+        /* Now we need to find the two points for the right and left lane:
+        starting points and ending points */
+
+        // Y coordinate of starting point of both the left and right lane
+        y1 = imgRows;
+
+        /* 0.5 = trapezoidHeight (see RegionOfInterest); we set the y coordinate of
+        the ending point below the trapezoid height (0.4) to draw shorter lanes,
+        which looks a bit nicer. */
+
+        // Y coordinate of ending point of both the left and right lane
+        y2 = imgRows * (1 - 0.4);
+
+        // X coordinate of starting point of right lane
+        rightX1 = (y1 - rightB0) / rightB1;
+        // X coordinate of ending point of right lane
+        rightX2 = (y2 - rightB0) / rightB1;
+        // X coordinate of starting point of left lane
+        leftX1 = (y1 - leftB0) / leftB1;
+        // X coordinate of ending point of left lane
+        leftX2 = (y2 - leftB0) / leftB1;
+
+        /* If the end points of the paths exceed the boundaries of
+        the image excessively, return the previous drawings. */
+        if (!first && (rightX2 < leftX2 || rightX1 < 0 ||
+                       rightX2 > imgCols || leftX1 > imgCols || leftX2 < 0 ||
+                       rightX1 > imgRows + 350 || leftX1 < -350 ||
+                       rightX1 - leftX1 < 300)) {
+            LogManager::logDebugMessage(
+                DebugType::DRAW_PREV,
+                "points outside the boundaries of the image");
+            return false;
+        }
+
+        else {
+            LogManager::logDebugMessage(DebugType::PRINT, "current path");
+        }
+
+        double angleThreshold = 45.0;  // 45 degree angle
+        if (drawRightLane && drawLeftLane) {
+            double angle =
+                atan(abs((leftB1 - rightB1) / (1 + leftB1 * rightB1))) *
+                (180.0 / CV_PI);
+
+            if (angle > angleThreshold && !first) {
+                LogManager::logDebugMessage(DebugType::DRAW_PREV,
+                                            "angle > angleThreshold");
+                return false;
+            }
+        }
+    }
+
+    else {
+        LogManager::logDebugMessage(DebugType::DRAW_PREV, "missLine");
+        return false;
+    }
+
+    // Save the current path for the drawing step
+    drawingInfo.rightX1 = rightX1;
+    drawingInfo.rightX2 = rightX2;
+    drawingInfo.leftX1 = leftX1;
+    drawingInfo.leftX2 = leftX2;
+    drawingInfo.y1 = y1;
+    drawingInfo.y2 = y2;
+
+    if (this->withoutCar) {
+        cutCar();
+    }
+    LogManager::logDebugMessage(DebugType::PRINT, "Using current path");
+
+    return true;
+}
+
+void LaneDetector::drawLanesOnImage(std::shared_ptr<Mat> img)
+{
+    // Draw a transverse line CAR_IN_IMAGE pixels above the bottom of the image
+    cv::line(*img, cv::Point(0, img->rows - CAR_IN_IMAGE),
+             cv::Point(img->cols, img->rows - CAR_IN_IMAGE),
+             cv::Scalar(255, 255, 255), 4);
+
+    // Draw the lanes on the image using the updated values
+    cv::line(*img, cv::Point(drawingInfo.rightX1, drawingInfo.y1),
+             cv::Point(drawingInfo.rightX2, drawingInfo.y2),
+             cv::Scalar(0, 255, 0), 7);
+    cv::line(*img, cv::Point(drawingInfo.leftX1, drawingInfo.y1),
+             cv::Point(drawingInfo.leftX2, drawingInfo.y2),
+             cv::Scalar(255, 0, 0), 7);
+
+    // Fill the area between the lanes
+    Point prevPts[4] = {Point(drawingInfo.leftX1, drawingInfo.y1),
+                        Point(drawingInfo.leftX2, drawingInfo.y2),
+                        Point(drawingInfo.rightX2, drawingInfo.y2),
+                        Point(drawingInfo.rightX1, drawingInfo.y1)};
+    Mat mask = Mat::zeros(img->size(), img->type());
+    fillConvexPoly(mask, prevPts, 4, Scalar(235, 229, 52));
+
+    // Blend the mask and image together
+    addWeighted(*img, 0.9, mask, 0.3, 0.0, *img);
+}
+
+void LaneDetector::cutCar()
+{
+    float rightSlope = static_cast<float>(drawingInfo.y2 - drawingInfo.y1) /
+                       (drawingInfo.rightX2 - drawingInfo.rightX1);
+    float leftSlope = static_cast<float>(drawingInfo.y2 - drawingInfo.y1) /
+                      (drawingInfo.leftX2 - drawingInfo.leftX1);
+
+    // According to the straight-line formula: y = slope*x + d
+    float dRight = drawingInfo.y1 - rightSlope * drawingInfo.rightX1;
+    float dLeft = drawingInfo.y1 - leftSlope * drawingInfo.leftX1;
+    drawingInfo.y1 -= CAR_IN_IMAGE;
+
+    // Find X by substituting Y into the equation of the straight line
+    drawingInfo.rightX1 =
+        static_cast<int>((drawingInfo.y1 - dRight) / rightSlope);
+    drawingInfo.leftX1 = static_cast<int>((drawingInfo.y1 - dLeft) / leftSlope);
+}
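
drawLanes() above rejects a frame when the two fitted lines diverge too much, using the angle-between-two-lines identity tan(theta) = |(m1 - m2) / (1 + m1 * m2)|. A small standalone check of that formula (the slope values are made up for illustration):

#define _USE_MATH_DEFINES
#include <math.h>
#include <cstdio>

// Angle in degrees between two lines with slopes m1 and m2
static double angleBetween(double m1, double m2)
{
    return atan(fabs((m1 - m2) / (1 + m1 * m2))) * (180.0 / M_PI);
}

int main()
{
    printf("%.1f\n", angleBetween(-0.9, -1.0));  // ~3.0 degrees, well under the 45 degree threshold
    printf("%.1f\n", angleBetween(-0.7, 0.7));   // ~70.0 degrees, such a frame would be rejected
}
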
diff --git a/img_processing/src/log_manager.cpp b/img_processing/src/log_manager.cpp
index 918411af..1aeedb37 100644
--- a/img_processing/src/log_manager.cpp
+++ b/img_processing/src/log_manager.cpp
@@ -17,6 +17,7 @@ string LogManager::enumTypeToString(T enumType)
         {ErrorType::DETECTION_ERROR, "DETECTION_ERROR"},
         {ErrorType::TRACKING_ERROR, "TRACKING_ERROR"},
         {ErrorType::MODEL_ERROR, "MODEL_ERROR"},
+        {ErrorType::SIZE_ERROR, "SIZE_ERROR"},
         {InfoType::MEDIA_FINISH, "MEDIA_FINISH"},
         {InfoType::ALERT_SENT, "ALERT_SENT"},
         {InfoType::CHANGED, "CHANGED"},
@@ -28,6 +29,7 @@
         {InfoType::TRACKING, "TRACKING"},
         {InfoType::DISTANCE, "DISTANCE"},
         {InfoType::MODE, "MODE"},
+        {DebugType::DRAW_PREV, "DRAW_PREV"},
         {DebugType::PRINT, "PRINT"}};
     auto it = enumTypeToStringMap.find(enumType);
     if (it != enumTypeToStringMap.end()) {
diff --git a/img_processing/src/manager.cpp b/img_processing/src/manager.cpp
index 918b6e3b..fa25fbb0 100644
--- a/img_processing/src/manager.cpp
+++ b/img_processing/src/manager.cpp
@@ -1,9 +1,13 @@
+#include <fstream>
+#include <iostream>
+#include "nlohmann/json.hpp"
 #include "manager.h"
 #include "alert.h"
 
 #define ESC 27
 #define NUM_OF_TRACKING 10
 
+using json = nlohmann::json;
 using namespace std;
 using namespace cv;
 
@@ -38,44 +42,56 @@
     detector.init(isCuda);
     dynamicTracker.init();
     velocity.init(0.04);
+    laneDetector.init();
+    longTime = 0;
 }
 
 void Manager::mainDemo()
 {
-    string filePath = "../data.txt";
-    // open the file
-    ifstream file(filePath);
+    string path;
+    int focalLength;
+    bool isTravel;
+    // Open the JSON file
+    std::ifstream file("../data.json");
+
     if (!file.is_open()) {
-        LogManager::logErrorMessage(ErrorType::FILE_ERROR);
+        LogManager::logErrorMessage(ErrorType::FILE_ERROR,
+                                    "Failed to open the file");
+        throw runtime_error("Failed to open the file");
         return;
     }
-    string line;
-    // run over the file and read the lines
-    while (getline(file, line)) {
-        // intialize the iteration cnt
-        iterationCnt = 1;
-        istringstream iss(line);
-        string videoPath;
-        double focalLength;
-        // read the parameters
-        if (getline(iss, videoPath, '|') && iss >> focalLength) {
-            // Trim leading and trailing whitespaces from videoPath
-            videoPath.erase(0, videoPath.find_first_not_of(" \t\n\r\f\v"));
-            videoPath.erase(videoPath.find_last_not_of(" \t\n\r\f\v") + 1);
-        }
-        else {
-            LogManager::logErrorMessage(ErrorType::VIDEO_ERROR);
-            return;
+
+    // Read the content of the file into a JSON object
+    json jsonData;
+    file >> jsonData;
+
+    // Check if the JSON data is an array
+    if (jsonData.is_array()) {
+        // Iterate over each object in the array
+        for (const auto &obj : jsonData) {
+            iterationCnt = 1;
+            if (obj.find("path") != obj.end() && obj["path"].is_string()) {
+                path = obj["path"];
+            }
+
+            if (obj.find("focal_length") != obj.end() &&
+                obj["focal_length"].is_number_integer()) {
+                focalLength = obj["focal_length"];
+            }
+
+            if (obj.find("is_travel") != obj.end() &&
+                obj["is_travel"].is_boolean()) {
+                isTravel = obj["is_travel"];
+            }
+            // Get the distance instance and set the focal length
+            Distance &distance = Distance::getInstance();
+            distance.setFocalLength(focalLength);
+            runOnVideo(path, isTravel);
         }
-        // intialize focal length
-        Distance &distance = Distance::getInstance();
-        distance.setFocalLength(focalLength);
-        runOnVideo(videoPath);
     }
-    cout << "finish reading data";
 }
 
-void Manager::runOnVideo(string videoPath)
+void Manager::runOnVideo(string videoPath, bool isTravel)
 {
     // Convert Windows file path to WSL file path format
     if (videoPath.length() >= 3 && videoPath[1] == ':') {
@@ -93,13 +109,14 @@
         throw runtime_error("video not found");
         return;
     }
+    // run on video
     while (1) {
         capture >> frame;
         if (frame.empty()) {
             LogManager::logInfoMessage(InfoType::MEDIA_FINISH);
             break;
         }
-        int result = processing(frame, true);
+        int result = processing(frame, isTravel);
         if (result == -1)
             return;
     }
@@ -159,17 +176,19 @@
         iterationCnt = iterationCnt % NUM_OF_TRACKING + 1;
     }
 
-    // visual
+#ifdef LANE_DETECT
+    laneDetector.manageLaneDetector(this->currentFrame);
+#endif
+// visual
+#ifdef SHOW_FRAMES
     drawOutput();
-    #ifdef SHOW_FRAMES
-    drawOutput();
-    imshow("aaa", *currentFrame);
-    int key = cv::waitKey(1);
-    if (key == ESC) {
-        return -1;
-    }
-    return 1;
-    #endif
+    imshow("currentFrame", *currentFrame);
+    int key = cv::waitKey(1);
+    if (key == ESC) {
+        return -1;
+    }
+    return 1;
+#endif
 }
 
 void Manager::drawOutput()
 {
@@ -239,6 +258,10 @@
               Point(legendX + 10, legendY + 55), Scalar(255, 0, 0), FILLED);
     putText(*currentFrame, "velocity", Point(legendX + 15, legendY + 50),
             FONT_HERSHEY_SIMPLEX, 0.6, Scalar(255, 255, 255), 1);
+
+#ifdef LANE_DETECT
+    laneDetector.drawLanesOnImage(currentFrame);
+#endif
 }
 
 void Manager::sendAlerts(vector> &alerts)
diff --git a/img_processing/tests/test_lane_detector.cpp b/img_processing/tests/test_lane_detector.cpp
new file mode 100644
index 00000000..ac37a724
--- /dev/null
+++ b/img_processing/tests/test_lane_detector.cpp
@@ -0,0 +1,44 @@
+#include <gtest/gtest.h>
+#include <iostream>
+#include <memory>
+#include <opencv2/opencv.hpp>
+#include "lane_detector.h"
+#include "log_manager.h"
+
+using namespace std;
+using namespace cv;
+
+TEST(TLaneDetector, CheckRun)
+{
+    cout << "start" << endl;
+    string path = "/home/tirtza/train6/171.mov";
+    VideoCapture cap(path);
+
+    if (!cap.isOpened()) {
+        LogManager::logErrorMessage(ErrorType::VIDEO_ERROR,
+                                    "Could not load video");
+        throw runtime_error("Could not load video");
+    }
+
+    LaneDetector laneDetector;
+    laneDetector.init();
+
+    while (true) {
+        Mat frame;
+        cap >> frame;
+        if (frame.empty()) {
+            break;
+        }
+
+        auto sharedFrame = make_shared<Mat>(frame);
+
+        laneDetector.manageLaneDetector(sharedFrame);
+        laneDetector.drawLanesOnImage(sharedFrame);
+
+        imshow("lane-detection", *sharedFrame);
+        if (waitKey() == 27) {
+            break;
+        }
+    }
+
+    cap.release();
+}
\ No newline at end of file