From 0e0d1be8d093f518b7bdf23222fa36a2f3d92910 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Wed, 11 Mar 2015 14:40:19 +0100 Subject: [PATCH 01/17] [vision] Opticflow cleanup --- conf/modules/cv_opticflow.xml | 57 ++-- .../modules/computer_vision/cv/framerate.c | 76 ----- .../modules/computer_vision/cv/framerate.h | 28 -- .../cv/opticflow/fast9/LICENSE | 30 -- .../{fast9/fastRosten.c => fast_rosten.c} | 2 +- .../{fast9/fastRosten.h => fast_rosten.h} | 0 .../{optic_flow_int.c => lucas_kanade.c} | 5 +- .../{optic_flow_int.h => lucas_kanade.h} | 2 +- sw/airborne/modules/computer_vision/cv/trig.c | 159 ---------- sw/airborne/modules/computer_vision/cv/trig.h | 7 - .../modules/computer_vision/lib/v4l/v4l2.c | 3 + .../modules/computer_vision/lib/v4l/v4l2.h | 9 +- .../opticflow/hover_stabilization.c | 213 ------------- .../opticflow/hover_stabilization.h | 60 ---- .../opticflow/inter_thread_data.h | 11 +- .../opticflow/opticflow_calculator.c | 258 ++++++++++++++++ .../opticflow/opticflow_calculator.h | 57 ++++ .../opticflow/opticflow_thread.c | 154 ---------- .../opticflow/opticflow_thread.h | 33 -- .../opticflow/stabilization_opticflow.c | 172 +++++++++++ .../opticflow/stabilization_opticflow.h | 70 +++++ .../opticflow/visual_estimator.c | 283 ------------------ .../opticflow/visual_estimator.h | 41 --- .../computer_vision/opticflow_module.c | 204 +++++++------ .../computer_vision/opticflow_module.h | 4 +- 25 files changed, 722 insertions(+), 1216 deletions(-) delete mode 100644 sw/airborne/modules/computer_vision/cv/framerate.c delete mode 100644 sw/airborne/modules/computer_vision/cv/framerate.h delete mode 100644 sw/airborne/modules/computer_vision/cv/opticflow/fast9/LICENSE rename sw/airborne/modules/computer_vision/cv/opticflow/{fast9/fastRosten.c => fast_rosten.c} (99%) rename sw/airborne/modules/computer_vision/cv/opticflow/{fast9/fastRosten.h => fast_rosten.h} (100%) rename sw/airborne/modules/computer_vision/cv/opticflow/{optic_flow_int.c => lucas_kanade.c} (99%) rename sw/airborne/modules/computer_vision/cv/opticflow/{optic_flow_int.h => lucas_kanade.h} (96%) delete mode 100644 sw/airborne/modules/computer_vision/cv/trig.c delete mode 100644 sw/airborne/modules/computer_vision/cv/trig.h delete mode 100644 sw/airborne/modules/computer_vision/opticflow/hover_stabilization.c delete mode 100644 sw/airborne/modules/computer_vision/opticflow/hover_stabilization.h create mode 100644 sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c create mode 100644 sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h delete mode 100644 sw/airborne/modules/computer_vision/opticflow/opticflow_thread.c delete mode 100644 sw/airborne/modules/computer_vision/opticflow/opticflow_thread.h create mode 100644 sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c create mode 100644 sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h delete mode 100644 sw/airborne/modules/computer_vision/opticflow/visual_estimator.c delete mode 100644 sw/airborne/modules/computer_vision/opticflow/visual_estimator.h diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index 8ca5073172f..71bbf51f43a 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -13,7 +13,6 @@
- @@ -21,20 +20,19 @@
- - - - - - - - - - - + + + + + + + + + +
@@ -42,31 +40,30 @@
- + - + + + + + - - - - - - - - - - - - - - + + + + + + + + - - + + diff --git a/sw/airborne/modules/computer_vision/cv/framerate.c b/sw/airborne/modules/computer_vision/cv/framerate.c deleted file mode 100644 index 9ec262d0bcc..00000000000 --- a/sw/airborne/modules/computer_vision/cv/framerate.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (C) 2015 The Paparazzi Community - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/cv/framerate.c - * - */ - -#include "std.h" -#include "framerate.h" - -// Frame Rate (FPS) -#include - -// local variables -volatile long timestamp; -struct timeval start_time; -struct timeval end_time; - -#define USEC_PER_SEC 1000000L - -static long time_elapsed(struct timeval *t1, struct timeval *t2) -{ - long sec, usec; - sec = t2->tv_sec - t1->tv_sec; - usec = t2->tv_usec - t1->tv_usec; - if (usec < 0) { - --sec; - usec = usec + USEC_PER_SEC; - } - return sec * USEC_PER_SEC + usec; -} - -static void start_timer(void) -{ - gettimeofday(&start_time, NULL); -} - -static long end_timer(void) -{ - gettimeofday(&end_time, NULL); - return time_elapsed(&start_time, &end_time); -} - - -void framerate_init(void) { - // Frame Rate Initialization - timestamp = 0; - start_timer(); -} - -float framerate_run(void) { - // FPS - timestamp = end_timer(); - float framerate_FPS = (float) 1000000 / (float)timestamp; - // printf("dt = %d, FPS = %f\n",timestamp, FPS); - start_timer(); - return framerate_FPS; -} diff --git a/sw/airborne/modules/computer_vision/cv/framerate.h b/sw/airborne/modules/computer_vision/cv/framerate.h deleted file mode 100644 index 9044881ff05..00000000000 --- a/sw/airborne/modules/computer_vision/cv/framerate.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2015 The Paparazzi Community - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . 
- */ - -/** - * @file modules/computer_vision/cv/framerate.h - * - */ - - -void framerate_init(void); -float framerate_run(void); diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/fast9/LICENSE b/sw/airborne/modules/computer_vision/cv/opticflow/fast9/LICENSE deleted file mode 100644 index 63a85126e57..00000000000 --- a/sw/airborne/modules/computer_vision/cv/opticflow/fast9/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -Copyright(c) 2006, 2008 Edward Rosten -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - -*Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -*Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and / or other materials provided with the distribution. - -*Neither the name of the University of Cambridge nor the names of -its contributors may be used to endorse or promote products derived -from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/fast9/fastRosten.c b/sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.c similarity index 99% rename from sw/airborne/modules/computer_vision/cv/opticflow/fast9/fastRosten.c rename to sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.c index a8d856f9b37..d5f03a94acf 100644 --- a/sw/airborne/modules/computer_vision/cv/opticflow/fast9/fastRosten.c +++ b/sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.c @@ -32,7 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include -#include "fastRosten.h" +#include "fast_rosten.h" #define Compare(X, Y) ((X)>=(Y)) diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/fast9/fastRosten.h b/sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.h similarity index 100% rename from sw/airborne/modules/computer_vision/cv/opticflow/fast9/fastRosten.h rename to sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.h diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.c b/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c similarity index 99% rename from sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.c rename to sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c index b1e385972e5..f27f614cb0f 100644 --- a/sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.c +++ b/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c @@ -19,7 +19,7 @@ */ /** - * @file modules/computer_vision/cv/opticflow/optic_flow_int.c + * @file modules/computer_vision/cv/opticflow/lucas_kanade.c * @brief efficient fixed-point optical-flow * * - Initial fixed-point C implementation by G. de Croon @@ -31,8 +31,7 @@ #include #include #include -#include "optic_flow_int.h" -#include "modules/computer_vision/opticflow_module.h" +#include "lucas_kanade.h" #define int_index(x,y) (y * IMG_WIDTH + x) #define uint_index(xx, yy) (((yy * IMG_WIDTH + xx) * 2) & 0xFFFFFFFC) diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.h b/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h similarity index 96% rename from sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.h rename to sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h index 786916ac625..192b5a1bbc6 100644 --- a/sw/airborne/modules/computer_vision/cv/opticflow/optic_flow_int.h +++ b/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h @@ -19,7 +19,7 @@ */ /** - * @file modules/computer_vision/cv/opticflow/optic_flow_int.h + * @file modules/computer_vision/cv/opticflow/lucas_kanade.h * @brief efficient fixed-point optical-flow * */ diff --git a/sw/airborne/modules/computer_vision/cv/trig.c b/sw/airborne/modules/computer_vision/cv/trig.c deleted file mode 100644 index 88a1103f32b..00000000000 --- a/sw/airborne/modules/computer_vision/cv/trig.c +++ /dev/null @@ -1,159 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Trigonometry - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "trig.h" - -static int cosine[] = { - 10000, 9998, 9994, 9986, 9976, 9962, 9945, 9925, 9903, 9877, - 9848, 9816, 9781, 9744, 9703, 9659, 9613, 9563, 9511, 9455, - 9397, 9336, 9272, 9205, 9135, 9063, 8988, 8910, 8829, 8746, - 8660, 8572, 8480, 8387, 8290, 8192, 8090, 7986, 7880, 7771, - 7660, 7547, 7431, 7314, 7193, 7071, 6947, 6820, 6691, 6561, - 6428, 6293, 6157, 6018, 5878, 5736, 5592, 5446, 5299, 5150, - 5000, 4848, 4695, 4540, 4384, 4226, 4067, 3907, 3746, 3584, - 3420, 3256, 3090, 2924, 2756, 2588, 2419, 2250, 2079, 1908, - 1736, 1564, 1392, 1219, 1045, 872, 698, 523, 349, 175, - 0 -}; - -int sin_zelf(int ix) -{ - while (ix < 0) { - ix = ix + 360; - } - while (ix >= 360) { - ix = ix - 360; - } - if (ix < 90) { return cosine[90 - ix] / 10; } - if (ix < 180) { return cosine[ix - 90] / 10; } - if (ix < 270) { return -cosine[270 - ix] / 10; } - if (ix < 360) { return -cosine[ix - 270] / 10; } - return 0; -} - -int cos_zelf(int ix) -{ - while (ix < 0) { - ix = ix + 360; - } - 
while (ix >= 360) { - ix = ix - 360; - } - if (ix < 90) { return cosine[ix] / 10; } - if (ix < 180) { return -cosine[180 - ix] / 10; } - if (ix < 270) { return -cosine[ix - 180] / 10; } - if (ix < 360) { return cosine[360 - ix] / 10; } - return 0; -} - -int tan_zelf(int ix) -{ - - while (ix < 0) { - ix = ix + 360; - } - while (ix >= 360) { - ix = ix - 360; - } - if (ix == 90) { return 9999; } - if (ix == 270) { return -9999; } - if (ix < 90) { return (1000 * cosine[90 - ix]) / cosine[ix]; } - if (ix < 180) { return -(1000 * cosine[ix - 90]) / cosine[180 - ix]; } - if (ix < 270) { return (1000 * cosine[270 - ix]) / cosine[ix - 180]; } - if (ix < 360) { return -(1000 * cosine[ix - 270]) / cosine[360 - ix]; } - return 0; -} - -int asin_zelf(int y, int hyp) -{ - int quot, sgn, ix; - if ((y > hyp) || (y == 0)) { - return 0; - } - sgn = hyp * y; - if (hyp < 0) { - hyp = -hyp; - } - if (y < 0) { - y = -y; - } - quot = (y * 10000) / hyp; - if (quot > 9999) { - quot = 9999; - } - for (ix = 0; ix < 90; ix++) - if ((quot < cosine[ix]) && (quot >= cosine[ix + 1])) { - break; - } - if (sgn < 0) { - return -(90 - ix); - } else { - return 90 - ix; - } -} - -int acos_zelf(int x, int hyp) -{ - int quot, sgn, ix; - if (x > hyp) { - return 0; - } - if (x == 0) { - if (hyp < 0) { - return -90; - } else { - return 90; - } - return 0; - } - sgn = hyp * x; - if (hyp < 0) { - hyp = -hyp; - } - if (x < 0) { - x = -x; - } - quot = (x * 10000) / hyp; - if (quot > 9999) { - quot = 9999; - } - for (ix = 0; ix < 90; ix++) - if ((quot < cosine[ix]) && (quot >= cosine[ix + 1])) { - break; - } - if (sgn < 0) { - return -ix; - } else { - return ix; - } -} - -//atan_zelf(y/x) in degrees -int atan_zelf(int y, int x) -{ - int angle, flip, t, xy; - - if (x < 0) { x = -x; } - if (y < 0) { y = -y; } - flip = 0; - if (x < y) { flip = 1; t = x; x = y; y = t; } - if (x == 0) { return 90; } - - xy = (y * 1000) / x; - angle = (360 * xy) / (6283 + ((((1764 * xy) / 1000) * xy) / 1000)); - if (flip) { angle = 90 - angle; } - return angle; -} - -unsigned int isqrt(unsigned int val) -{ - unsigned int temp, g = 0, b = 0x8000, bshft = 15; - do { - if (val >= (temp = (((g << 1) + b) << bshft--))) { - g += b; - val -= temp; - } - } while (b >>= 1); - return g; -} diff --git a/sw/airborne/modules/computer_vision/cv/trig.h b/sw/airborne/modules/computer_vision/cv/trig.h deleted file mode 100644 index 9603d41789c..00000000000 --- a/sw/airborne/modules/computer_vision/cv/trig.h +++ /dev/null @@ -1,7 +0,0 @@ -int cos_zelf(int ix); -int sin_zelf(int); -int tan_zelf(int); -int acos_zelf(int, int); -int asin_zelf(int, int); -int atan_zelf(int, int); -unsigned int isqrt(unsigned int); diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c index bcbcd8fcb5e..576a4e097aa 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c @@ -88,6 +88,9 @@ static void *v4l2_capture_thread(void *data) } assert(buf.index < dev->buffers_cnt); + // Copy the timestamp + memcpy(&dev->buffers[buf.index].timestamp, &buf.timestamp, sizeof(struct timeval)); + // Update the dequeued id // We need lock because between setting prev_idx and updating the deq_idx the deq_idx could change pthread_mutex_lock(&dev->mutex); diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h index 650e9810fc8..7c83e44da94 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h +++ 
b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h @@ -30,14 +30,17 @@ #include "std.h" #include +#include +#include #define V4L2_IMG_NONE 255 //< There currently no image available /* V4L2 memory mapped image buffer */ struct v4l2_img_buf { - uint8_t idx; //< The index of the buffer - size_t length; //< The size of the buffer - void *buf; //< Pointer to the memory mapped buffer + uint8_t idx; //< The index of the buffer + size_t length; //< The size of the buffer + struct timeval timestamp; //< The time value of the image + void *buf; //< Pointer to the memory mapped buffer }; /* V4L2 device */ diff --git a/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.c b/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.c deleted file mode 100644 index 8394ba32ec9..00000000000 --- a/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (C) 2014 Hann Woei Ho - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/opticflow/hover_stabilization.c - * @brief optical-flow based hovering for Parrot AR.Drone 2.0 - * - * Control loops for optic flow based hovering. - * Computes setpoint for the lower level attitude stabilization to control horizontal velocity. - */ - -// Own Header -#include "hover_stabilization.h" - -// Stabilization -#include "firmwares/rotorcraft/stabilization/stabilization_attitude.h" -#include "firmwares/rotorcraft/guidance/guidance_v.h" -#include "autopilot.h" - -// Downlink -#include "subsystems/datalink/downlink.h" - -// Controller Gains -/* error if some gains are negative */ -#if (VISION_PHI_PGAIN < 0) || \ - (VISION_PHI_IGAIN < 0) || \ - (VISION_THETA_PGAIN < 0) || \ - (VISION_THETA_IGAIN < 0) -#error "ALL control gains have to be positive!!!" -#endif -bool activate_opticflow_hover; -float vision_desired_vx; -float vision_desired_vy; -int32_t vision_phi_pgain; -int32_t vision_phi_igain; -int32_t vision_theta_pgain; -int32_t vision_theta_igain; - -// Controller Commands -struct Int32Eulers cmd_euler; - -// Hover Stabilization -float Velx_Int; -float Vely_Int; -float Error_Velx; -float Error_Vely; - -#define CMD_OF_SAT 1500 // 40 deg = 2859.1851 -unsigned char saturateX = 0, saturateY = 0; -unsigned int set_heading; - - -#ifndef VISION_HOVER -#define VISION_HOVER TRUE -#endif - -#ifndef VISION_PHI_PGAIN -#define VISION_PHI_PGAIN 500. -#endif - -#ifndef VISION_PHI_IGAIN -#define VISION_PHI_IGAIN 10. -#endif - -#ifndef VISION_THETA_PGAIN -#define VISION_THETA_PGAIN 500. -#endif - -#ifndef VISION_THETA_IGAIN -#define VISION_THETA_IGAIN 10. -#endif - -#ifndef VISION_DESIRED_VX -#define VISION_DESIRED_VX 0. -#endif - -#ifndef VISION_DESIRED_VY -#define VISION_DESIRED_VY 0. 
-#endif - -void run_opticflow_hover(void); - -void guidance_h_module_enter(void) -{ - // INIT - Velx_Int = 0; - Vely_Int = 0; - // GUIDANCE: Set Hover-z-hold - guidance_v_z_sp = -1; -} - -void guidance_h_module_read_rc(void) -{ - // Do not read RC - // Setpoint being set by vision -} - -void guidance_h_module_run(bool_t in_flight) -{ - // Run - // Setpoint being set by vision - stabilization_attitude_run(in_flight); -} - - -void init_hover_stabilization_onvision() -{ - INT_EULERS_ZERO(cmd_euler); - - activate_opticflow_hover = VISION_HOVER; - vision_phi_pgain = VISION_PHI_PGAIN; - vision_phi_igain = VISION_PHI_IGAIN; - vision_theta_pgain = VISION_THETA_PGAIN; - vision_theta_igain = VISION_THETA_IGAIN; - vision_desired_vx = VISION_DESIRED_VX; - vision_desired_vy = VISION_DESIRED_VY; - - set_heading = 1; - - Error_Velx = 0; - Error_Vely = 0; - Velx_Int = 0; - Vely_Int = 0; -} - -void run_hover_stabilization_onvision(struct CVresults* vision ) -{ - struct FloatVect3 V_body; - if (activate_opticflow_hover == TRUE) { - // Compute body velocities from ENU - struct FloatVect3 *vel_ned = (struct FloatVect3*)stateGetSpeedNed_f(); - struct FloatQuat *q_n2b = stateGetNedToBodyQuat_f(); - float_quat_vmult(&V_body, q_n2b, vel_ned); - } - - // ************************************************************************************* - // Downlink Message - // ************************************************************************************* - - DOWNLINK_SEND_OF_HOVER(DefaultChannel, DefaultDevice, - &vision->FPS, &vision->dx_sum, &vision->dy_sum, &vision->OFx, &vision->OFy, - &vision->diff_roll, &vision->diff_pitch, - &vision->Velx, &vision->Vely, - &V_body.x, &V_body.y, - &vision->cam_h, &vision->count); - - if (autopilot_mode != AP_MODE_MODULE) { - return; - } - - if (vision->flow_count) { - Error_Velx = vision->Velx - vision_desired_vx; - Error_Vely = vision->Vely - vision_desired_vy; - } else { - Error_Velx = 0; - Error_Vely = 0; - } - - if (saturateX == 0) { - if (activate_opticflow_hover == TRUE) { - Velx_Int += vision_theta_igain * Error_Velx; - } else { - Velx_Int += vision_theta_igain * V_body.x; - } - } - if (saturateY == 0) { - if (activate_opticflow_hover == TRUE) { - Vely_Int += vision_phi_igain * Error_Vely; - } else { - Vely_Int += vision_phi_igain * V_body.y; - } - } - - if (set_heading) { - cmd_euler.psi = stateGetNedToBodyEulers_i()->psi; - set_heading = 0; - } - - if (activate_opticflow_hover == TRUE) { - cmd_euler.phi = - (vision_phi_pgain * Error_Vely + Vely_Int); - cmd_euler.theta = (vision_theta_pgain * Error_Velx + Velx_Int); - } else { - cmd_euler.phi = - (vision_phi_pgain * V_body.y + Vely_Int); - cmd_euler.theta = (vision_theta_pgain * V_body.x + Velx_Int); - } - - saturateX = 0; saturateY = 0; - if (cmd_euler.phi < -CMD_OF_SAT) {cmd_euler.phi = -CMD_OF_SAT; saturateX = 1;} - else if (cmd_euler.phi > CMD_OF_SAT) {cmd_euler.phi = CMD_OF_SAT; saturateX = 1;} - if (cmd_euler.theta < -CMD_OF_SAT) {cmd_euler.theta = -CMD_OF_SAT; saturateY = 1;} - else if (cmd_euler.theta > CMD_OF_SAT) {cmd_euler.theta = CMD_OF_SAT; saturateY = 1;} - - stabilization_attitude_set_rpy_setpoint_i(&cmd_euler); - DOWNLINK_SEND_VISION_STABILIZATION(DefaultChannel, DefaultDevice, &vision->Velx, &vision->Vely, &Velx_Int, - &Vely_Int, &cmd_euler.phi, &cmd_euler.theta); -} diff --git a/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.h b/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.h deleted file mode 100644 index 285ac708649..00000000000 --- 
a/sw/airborne/modules/computer_vision/opticflow/hover_stabilization.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2014 Hann Woei Ho - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/opticflow/hover_stabilization.h - * @brief optical-flow based hovering for Parrot AR.Drone 2.0 - * - * Control loops for optic flow based hovering. - * Computes setpoint for the lower level attitude stabilization to control horizontal velocity. - */ - -#ifndef HOVER_STABILIZATION_H_ -#define HOVER_STABILIZATION_H_ - -#include -#include "inter_thread_data.h" - -// Controller module - -// Vertical loop re-uses Alt-hold -#define GUIDANCE_V_MODE_MODULE_SETTING GUIDANCE_V_MODE_HOVER - -// Horizontal mode is a specific controller -#define GUIDANCE_H_MODE_MODULE_SETTING GUIDANCE_H_MODE_MODULE - -// Implement own Horizontal loops -extern void guidance_h_module_enter(void); -extern void guidance_h_module_read_rc(void); -extern void guidance_h_module_run(bool_t in_flight); - - -void init_hover_stabilization_onvision(void); -void run_hover_stabilization_onvision(struct CVresults *vision); - -extern bool activate_opticflow_hover; -extern float vision_desired_vx; -extern float vision_desired_vy; -extern int32_t vision_phi_pgain; -extern int32_t vision_phi_igain; -extern int32_t vision_theta_pgain; -extern int32_t vision_theta_igain; - -#endif /* HOVER_STABILIZATION_H_ */ diff --git a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h index 65ef0cb41e0..87ab62f99b3 100644 --- a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h +++ b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h @@ -29,8 +29,8 @@ #ifndef _INTER_THREAD_DATA_H #define _INTER_THREAD_DATA_H -/// Data from thread to module -struct CVresults { +/* The result calculated from the opticflow */ +struct opticflow_result_t { int cnt; // Number of processed frames float Velx; // Velocity as measured by camera @@ -42,12 +42,11 @@ struct CVresults { float OFx, OFy, dx_sum, dy_sum; float diff_roll; float diff_pitch; - float FPS; + float fps; //< Frames per second of the optical flow calculation }; -/// Data from module to thread -struct PPRZinfo { - int cnt; // IMU msg counter +/* The state of the drone when it took an image */ +struct opticflow_state_t { float phi; // roll [rad] float theta; // pitch [rad] float agl; // height above ground [m] diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c new file mode 100644 index 00000000000..ce8086abfce --- /dev/null +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2014 Hann Woei Ho + * 2015 Freek van Tienen + * + * This file is part of Paparazzi. 
+ * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, see + * . + */ + +/** + * @file modules/computer_vision/opticflow/opticflow_calculator.c + * @brief Estimate velocity from optic flow. + * + * Using images from a vertical camera and IMU sensor data. + */ + +#include "std.h" + +#include +#include +#include + +// Own Header +#include "opticflow_calculator.h" + +// Computer Vision +#include "cv/opticflow/lucas_kanade.h" +#include "cv/opticflow/fast_rosten.h" + +// ARDrone Vertical Camera Parameters +#define FOV_H 0.67020643276 +#define FOV_W 0.89360857702 +#define Fx_ARdrone 343.1211 +#define Fy_ARdrone 348.5053 + +// Corner Detection +#define MAX_COUNT 100 + +// Flow Derotation +#define FLOW_DEROTATION + +/* Usefull for calculating FPS */ +static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime); + +/** + * Initialize the opticflow calculator + */ +void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h) +{ + /* Set the width/height of the image */ + opticflow->img_w = w; + opticflow->img_h = h; + + /* Create the image buffers */ + opticflow->gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); + opticflow->prev_gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); + + /* Set the previous values */ + opticflow->got_first_img = FALSE; + opticflow->prev_pitch = 0.0; + opticflow->prev_roll = 0.0; +} + +/** + * Run the optical flow on a new image frame + */ +void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct v4l2_img_buf *img, struct opticflow_result_t *result) +{ + // Corner Tracking + // Working Variables + int max_count = 25; + int borderx = 24, bordery = 24; + int x[MAX_COUNT], y[MAX_COUNT]; + int new_x[MAX_COUNT], new_y[MAX_COUNT]; + int status[MAX_COUNT]; + int dx[MAX_COUNT], dy[MAX_COUNT]; + + // Update FPS for information + result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->timestamp) / 1000.); + memcpy(&opticflow->prev_timestamp, &img->timestamp, sizeof(struct timeval)); + + if (!opticflow->got_first_img) { + CvtYUYV2Gray(opticflow->prev_gray_frame, img->buf, opticflow->img_w, opticflow->img_h); + opticflow->got_first_img = TRUE; + } + + // ************************************************************************************* + // Corner detection + // ************************************************************************************* + + // FAST corner detection + int fast_threshold = 20; + xyFAST *pnts_fast; + pnts_fast = fast9_detect((const byte *)opticflow->prev_gray_frame, opticflow->img_w, opticflow->img_h, opticflow->img_w, + fast_threshold, &result->count); + if (result->count > MAX_COUNT) { result->count = MAX_COUNT; } + for (int i = 0; i < result->count; i++) { + x[i] = pnts_fast[i].x; + y[i] = pnts_fast[i].y; + } + free(pnts_fast); + + // Remove neighboring corners + const float min_distance = 3; + float min_distance2 = min_distance * min_distance; + int labelmin[MAX_COUNT]; + 
for (int i = 0; i < result->count; i++) { + for (int j = i + 1; j < result->count; j++) { + // distance squared: + float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]); + if (distance2 < min_distance2) { + labelmin[i] = 1; + } + } + } + + int count_fil = result->count; + for (int i = result->count - 1; i >= 0; i--) { + int remove_point = 0; + + if (labelmin[i]) { + remove_point = 1; + } + + if (remove_point) { + for (int c = i; c < count_fil - 1; c++) { + x[c] = x[c + 1]; + y[c] = y[c + 1]; + } + count_fil--; + } + } + + if (count_fil > max_count) { count_fil = max_count; } + result->count = count_fil; + + // ************************************************************************************* + // Corner Tracking + // ************************************************************************************* + CvtYUYV2Gray(opticflow->gray_frame, img->buf, opticflow->img_w, opticflow->img_h); + + opticFlowLK(opticflow->gray_frame, opticflow->prev_gray_frame, x, y, + count_fil, opticflow->img_w, opticflow->img_h, new_x, new_y, status, 5, 100); + + result->flow_count = count_fil; + for (int i = count_fil - 1; i >= 0; i--) { + int remove_point = 1; + + if (status[i] && !(new_x[i] < borderx || new_x[i] > (opticflow->img_w - 1 - borderx) || + new_y[i] < bordery || new_y[i] > (opticflow->img_h - 1 - bordery))) { + remove_point = 0; + } + + if (remove_point) { + for (int c = i; c < result->flow_count - 1; c++) { + x[c] = x[c + 1]; + y[c] = y[c + 1]; + new_x[c] = new_x[c + 1]; + new_y[c] = new_y[c + 1]; + } + result->flow_count--; + } + } + + result->dx_sum = 0.0; + result->dy_sum = 0.0; + + // Optical Flow Computation + for (int i = 0; i < result->flow_count; i++) { + dx[i] = new_x[i] - x[i]; + dy[i] = new_y[i] - y[i]; + } + + // Median Filter + if (result->flow_count) { + quick_sort_int(dx, result->flow_count); // 11 + quick_sort_int(dy, result->flow_count); // 11 + + result->dx_sum = (float) dx[result->flow_count / 2]; + result->dy_sum = (float) dy[result->flow_count / 2]; + } else { + result->dx_sum = 0.0; + result->dy_sum = 0.0; + } + + // Flow Derotation + result->diff_pitch = (state->theta - opticflow->prev_pitch) * opticflow->img_h / FOV_H; + result->diff_roll = (state->phi - opticflow->prev_roll) * opticflow->img_w / FOV_W; + opticflow->prev_pitch = state->theta; + opticflow->prev_roll = state->phi; + + float OFx_trans, OFy_trans; +#ifdef FLOW_DEROTATION + if (result->flow_count) { + OFx_trans = result->dx_sum - result->diff_roll; + OFy_trans = result->dy_sum - result->diff_pitch; + + if ((OFx_trans <= 0) != (result->dx_sum <= 0)) { + OFx_trans = 0; + OFy_trans = 0; + } + } else { + OFx_trans = result->dx_sum; + OFy_trans = result->dy_sum; + } +#else + OFx_trans = result->dx_sum; + OFy_trans = result->dy_sum; +#endif + + // Average Filter + OFfilter(&result->OFx, &result->OFy, OFx_trans, OFy_trans, result->flow_count, 1); + + // Velocity Computation + if (state->agl < 0.01) { + result->cam_h = 0.01; + } + else { + result->cam_h = state->agl; + } + + if (result->flow_count) { + result->Velx = result->OFy * result->cam_h * result->fps / Fy_ARdrone + 0.05; + result->Vely = -result->OFx * result->cam_h * result->fps / Fx_ARdrone - 0.1; + } else { + result->Velx = 0.0; + result->Vely = 0.0; + } + + // ************************************************************************************* + // Next Loop Preparation + // ************************************************************************************* + + memcpy(opticflow->prev_gray_frame, opticflow->gray_frame, 
opticflow->img_w * opticflow->img_h); +} + +/** + * Calculate the elapsed time between start and finish in milliseconds + */ +static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime) +{ + uint32_t msec; + msec=(finishtime->tv_sec-starttime->tv_sec)*1000; + msec+=(finishtime->tv_usec-starttime->tv_usec)/1000; + return msec; +} diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h new file mode 100644 index 00000000000..83d9ed4b04a --- /dev/null +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Hann Woei Ho + * 2015 Freek van Tienen + * + * This file is part of Paparazzi. + * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, see + * . + */ + +/** + * @file modules/computer_vision/opticflow/opticflow_calculator.h + * @brief Calculate velocity from optic flow. + * + * Using images from a vertical camera and IMU sensor data. + */ + +#ifndef OPTICFLOW_CALCULATOR_H +#define OPTICFLOW_CALCULATOR_H + +#include "std.h" +#include "inter_thread_data.h" +#include "lib/v4l/v4l2.h" +#include + +struct opticflow_t +{ + unsigned int img_w; //< The image width + unsigned int img_h; //< The image height + + uint8_t *gray_frame; //< Current gray image frame + uint8_t *prev_gray_frame; //< Previous gray image frame + + bool_t got_first_img; //< If we got an image to work with + + // Store previous values + float prev_pitch; + float prev_roll; + struct timeval prev_timestamp; //< Timestamp of the previous image frame +}; + + +void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h); +void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct v4l2_img_buf *img, struct opticflow_result_t *result); + +#endif /* OPTICFLOW_CALCULATOR_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.c deleted file mode 100644 index a36342439fd..00000000000 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (C) 2015 The Paparazzi Community - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * .
- */ - -/** - * @file modules/computer_vision/opticflow/opticflow_thread.c - * - */ - -// Sockets -#include -#include -#include -#include - -#include "opticflow_thread.h" - - -///////////////////////////////////////////////////////////////////////// -// COMPUTER VISION THREAD - -// Video -#include "v4l/v4l2.h" -#include "resize.h" - -// Payload Code -#include "visual_estimator.h" - -// Downlink Video -//#define DOWNLINK_VIDEO 1 - -#ifdef DOWNLINK_VIDEO -#include "encoding/jpeg.h" -#include "encoding/rtp.h" -#endif - -#include -#define DEBUG_INFO(X, ...) ; - -static volatile enum{RUN,EXIT} computer_vision_thread_command = RUN; /** request to close: set to 1 */ - -void computervision_thread_request_exit(void) { - computer_vision_thread_command = EXIT; -} - -void *computervision_thread_main(void *args) -{ - int thread_socket = *(int *) args; - - // Local data in/out - struct CVresults vision_results; - struct PPRZinfo autopilot_data; - - // Status - computer_vision_thread_command = RUN; - - - /* On ARDrone2: - * video1 = front camera; video2 = bottom camera - */ - // Create a V4L2 device - struct v4l2_device *dev = v4l2_init("/dev/video2", 320, 240, 10); - if (dev == NULL) { - printf("Error initialising video\n"); - return 0; - } - - // Start the streaming on the V4L2 device - if(!v4l2_start_capture(dev)) { - printf("Could not start capture\n"); - return 0; - } - -#ifdef DOWNLINK_VIDEO - // Video Compression - uint8_t *jpegbuf = (uint8_t *)malloc(dev->w * dev->h * 2); - - // Network Transmit - struct UdpSocket *vsock; - //#define FMS_UNICAST 0 - //#define FMS_BROADCAST 1 - vsock = udp_socket("192.168.1.255", 5000, 5001, FMS_BROADCAST); -#endif - - // First Apply Settings before init - opticflow_plugin_init(dev->w, dev->h, &vision_results); - - while (computer_vision_thread_command == RUN) { - - // Wait for a new frame - struct v4l2_img_buf *img = v4l2_image_get(dev); - - // Get most recent State information - int bytes_read = sizeof(autopilot_data); - while (bytes_read == sizeof(autopilot_data)) - { - bytes_read = recv(thread_socket, &autopilot_data, sizeof(autopilot_data), MSG_DONTWAIT); - if (bytes_read != sizeof(autopilot_data)) { - if (bytes_read != -1) { - printf("[thread] Failed to read %d bytes PPRZ info from socket.\n",bytes_read); - } - } - } - DEBUG_INFO("[thread] Read # %d\n",autopilot_data.cnt); - - // Run Image Processing with image and data and get results - opticflow_plugin_run(img->buf, &autopilot_data, &vision_results); - - //printf("Vision result %f %f\n", vision_results.Velx, vision_results.Vely); - - /* Send results to main */ - vision_results.cnt++; - int bytes_written = write(thread_socket, &vision_results, sizeof(vision_results)); - if (bytes_written != sizeof(vision_results)){ - perror("[thread] Failed to write to socket.\n"); - } - DEBUG_INFO("[thread] Write # %d, (bytes %d)\n",vision_results.cnt, bytes_written); - -#ifdef DOWNLINK_VIDEO - // JPEG encode the image: - uint32_t quality_factor = 10; //20 if no resize, - uint8_t dri_header = 0; - uint32_t image_format = FOUR_TWO_TWO; // format (in jpeg.h) - uint8_t *end = encode_image(img->buf, jpegbuf, quality_factor, image_format, dev->w, dev->h, dri_header); - uint32_t size = end - (jpegbuf); - - //printf("Sending an image ...%u\n", size); - send_rtp_frame(vsock, jpegbuf, size, dev->w, dev->h, 0, quality_factor, dri_header, 0); -#endif - - // Free the image - v4l2_image_free(dev, img); - } - - printf("Thread Closed\n"); - v4l2_close(dev); - return 0; -} diff --git 
a/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.h deleted file mode 100644 index 5826da2229b..00000000000 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_thread.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (C) 2015 The Paparazzi Community - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/opticflow/opticflow_thread.h - * @brief computer vision thread - * - */ - -#ifndef OPTICFLOW_THREAD_H -#define OPTICFLOW_THREAD_H - -void *computervision_thread_main(void *args); /* computer vision thread: should be given a pointer to a socketpair as argument */ -void computervision_thread_request_exit(void); - -#endif diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c new file mode 100644 index 00000000000..f5a1f168c49 --- /dev/null +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2014 Hann Woei Ho + * 2015 Freek van Tienen + * + * This file is part of Paparazzi. + * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, see + * . + */ + +/** + * @file modules/computer_vision/opticflow/hover_stabilization.c + * @brief Optical-flow based control for Linux based systems + * + * Control loops for optic flow based hovering. + * Computes setpoint for the lower level attitude stabilization to control horizontal velocity. 
+ */ + +// Own Header +#include "stabilization_opticflow.h" + +// Stabilization +#include "firmwares/rotorcraft/stabilization/stabilization_attitude.h" +#include "firmwares/rotorcraft/guidance/guidance_v.h" +#include "autopilot.h" +#include "subsystems/datalink/downlink.h" + +#define CMD_OF_SAT 1500 // 40 deg = 2859.1851 + +#ifndef VISION_PHI_PGAIN +#define VISION_PHI_PGAIN 500 +#endif +PRINT_CONFIG_VAR(VISION_PHI_PGAIN); + +#ifndef VISION_PHI_IGAIN +#define VISION_PHI_IGAIN 10 +#endif +PRINT_CONFIG_VAR(VISION_PHI_IGAIN); + +#ifndef VISION_THETA_PGAIN +#define VISION_THETA_PGAIN 500 +#endif +PRINT_CONFIG_VAR(VISION_THETA_PGAIN); + +#ifndef VISION_THETA_IGAIN +#define VISION_THETA_IGAIN 10 +#endif +PRINT_CONFIG_VAR(VISION_THETA_IGAIN); + +#ifndef VISION_DESIRED_VX +#define VISION_DESIRED_VX 0 +#endif +PRINT_CONFIG_VAR(VISION_DESIRED_VX); + +#ifndef VISION_DESIRED_VY +#define VISION_DESIRED_VY 0 +#endif +PRINT_CONFIG_VAR(VISION_DESIRED_VY); + +/* Check the control gains */ +#if (VISION_PHI_PGAIN < 0) || \ + (VISION_PHI_IGAIN < 0) || \ + (VISION_THETA_PGAIN < 0) || \ + (VISION_THETA_IGAIN < 0) +#error "ALL control gains have to be positive!!!" +#endif + +/* Initialize the default gains and settings */ +struct opticflow_stab_t opticflow_stab = { + .phi_pgain = VISION_PHI_PGAIN, + .phi_igain = VISION_PHI_IGAIN, + .theta_pgain = VISION_THETA_PGAIN, + .theta_igain = VISION_THETA_IGAIN, + .desired_vx = VISION_DESIRED_VX, + .desired_vy = VISION_DESIRED_VY +}; + +/** + * Horizontal guidance mode enter resets the errors + * and starts the controller. + */ +void guidance_h_module_enter(void) +{ + // Set the errors to 0 + opticflow_stab.err_vx = 0; + opticflow_stab.err_vx_int = 0; + opticflow_stab.err_vy = 0; + opticflow_stab.err_vy_int = 0; + + // Set the euler command to 0 + INT_EULERS_ZERO(opticflow_stab.cmd); + + // GUIDANCE: Set Hover-z-hold (1 meter???) + guidance_v_z_sp = -1; +} + +/** + * Read the RC commands + */ +void guidance_h_module_read_rc(void) +{ + // TODO: change the desired vx/vy +} + +/** + * Main guidance loop + */ +void guidance_h_module_run(bool_t in_flight) +{ + // Run the default attitude stabilization + stabilization_attitude_run(in_flight); +} + +/** + * Update the controls based on a vision result + */ +void stabilization_opticflow_update(struct opticflow_result_t* result) +{ + // ************************************************************************************* + // Downlink Message + // ************************************************************************************* + + float test = 1.5; + DOWNLINK_SEND_OF_HOVER(DefaultChannel, DefaultDevice, + &result->fps, &result->dx_sum, &result->dy_sum, &result->OFx, &result->OFy, + &result->diff_roll, &result->diff_pitch, + &result->Velx, &result->Vely, + &test, &test, + &result->cam_h, &result->count); + + /* Check if we are in the correct AP_MODE before setting commands */ + if (autopilot_mode != AP_MODE_MODULE) { + return; + } + + /* Calculate the error if we have enough flow */ + if (result->flow_count > 0) { + opticflow_stab.err_vx = result->Velx - opticflow_stab.desired_vx; + opticflow_stab.err_vy = result->Vely - opticflow_stab.desired_vy; + } else { + opticflow_stab.err_vx = 0; + opticflow_stab.err_vy = 0; + } + + /* Calculate the integrated errors (TODO: bound??) 
*/ + opticflow_stab.err_vx_int += opticflow_stab.theta_igain * opticflow_stab.err_vx; + opticflow_stab.err_vy_int += opticflow_stab.phi_igain * opticflow_stab.err_vy; + + /* Calculate the commands */ + opticflow_stab.cmd.theta = (opticflow_stab.theta_pgain * opticflow_stab.err_vx + opticflow_stab.err_vx_int); + opticflow_stab.cmd.phi = -(opticflow_stab.phi_pgain * opticflow_stab.err_vy + opticflow_stab.err_vy_int); + + /* Bound the roll and pitch commands */ + BoundAbs(opticflow_stab.cmd.phi, CMD_OF_SAT); + BoundAbs(opticflow_stab.cmd.theta, CMD_OF_SAT); + + /* Update the setpoint */ + stabilization_attitude_set_rpy_setpoint_i(&opticflow_stab.cmd); + //DOWNLINK_SEND_VISION_STABILIZATION(DefaultChannel, DefaultDevice, &result->Velx, &result->Vely, &Velx_Int, + // &Vely_Int, &cmd_euler.phi, &cmd_euler.theta); +} diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h new file mode 100644 index 00000000000..2391bc0b524 --- /dev/null +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2014 Hann Woei Ho + * 2015 Freek van Tienen + * + * This file is part of Paparazzi. + * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, see + * . + */ + +/** + * @file modules/computer_vision/opticflow/hover_stabilization.h + * @brief Optical-flow based control for Linux based systems + * + * Control loops for optic flow based hovering. + * Computes setpoint for the lower level attitude stabilization to control horizontal velocity. + */ + +#ifndef CV_STABILIZATION_OPTICFLOW_H_ +#define CV_STABILIZATION_OPTICFLOW_H_ + +#include "std.h" +#include "lib/v4l/v4l2.h" +#include "inter_thread_data.h" +#include "math/pprz_algebra_int.h" + +/* The opticflow stabilization */ +struct opticflow_stab_t { + int32_t phi_pgain; //< The roll P gain on the err_vx + int32_t phi_igain; //< The roll I gain on the err_vx_int + int32_t theta_pgain; //< The pitch P gain on the err_vy + int32_t theta_igain; //< The pitch I gain on the err_vy_int + float desired_vx; //< The desired velocity in the x direction (m/s??) + float desired_vy; //< The desired velocity in the y direction (m/s??) + + float err_vx; //< The velocity error in x direction (m/s??) + float err_vx_int; //< The integrated velocity error in x direction (m/s??) + float err_vy; //< The velocity error in y direction (m/s??) + float err_vy_int; //< The integrated velocity error in y direction (m/s??) 
+ + struct Int32Eulers cmd; //< The commands that are sent to the hover loop +}; +extern struct opticflow_stab_t opticflow_stab; + +// Vertical loop re-uses Alt-hold +#define GUIDANCE_V_MODE_MODULE_SETTING GUIDANCE_V_MODE_HOVER + +// Horizontal mode is a specific controller +#define GUIDANCE_H_MODE_MODULE_SETTING GUIDANCE_H_MODE_MODULE + +// Implement own Horizontal loops +extern void guidance_h_module_enter(void); +extern void guidance_h_module_read_rc(void); +extern void guidance_h_module_run(bool_t in_flight); + +// Update the stabilization commands based on a vision result +void stabilization_opticflow_update(struct opticflow_result_t *vision); + +#endif /* CV_STABILIZATION_OPTICFLOW_H_ */ diff --git a/sw/airborne/modules/computer_vision/opticflow/visual_estimator.c b/sw/airborne/modules/computer_vision/opticflow/visual_estimator.c deleted file mode 100644 index 48bd4ffd5d2..00000000000 --- a/sw/airborne/modules/computer_vision/opticflow/visual_estimator.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (C) 2014 Hann Woei Ho - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/opticflow/visual_estimator.c - * @brief Estimate velocity from optic flow. - * - * Using sensors from vertical camera and IMU of Parrot AR.Drone 2.0. - * - * Warning: all this code is called form the Vision-Thread: do not access any autopilot data in here.
- */ - -#include "std.h" - -#include -#include -#include - -// Own Header -#include "visual_estimator.h" - -// Computer Vision -#include "opticflow/optic_flow_int.h" -#include "opticflow/fast9/fastRosten.h" - -// for FPS -#include "modules/computer_vision/cv/framerate.h" - - -// Local variables -struct visual_estimator_struct -{ - // Image size - unsigned int imgWidth; - unsigned int imgHeight; - - // Images - uint8_t *prev_frame; - uint8_t *gray_frame; - uint8_t *prev_gray_frame; - - // Initialization - int old_img_init; - - // Store previous - float prev_pitch; - float prev_roll; -} visual_estimator; - -// ARDrone Vertical Camera Parameters -#define FOV_H 0.67020643276 -#define FOV_W 0.89360857702 -#define Fx_ARdrone 343.1211 -#define Fy_ARdrone 348.5053 - -// Corner Detection -#define MAX_COUNT 100 - -// Flow Derotation -#define FLOW_DEROTATION - - -// Called by plugin -void opticflow_plugin_init(unsigned int w, unsigned int h, struct CVresults *results) -{ - // Initialize variables - visual_estimator.imgWidth = w; - visual_estimator.imgHeight = h; - - visual_estimator.gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); - visual_estimator.prev_frame = (unsigned char *) calloc(w * h * 2, sizeof(uint8_t)); - visual_estimator.prev_gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); - - visual_estimator.old_img_init = 1; - visual_estimator.prev_pitch = 0.0; - visual_estimator.prev_roll = 0.0; - - results->OFx = 0.0; - results->OFy = 0.0; - results->dx_sum = 0.0; - results->dy_sum = 0.0; - results->diff_roll = 0.0; - results->diff_pitch = 0.0; - results->cam_h = 0.0; - results->Velx = 0.0; - results->Vely = 0.0; - results->flow_count = 0; - results->cnt = 0; - results->count = 0; - - framerate_init(); -} - -void opticflow_plugin_run(unsigned char *frame, struct PPRZinfo* info, struct CVresults *results) -{ - // Corner Tracking - // Working Variables - int max_count = 25; - int borderx = 24, bordery = 24; - int x[MAX_COUNT], y[MAX_COUNT]; - int new_x[MAX_COUNT], new_y[MAX_COUNT]; - int status[MAX_COUNT]; - int dx[MAX_COUNT], dy[MAX_COUNT]; - int w = visual_estimator.imgWidth; - int h = visual_estimator.imgHeight; - - // Framerate Measuring - results->FPS = framerate_run(); - - if (visual_estimator.old_img_init == 1) { - memcpy(visual_estimator.prev_frame, frame, w * h * 2); - CvtYUYV2Gray(visual_estimator.prev_gray_frame, visual_estimator.prev_frame, w, h); - visual_estimator.old_img_init = 0; - } - - // ************************************************************************************* - // Corner detection - // ************************************************************************************* - - // FAST corner detection - int fast_threshold = 20; - xyFAST *pnts_fast; - pnts_fast = fast9_detect((const byte *)visual_estimator.prev_gray_frame, w, h, w, - fast_threshold, &results->count); - if (results->count > MAX_COUNT) { results->count = MAX_COUNT; } - for (int i = 0; i < results->count; i++) { - x[i] = pnts_fast[i].x; - y[i] = pnts_fast[i].y; - } - free(pnts_fast); - - // Remove neighboring corners - const float min_distance = 3; - float min_distance2 = min_distance * min_distance; - int labelmin[MAX_COUNT]; - for (int i = 0; i < results->count; i++) { - for (int j = i + 1; j < results->count; j++) { - // distance squared: - float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]); - if (distance2 < min_distance2) { - labelmin[i] = 1; - } - } - } - - int count_fil = results->count; - for (int i = results->count - 1; i >= 0; i--) { - int 
remove_point = 0; - - if (labelmin[i]) { - remove_point = 1; - } - - if (remove_point) { - for (int c = i; c < count_fil - 1; c++) { - x[c] = x[c + 1]; - y[c] = y[c + 1]; - } - count_fil--; - } - } - - if (count_fil > max_count) { count_fil = max_count; } - results->count = count_fil; - - // ************************************************************************************* - // Corner Tracking - // ************************************************************************************* - CvtYUYV2Gray(visual_estimator.gray_frame, frame, w, h); - - opticFlowLK(visual_estimator.gray_frame, visual_estimator.prev_gray_frame, x, y, - count_fil, w, h, new_x, new_y, status, 5, 100); - - results->flow_count = count_fil; - for (int i = count_fil - 1; i >= 0; i--) { - int remove_point = 1; - - if (status[i] && !(new_x[i] < borderx || new_x[i] > (w - 1 - borderx) || - new_y[i] < bordery || new_y[i] > (h - 1 - bordery))) { - remove_point = 0; - } - - if (remove_point) { - for (int c = i; c < results->flow_count - 1; c++) { - x[c] = x[c + 1]; - y[c] = y[c + 1]; - new_x[c] = new_x[c + 1]; - new_y[c] = new_y[c + 1]; - } - results->flow_count--; - } - } - - results->dx_sum = 0.0; - results->dy_sum = 0.0; - - // Optical Flow Computation - for (int i = 0; i < results->flow_count; i++) { - dx[i] = new_x[i] - x[i]; - dy[i] = new_y[i] - y[i]; - } - - // Median Filter - if (results->flow_count) { - quick_sort_int(dx, results->flow_count); // 11 - quick_sort_int(dy, results->flow_count); // 11 - - results->dx_sum = (float) dx[results->flow_count / 2]; - results->dy_sum = (float) dy[results->flow_count / 2]; - } else { - results->dx_sum = 0.0; - results->dy_sum = 0.0; - } - - // Flow Derotation - results->diff_pitch = (info->theta - visual_estimator.prev_pitch) * h / FOV_H; - results->diff_roll = (info->phi - visual_estimator.prev_roll) * w / FOV_W; - visual_estimator.prev_pitch = info->theta; - visual_estimator.prev_roll = info->phi; - - float OFx_trans, OFy_trans; -#ifdef FLOW_DEROTATION - if (results->flow_count) { - OFx_trans = results->dx_sum - results->diff_roll; - OFy_trans = results->dy_sum - results->diff_pitch; - - if ((OFx_trans <= 0) != (results->dx_sum <= 0)) { - OFx_trans = 0; - OFy_trans = 0; - } - } else { - OFx_trans = results->dx_sum; - OFy_trans = results->dy_sum; - } -#else - OFx_trans = results->dx_sum; - OFy_trans = results->dy_sum; -#endif - - // Average Filter - OFfilter(&results->OFx, &results->OFy, OFx_trans, OFy_trans, results->flow_count, 1); - - // Velocity Computation - if (info->agl < 0.01) { - results->cam_h = 0.01; - } - else { - results->cam_h = info->agl; - } - - if (results->flow_count) { - results->Velx = results->OFy * results->cam_h * results->FPS / Fy_ARdrone + 0.05; - results->Vely = -results->OFx * results->cam_h * results->FPS / Fx_ARdrone - 0.1; - } else { - results->Velx = 0.0; - results->Vely = 0.0; - } - - // ************************************************************************************* - // Next Loop Preparation - // ************************************************************************************* - - memcpy(visual_estimator.prev_frame, frame, w * h * 2); - memcpy(visual_estimator.prev_gray_frame, visual_estimator.gray_frame, w * h); - -} diff --git a/sw/airborne/modules/computer_vision/opticflow/visual_estimator.h b/sw/airborne/modules/computer_vision/opticflow/visual_estimator.h deleted file mode 100644 index f499ffd320a..00000000000 --- a/sw/airborne/modules/computer_vision/opticflow/visual_estimator.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 
(C) 2014 Hann Woei Ho - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, see - * . - */ - -/** - * @file modules/computer_vision/opticflow/visual_estimator.h - * @brief Estimate velocity from optic flow. - * - * Using sensors from vertical camera and IMU of Parrot AR.Drone 2.0 - */ - -#ifndef VISUAL_ESTIMATOR_H -#define VISUAL_ESTIMATOR_H - -#include "inter_thread_data.h" - -/** - * Initialize visual estimator. - * @param w image width - * @param h image height - */ -void opticflow_plugin_init(unsigned int w, unsigned int h, struct CVresults *results); -void opticflow_plugin_run(unsigned char *frame, struct PPRZinfo* info, struct CVresults* results); - -#endif /* VISUAL_ESTIMATOR_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index dfd33ef3e39..dcba3bb6b46 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -28,120 +28,150 @@ #include "opticflow_module.h" -// Computervision Runs in a thread -#include "opticflow/opticflow_thread.h" -#include "opticflow/inter_thread_data.h" - -// Navigate Based On Vision, needed to call init/run_hover_stabilization_onvision -#include "opticflow/hover_stabilization.h" - -// Threaded computer vision -#include - -// Sockets -#include #include -#include -#include -#include - -int cv_sockets[2]; - -// Paparazzi Data +#include #include "state.h" #include "subsystems/abi.h" +#include "lib/v4l/v4l2.h" -// Downlink -#include "subsystems/datalink/downlink.h" - - -struct PPRZinfo opticflow_module_data; - -/** height above ground level, from ABI - * Used for scale computation, negative value means invalid. - */ -/** default sonar/agl to use in opticflow visual_estimator */ +/* default sonar/agl to use in opticflow visual_estimator */ #ifndef OPTICFLOW_AGL_ID #define OPTICFLOW_AGL_ID ABI_BROADCAST #endif -abi_event agl_ev; -static void agl_cb(uint8_t sender_id, float distance); +PRINT_CONFIG_VAR(OPTICFLOW_AGL_ID); -static void agl_cb(uint8_t sender_id __attribute__((unused)), float distance) -{ - if (distance > 0) { - opticflow_module_data.agl = distance; - } -} +/* The main opticflow variables */ +static struct opticflow_t opticflow; //< Opticflow calculations +static struct opticflow_result_t opticflow_result; //< The opticflow result +static struct opticflow_state_t opticflow_state; //< State of the drone to communicate with the opticflow +static struct v4l2_device *opticflow_dev; //< The opticflow camera V4L2 device +static abi_event opticflow_agl_ev; //< The altitude ABI event +static pthread_t opticflow_calc_thread; //< The optical flow calculation thread +static bool_t opticflow_got_result; //< When we have an optical flow calculation +static pthread_mutex_t opticflow_mutex; //< Mutex lock fo thread safety -#define DEBUG_INFO(X, ...) 
; +/* Static functions */ +static void *opticflow_module_calc(void *data); //< The main optical flow calculation thread +static void opticflow_agl_cb(uint8_t sender_id, float distance); //< Callback function of the ground altitude + +/** + * Initialize the optical flow module for the bottom camera + */ void opticflow_module_init(void) { - // get AGL from sonar via ABI - AbiBindMsgAGL(OPTICFLOW_AGL_ID, &agl_ev, agl_cb); - - // Initialize local data - opticflow_module_data.cnt = 0; - opticflow_module_data.phi = 0; - opticflow_module_data.theta = 0; - opticflow_module_data.agl = 0; - - // Stabilization Code Initialization - init_hover_stabilization_onvision(); + // Subscribe to the altitude above ground level ABI messages + AbiBindMsgAGL(OPTICFLOW_AGL_ID, &opticflow_agl_ev, opticflow_agl_cb); + + // Set the opticflow state to 0 + opticflow_state.phi = 0; + opticflow_state.theta = 0; + opticflow_state.agl = 0; + + // Initialize the opticflow calculation + opticflow_calc_init(&opticflow, 320, 240); + opticflow_got_result = FALSE; + + /* Try to initialize the video device */ + opticflow_dev = v4l2_init("/dev/video2", 320, 240, 10); //TODO: Fix defines + if (opticflow_dev == NULL) { + printf("[opticflow_module] Could not initialize the video device\n"); + } } - +/** + * Update the optical flow state for the calculation thread + * and update the stabilization loops with the newest result + */ void opticflow_module_run(void) { + pthread_mutex_lock(&opticflow_mutex); // Send Updated data to thread - opticflow_module_data.cnt++; - opticflow_module_data.phi = stateGetNedToBodyEulers_f()->phi; - opticflow_module_data.theta = stateGetNedToBodyEulers_f()->theta; - int bytes_written = write(cv_sockets[0], &opticflow_module_data, sizeof(opticflow_module_data)); - if (bytes_written != sizeof(opticflow_module_data) && errno !=4){ - printf("[module] Failed to write to socket: written = %d, error=%d, %s.\n",bytes_written, errno, strerror(errno)); - } - else { - DEBUG_INFO("[module] Write # %d (%d bytes)\n",opticflow_module_data.cnt, bytes_written); - } + opticflow_state.phi = stateGetNedToBodyEulers_f()->phi; + opticflow_state.theta = stateGetNedToBodyEulers_f()->theta; - // Read Latest Vision Module Results - struct CVresults vision_results; - // Warning: if the vision runs faster than the module, you need to read multiple times - int bytes_read = recv(cv_sockets[0], &vision_results, sizeof(vision_results), MSG_DONTWAIT); - if (bytes_read != sizeof(vision_results)) { - if (bytes_read != -1) { - printf("[module] Failed to read %d bytes: CV results from socket errno=%d.\n",bytes_read, errno); - } - } else { - //////////////////////////////////////////// - // Module-Side Code - //////////////////////////////////////////// - DEBUG_INFO("[module] Read vision %d\n",vision_results.cnt); - run_hover_stabilization_onvision(&vision_results); + // Update the stabilization loops on the current calculation + if(opticflow_got_result) { + stabilization_opticflow_update(&opticflow_result); + opticflow_got_result = FALSE; } + pthread_mutex_unlock(&opticflow_mutex); } +/** + * Start the optical flow calculation + */ void opticflow_module_start(void) { - pthread_t computervision_thread; - if (socketpair(AF_UNIX, SOCK_DGRAM, 0, cv_sockets) == 0) { - //////////////////////////////////////////// - // Thread-Side Code - //////////////////////////////////////////// - int rc = pthread_create(&computervision_thread, NULL, computervision_thread_main, - &cv_sockets[1]); - if (rc) { - printf("ctl_Init: Return code from 
pthread_create(mot_thread) is %d\n", rc); - } + // Check if we are not already running + if(opticflow_calc_thread != 0) { + printf("[opticflow_module] Opticflow already started!\n"); + return; } - else { - perror("Could not create socket.\n"); + + // Create the opticalflow calculation thread + int rc = pthread_create(&opticflow_calc_thread, NULL, opticflow_module_calc, NULL); + if (rc) { + printf("[opticflow_module] Could not initialize opticflow thread (return code: %d)\n", rc); } } +/** + * Stop the optical flow calculation + */ void opticflow_module_stop(void) { - computervision_thread_request_exit(); + // Stop the capturing + v4l2_stop_capture(opticflow_dev); + + // TODO: fix thread stop +} + +/** + * The main optical flow calculation thread + * This thread passes the images trough the optical flow + * calculator based on Lucas Kanade + */ +static void *opticflow_module_calc(void *data __attribute__((unused))) { + // Start the streaming on the V4L2 device + if(!v4l2_start_capture(opticflow_dev)) { + printf("[opticflow_module] Could not start capture of the camera\n"); + return 0; + } + + /* Main loop of the optical flow calculation */ + while(TRUE) { + // Try to fetch an image + struct v4l2_img_buf *img = v4l2_image_get(opticflow_dev); + + // Copy the state + pthread_mutex_lock(&opticflow_mutex); + struct opticflow_state_t temp_state; + memcpy(&temp_state, &opticflow_state, sizeof(struct opticflow_state_t)); + pthread_mutex_unlock(&opticflow_mutex); + + // Do the optical flow calculation + struct opticflow_result_t temp_result; + opticflow_calc_frame(&opticflow, &temp_state, img, &temp_result); + + // Copy the result if finished + pthread_mutex_lock(&opticflow_mutex); + memcpy(&opticflow_result, &temp_result, sizeof(struct opticflow_result_t)); + opticflow_got_result = TRUE; + pthread_mutex_unlock(&opticflow_mutex); + + // Free the image + v4l2_image_free(opticflow_dev, img); + } +} + +/** + * Get the altitude above ground of the drone + */ +static void opticflow_agl_cb(uint8_t sender_id __attribute__((unused)), float distance) +{ + // Update the distance if we got a valid measurement + if (distance > 0) { + opticflow_state.agl = distance; + } } diff --git a/sw/airborne/modules/computer_vision/opticflow_module.h b/sw/airborne/modules/computer_vision/opticflow_module.h index 098d0b15b3e..5e583ce5a68 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.h +++ b/sw/airborne/modules/computer_vision/opticflow_module.h @@ -28,7 +28,9 @@ #ifndef OPTICFLOW_MODULE_H #define OPTICFLOW_MODULE_H -#include "std.h" +// Include opticflow calculator and stabilization loops +#include "opticflow/opticflow_calculator.h" +#include "opticflow/stabilization_opticflow.h" // Module functions extern void opticflow_module_init(void); From 9be322692fc40d166a0332147a887fd24f512b5b Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Thu, 12 Mar 2015 10:53:43 +0100 Subject: [PATCH 02/17] [vision] Move folders and add general image struct --- conf/modules/cv_opticflow.xml | 10 +- conf/modules/video_rtp_stream.xml | 5 +- .../modules/computer_vision/cv/color.h | 88 -------- .../modules/computer_vision/cv/image.h | 35 ---- .../modules/computer_vision/cv/resize.h | 61 ------ .../{cv => lib}/encoding/jpeg.c | 26 ++- .../{cv => lib}/encoding/jpeg.h | 11 +- .../{cv => lib}/encoding/rtp.c | 18 +- .../{cv => lib}/encoding/rtp.h | 4 +- .../modules/computer_vision/lib/udp/socket.c | 104 ---------- .../modules/computer_vision/lib/udp/socket.h | 25 --- .../modules/computer_vision/lib/v4l/v4l2.c | 36 +++- 
.../modules/computer_vision/lib/v4l/v4l2.h | 11 +- .../opticflow => lib/vision}/fast_rosten.c | 0 .../opticflow => lib/vision}/fast_rosten.h | 0 .../computer_vision/lib/vision/image.c | 188 ++++++++++++++++++ .../computer_vision/lib/vision/image.h | 60 ++++++ .../opticflow => lib/vision}/lucas_kanade.c | 4 +- .../opticflow => lib/vision}/lucas_kanade.h | 6 +- .../opticflow/opticflow_calculator.c | 117 ++++++----- .../opticflow/opticflow_calculator.h | 2 +- .../computer_vision/opticflow_module.c | 47 ++++- .../modules/computer_vision/viewvideo.c | 62 +++--- 23 files changed, 454 insertions(+), 466 deletions(-) delete mode 100644 sw/airborne/modules/computer_vision/cv/color.h delete mode 100644 sw/airborne/modules/computer_vision/cv/image.h delete mode 100644 sw/airborne/modules/computer_vision/cv/resize.h rename sw/airborne/modules/computer_vision/{cv => lib}/encoding/jpeg.c (97%) rename sw/airborne/modules/computer_vision/{cv => lib}/encoding/jpeg.h (79%) rename sw/airborne/modules/computer_vision/{cv => lib}/encoding/rtp.c (94%) rename sw/airborne/modules/computer_vision/{cv => lib}/encoding/rtp.h (92%) delete mode 100644 sw/airborne/modules/computer_vision/lib/udp/socket.c delete mode 100644 sw/airborne/modules/computer_vision/lib/udp/socket.h rename sw/airborne/modules/computer_vision/{cv/opticflow => lib/vision}/fast_rosten.c (100%) rename sw/airborne/modules/computer_vision/{cv/opticflow => lib/vision}/fast_rosten.h (100%) create mode 100644 sw/airborne/modules/computer_vision/lib/vision/image.c create mode 100644 sw/airborne/modules/computer_vision/lib/vision/image.h rename sw/airborne/modules/computer_vision/{cv/opticflow => lib/vision}/lucas_kanade.c (98%) rename sw/airborne/modules/computer_vision/{cv/opticflow => lib/vision}/lucas_kanade.h (88%) diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index 71bbf51f43a..199e3c6462d 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -45,8 +45,9 @@ - + + + @@ -55,13 +56,14 @@ - - + + + diff --git a/conf/modules/video_rtp_stream.xml b/conf/modules/video_rtp_stream.xml index 1abd3caf18d..98f8a5935cc 100644 --- a/conf/modules/video_rtp_stream.xml +++ b/conf/modules/video_rtp_stream.xml @@ -37,8 +37,9 @@ - - + + + diff --git a/sw/airborne/modules/computer_vision/cv/color.h b/sw/airborne/modules/computer_vision/cv/color.h deleted file mode 100644 index 46ffdb413a8..00000000000 --- a/sw/airborne/modules/computer_vision/cv/color.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (C) 2012-2013 - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. 
- */ - - -#include -#include "image.h" - -inline void grayscale_uyvy(struct img_struct *input, struct img_struct *output); -inline void grayscale_uyvy(struct img_struct *input, struct img_struct *output) -{ - uint8_t *source = input->buf; - uint8_t *dest = output->buf; - source++; - - for (int y = 0; y < output->h; y++) { - for (int x = 0; x < output->w; x++) { - // UYVY - *dest++ = 127; // U - *dest++ = *source; // Y - source += 2; - } - } -} - -inline int colorfilt_uyvy(struct img_struct *input, struct img_struct *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, - uint8_t u_M, uint8_t v_m, uint8_t v_M); -inline int colorfilt_uyvy(struct img_struct *input, struct img_struct *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, - uint8_t u_M, uint8_t v_m, uint8_t v_M) -{ - int cnt = 0; - uint8_t *source = input->buf; - uint8_t *dest = output->buf; - - for (int y = 0; y < output->h; y++) { - for (int x = 0; x < output->w; x += 2) { - // Color Check: - if ( - // Light - (dest[1] >= y_m) - && (dest[1] <= y_M) - && (dest[0] >= u_m) - && (dest[0] <= u_M) - && (dest[2] >= v_m) - && (dest[2] <= v_M) - ) { // && (dest[2] > 128)) - cnt ++; - // UYVY - dest[0] = 64; // U - dest[1] = source[1]; // Y - dest[2] = 255; // V - dest[3] = source[3]; // Y - } else { - // UYVY - char u = source[0] - 127; - u /= 4; - dest[0] = 127; // U - dest[1] = source[1]; // Y - u = source[2] - 127; - u /= 4; - dest[2] = 127; // V - dest[3] = source[3]; // Y - } - - dest += 4; - source += 4; - } - } - return cnt; -} - diff --git a/sw/airborne/modules/computer_vision/cv/image.h b/sw/airborne/modules/computer_vision/cv/image.h deleted file mode 100644 index cf749ced5bb..00000000000 --- a/sw/airborne/modules/computer_vision/cv/image.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2012-2013 - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - - -#ifndef _MY_IMAGE_HEADER_ -#define _MY_IMAGE_HEADER_ - - -struct img_struct { - int seq; - double timestamp; - unsigned char *buf; - int w; - int h; -}; - -#endif diff --git a/sw/airborne/modules/computer_vision/cv/resize.h b/sw/airborne/modules/computer_vision/cv/resize.h deleted file mode 100644 index b0524a52705..00000000000 --- a/sw/airborne/modules/computer_vision/cv/resize.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) 2012-2013 - * - * This file is part of Paparazzi. - * - * Paparazzi is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * Paparazzi is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with Paparazzi; see the file COPYING. If not, write to - * the Free Software Foundation, 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - - -#include -#include "image.h" - -/** Simplified high-speed low CPU downsample function without averaging - * - * downsample factor must be 1, 2, 4, 8 ... 2^X - * image of typ UYVY expected. Only one color UV per 2 pixels - * - * we keep the UV color of the first pixel pair - * and sample the intensity evenly 1-3-5-7-... or 1-5-9-... - * - * input: u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ... - * downsample=1 u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ... - * downsample=2 u1y1v1 (skip2) y3 (skip2) u5y5v5 (skip2 y7 (skip2) ... - * downsample=4 u1y1v1 (skip6) y5 (skip6) ... - */ - -inline void resize_uyuv(struct img_struct *input, struct img_struct *output, int downsample); -inline void resize_uyuv(struct img_struct *input, struct img_struct *output, int downsample) -{ - uint8_t *source = input->buf; - uint8_t *dest = output->buf; - - int pixelskip = (downsample - 1) * 2; - for (int y = 0; y < output->h; y++) { - for (int x = 0; x < output->w; x += 2) { - // YUYV - *dest++ = *source++; // U - *dest++ = *source++; // Y - *dest++ = *source++; // V - source += pixelskip; - *dest++ = *source++; // Y - source += pixelskip; - } - // read 1 in every 'downsample' rows, so skip (downsample-1) rows after reading the first - source += (downsample-1) * input->w * 2; - } -} - diff --git a/sw/airborne/modules/computer_vision/cv/encoding/jpeg.c b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c similarity index 97% rename from sw/airborne/modules/computer_vision/cv/encoding/jpeg.c rename to sw/airborne/modules/computer_vision/lib/encoding/jpeg.c index e26c69e900f..c0ede9eebcb 100644 --- a/sw/airborne/modules/computer_vision/cv/encoding/jpeg.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c @@ -415,20 +415,24 @@ void MakeTables(int q) } } - - - - -uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t quality_factor, uint32_t image_format, - uint32_t image_width, uint32_t image_height, bool_t add_dri_header) +/** + * Encode an YUV422 image + * @param[in] *in The input image + * @param[out] *out The output JPEG image + * @param[in] quality_factor Quality factor of the encoding (0-99) + * @param[in] add_dri_header Add the DRI header (needed for full JPEG) + */ +void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality_factor, bool_t add_dri_header) { uint16_t i, j; + uint8_t *output_ptr = out->buf; + uint8_t *input_ptr = in->buf; JPEG_ENCODER_STRUCTURE JpegStruct; JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure = &JpegStruct; /* Initialization of JPEG control structure */ - jpeg_initialization(jpeg_encoder_structure, image_format, image_width, image_height); + jpeg_initialization(jpeg_encoder_structure, FOUR_TWO_TWO, in->w, in->h); /* Quantization Table Initialization */ //jpeg_initialize_quantization_tables (quality_factor); @@ -437,7 +441,7 @@ uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t qua /* Writing Marker Data */ if (add_dri_header) { - output_ptr = jpeg_write_markers(output_ptr, image_format, image_width, image_height); + output_ptr = jpeg_write_markers(output_ptr, FOUR_TWO_TWO, in->w, in->h); } for (i = 1; i <= jpeg_encoder_structure->vertical_mcus; i++) { @@ -459,7 +463,7 @@ uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t qua 
read_format(jpeg_encoder_structure, input_ptr); /* Encode the data in MCU */ - output_ptr = jpeg_encodeMCU(jpeg_encoder_structure, image_format, output_ptr); + output_ptr = jpeg_encodeMCU(jpeg_encoder_structure, FOUR_TWO_TWO, output_ptr); input_ptr += jpeg_encoder_structure->mcu_width_size; } @@ -469,7 +473,9 @@ uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t qua /* Close Routine */ output_ptr = jpeg_close_bitstream(output_ptr); - return output_ptr; + out->w = in->w; + out->h = in->h; + out->buf_size = output_ptr - (uint8_t*)out->buf; } static uint8_t *jpeg_encodeMCU(JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure, uint32_t image_format, uint8_t *output_ptr) diff --git a/sw/airborne/modules/computer_vision/cv/encoding/jpeg.h b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.h similarity index 79% rename from sw/airborne/modules/computer_vision/cv/encoding/jpeg.h rename to sw/airborne/modules/computer_vision/lib/encoding/jpeg.h index f36021f15e3..fafe6dbc9f9 100644 --- a/sw/airborne/modules/computer_vision/cv/encoding/jpeg.h +++ b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.h @@ -22,6 +22,7 @@ #define _CV_ENCODING_JPEG_H #include "std.h" +#include "lib/vision/image.h" /* The different type of image encodings */ #define FOUR_ZERO_ZERO 0 @@ -31,15 +32,7 @@ #define RGB 4 /* JPEG encode an image */ -unsigned char *jpeg_encode_image( - uint8_t *in, - uint8_t *out, - uint32_t q, // image quality 1-8 - uint32_t fmt, // image format code - uint32_t width, // image width - uint32_t height, // image height - bool_t add_dri_header // data only or full jpeg file -); +void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality_factor, bool_t add_dri_header); /* Create an SVS header */ int jpeg_create_svs_header(unsigned char *buf, int32_t size, int w); diff --git a/sw/airborne/modules/computer_vision/cv/encoding/rtp.c b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c similarity index 94% rename from sw/airborne/modules/computer_vision/cv/encoding/rtp.c rename to sw/airborne/modules/computer_vision/lib/encoding/rtp.c index 92961f6ff14..047e574d52d 100644 --- a/sw/airborne/modules/computer_vision/cv/encoding/rtp.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c @@ -87,12 +87,14 @@ void rtp_frame_test(struct udp_periph *udp) /** * Send an RTP frame */ -void rtp_frame_send(struct udp_periph *udp, uint8_t *Jpeg, uint32_t JpegLen, int w, int h, uint8_t format_code, +void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code, uint8_t quality_code, uint8_t has_dri_header, uint32_t delta_t) { static uint32_t packetcounter = 0; static uint32_t timecounter = 0; uint32_t offset = 0; + uint32_t jpeg_size = img->buf_size; + uint8_t *jpeg_ptr = img->buf; #define MAX_PACKET_SIZE 1400 @@ -104,20 +106,20 @@ void rtp_frame_send(struct udp_periph *udp, uint8_t *Jpeg, uint32_t JpegLen, int } // Split frame into packets - for (; JpegLen > 0;) { + for (; jpeg_size > 0;) { uint32_t len = MAX_PACKET_SIZE; uint8_t lastpacket = 0; - if (JpegLen <= len) { + if (jpeg_size <= len) { lastpacket = 1; - len = JpegLen; + len = jpeg_size; } - rtp_packet_send(udp, Jpeg, len, packetcounter, timecounter, offset, lastpacket, w, h, format_code, quality_code, - has_dri_header); + rtp_packet_send(udp, jpeg_ptr, len, packetcounter, timecounter, offset, lastpacket, img->w, img->h, format_code, + quality_code, has_dri_header); - JpegLen -= len; - Jpeg += len; + jpeg_size -= len; + jpeg_ptr += len; offset += len; packetcounter++; } diff 
--git a/sw/airborne/modules/computer_vision/cv/encoding/rtp.h b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h similarity index 92% rename from sw/airborne/modules/computer_vision/cv/encoding/rtp.h rename to sw/airborne/modules/computer_vision/lib/encoding/rtp.h index 74521536279..d099d1a1d83 100644 --- a/sw/airborne/modules/computer_vision/cv/encoding/rtp.h +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h @@ -29,12 +29,12 @@ #define _CV_ENCODING_RTP_H #include "std.h" +#include "lib/vision/image.h" #include "mcu_periph/udp.h" void rtp_frame_send( struct udp_periph *udp, // socket - uint8_t *Jpeg, uint32_t JpegLen, // jpeg data - int w, int h, // width and height + struct image_t *img, // The image to send uint8_t format_code, // 0=422, 1=421 uint8_t quality_code, // 0-99 of 128 for custom (include uint8_t has_dri_header, // Does Jpeg data include Header Info? diff --git a/sw/airborne/modules/computer_vision/lib/udp/socket.c b/sw/airborne/modules/computer_vision/lib/udp/socket.c deleted file mode 100644 index fba489e2828..00000000000 --- a/sw/airborne/modules/computer_vision/lib/udp/socket.c +++ /dev/null @@ -1,104 +0,0 @@ -#include "socket.h" - -#include -#include -#include -#include -#include - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -# define ADDR_SIZE_TYPE socklen_t -# define SOCKET_ERROR -1 -# define IO_SOCKET ioctl - -struct UdpSocket *udp_socket(const char *str_ip_out, const int port_out, const int port_in, const int broadcast) -{ - - struct UdpSocket *me = malloc(sizeof(struct UdpSocket)); - - int so_reuseaddr = 1; - me->socket_out = socket(AF_INET, SOCK_DGRAM, 0); - setsockopt(me->socket_out, SOL_SOCKET, SO_REUSEADDR, - &so_reuseaddr, sizeof(so_reuseaddr)); - - /* only set broadcast option if explicitly enabled */ - if (broadcast) - setsockopt(me->socket_out, SOL_SOCKET, SO_BROADCAST, - &broadcast, sizeof(broadcast)); - - me->addr_out.sin_family = AF_INET; - me->addr_out.sin_port = htons(port_out); - me->addr_out.sin_addr.s_addr = inet_addr(str_ip_out); - - me->socket_in = socket(AF_INET, SOCK_DGRAM, 0); - setsockopt(me->socket_in, SOL_SOCKET, SO_REUSEADDR, - &so_reuseaddr, sizeof(so_reuseaddr)); - - me->addr_in.sin_family = AF_INET; - me->addr_in.sin_port = htons(port_in); - me->addr_in.sin_addr.s_addr = htonl(INADDR_ANY); - - bind(me->socket_in, (struct sockaddr *)&me->addr_in, sizeof(me->addr_in)); - - return me; -} - -#include -//#define UDP_MODE MSG_DONTWAIT -#define UDP_MODE 0 - -int udp_write(struct UdpSocket *me, unsigned char *buf, int len) -{ - sendto(me->socket_out, buf, len, UDP_MODE, - (struct sockaddr *)&me->addr_out, sizeof(me->addr_out)); - //printf("sendto ret=%d\n",ret); - return len; -} - -unsigned long MIN(unsigned long a, unsigned long b); -unsigned long MIN(unsigned long a, unsigned long b) -{ - if (a < b) { return a; } - return b; -} - -int udp_read(struct UdpSocket *me, unsigned char *buf, int len) -{ - unsigned long toread = 0; - int btr = 1; // set to >0 in order to start the reading loop - int newbytes = 0; - - int status; - - // if socket is connected - for (; btr > 0;) { - // Check Status - status = IO_SOCKET(me->socket_in, FIONREAD, &toread); - if (status == SOCKET_ERROR) { - printf("problem receiving from socket\n"); - break; - } - - //printf("UDP has %d bytes\n", toread); - if (toread <= 0) { - break; - } - - // If status: ok and new data: read it - btr = MIN(toread, (unsigned long)len); - recvfrom(me->socket_in, buf, btr, 0, (struct sockaddr *)&me->addr_in, 
(socklen_t *) sizeof(me->addr_in)); - newbytes += btr; - } - return newbytes; -} diff --git a/sw/airborne/modules/computer_vision/lib/udp/socket.h b/sw/airborne/modules/computer_vision/lib/udp/socket.h deleted file mode 100644 index 98a831a6ee4..00000000000 --- a/sw/airborne/modules/computer_vision/lib/udp/socket.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef SOCKET_H -#define SOCKET_H - - -#include -#include - -#define FMS_UNICAST 0 -#define FMS_BROADCAST 1 - -struct UdpSocket { - int socket_in; - int socket_out; - struct sockaddr_in addr_in; - struct sockaddr_in addr_out; -}; - - -extern struct UdpSocket *udp_socket(const char *str_ip_out, const int port_out, const int port_in, const int broadcast); -extern int udp_write(struct UdpSocket *me, unsigned char *buf, int len); -extern int udp_read(struct UdpSocket *me, unsigned char *buf, int len); - - -#endif /* SOCKET_H */ - diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c index 576a4e097aa..077e4dfc61b 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c @@ -254,7 +254,6 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height } // Map the buffer - buffers[i].idx = i; buffers[i].length = buf.length; buffers[i].buf = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset); if (MAP_FAILED == buffers[i].buf) { @@ -282,8 +281,10 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height * This functions blocks until image access is granted. This should not take that long, because * it is only locked while enqueueing an image. * Make sure you free the image after processing! + * @param[in] *dev The V4L2 video device we want to get an image from + * @param[out] *img The image that we got from the video device */ -struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev) { +void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) { uint16_t img_idx = V4L2_IMG_NONE; // Continu to wait for an image @@ -302,16 +303,24 @@ struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev) { } } - // Rreturn the image - return &dev->buffers[img_idx]; + // Set the image + img->type = IMAGE_YUV422; + img->w = dev->w; + img->h = dev->h; + img->buf_idx = img_idx; + img->buf = dev->buffers[img_idx].buf; + memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval)); } /** * Get the latest image and lock it (Thread safe, NON BLOCKING) * This function returns NULL if it can't get access to the current image. * Make sure you free the image after processing! 
+ * @param[in] *dev The V4L2 video device we want to get an image from + * @param[out] *img The image that we got from the video device + * @return Whether we got an image or not */ -struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev) { +bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) { uint16_t img_idx = V4L2_IMG_NONE; // Try to get the current image @@ -324,9 +333,16 @@ struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev) { // Check if we really got an image if (img_idx == V4L2_IMG_NONE) { - return NULL; + return FALSE; } else { - return &dev->buffers[img_idx]; + // Set the image + img->type = IMAGE_YUV422; + img->w = dev->w; + img->h = dev->h; + img->buf_idx = img_idx; + img->buf = dev->buffers[img_idx].buf; + memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval)); + return TRUE; } } @@ -334,7 +350,7 @@ struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev) { * Free the image and enqueue the buffer (Thread safe) * This must be done after processing the image, because else all buffers are locked */ -void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf) +void v4l2_image_free(struct v4l2_device *dev, struct image_t *img) { struct v4l2_buffer buf; @@ -342,9 +358,9 @@ void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf) CLEAR(buf); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; - buf.index = img_buf->idx; + buf.index = img->buf_idx; if (ioctl(dev->fd, VIDIOC_QBUF, &buf) < 0) { - printf("[v4l2] Could not enqueue %d for %s\n", img_buf->idx, dev->name); + printf("[v4l2] Could not enqueue %d for %s\n", img->buf_idx, dev->name); } } diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h index 7c83e44da94..4b900056c3e 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.h @@ -28,16 +28,17 @@ #ifndef _CV_LIB_V4L2_H #define _CV_LIB_V4L2_H -#include "std.h" #include #include #include +#include "std.h" +#include "lib/vision/image.h" + #define V4L2_IMG_NONE 255 //< There currently no image available /* V4L2 memory mapped image buffer */ struct v4l2_img_buf { - uint8_t idx; //< The index of the buffer size_t length; //< The size of the buffer struct timeval timestamp; //< The time value of the image void *buf; //< Pointer to the memory mapped buffer @@ -59,9 +60,9 @@ struct v4l2_device { /* External functions */ bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t code, uint16_t width, uint16_t height); struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height, uint8_t buffers_cnt); -struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev); -struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev); -void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf); +void v4l2_image_get(struct v4l2_device *dev, struct image_t *img); +bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img); +void v4l2_image_free(struct v4l2_device *dev, struct image_t *img); bool_t v4l2_start_capture(struct v4l2_device *dev); bool_t v4l2_stop_capture(struct v4l2_device *dev); void v4l2_close(struct v4l2_device *dev); diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c similarity index 100% rename from 
sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.c rename to sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.h b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h similarity index 100% rename from sw/airborne/modules/computer_vision/cv/opticflow/fast_rosten.h rename to sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c new file mode 100644 index 00000000000..086c3d195c4 --- /dev/null +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2015 Freek van Tienen + * + * This file is part of Paparazzi. + * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +/** + * @file modules/computer_vision/lib/vision/image.c + * Image helper functions, like resizing, color filter, converters... + */ + +#include "image.h" +#include +#include + +/** + * Create a new image + * @param[out] *img The output image + * @param[in] width The width of the image + * @param[in] height The height of the image + * @param[in] type The type of image (YUV422 or grayscale) + */ +void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type) +{ + // Set the variables + img->type = type; + img->w = width; + img->h = height; + + // Depending on the type the size differs + if(type == IMAGE_YUV422) + img->buf = malloc(sizeof(uint8_t)*2 * width * height); + else if(type == IMAGE_JPEG) + img->buf = malloc(sizeof(uint8_t)*1.1 * width * height); // At maximum quality this is enough + else + img->buf = malloc(sizeof(uint8_t) * width * height); +} + +/** + * Free the image + * @param[in] *img The image to free + */ +void image_free(struct image_t *img) +{ + free(img->buf); +} + +/** + * Convert an image to grayscale. 
+ * Depending on the output type the U/V bytes are removed + * @param[in] *input The input image (Needs to be YUV422) + * @param[out] *output The output image + */ +void image_to_grayscale(struct image_t *input, struct image_t *output) +{ + uint8_t *source = input->buf; + uint8_t *dest = output->buf; + source++; + + // Copy the creation timestamp (stays the same) + memcpy(&output->ts, &input->ts, sizeof(struct timeval)); + + // Copy the pixels + for (int y = 0; y < output->h; y++) { + for (int x = 0; x < output->w; x++) { + if(output->type == IMAGE_YUV422) + *dest++ = 127; // U / V + *dest++ = *source; // Y + source += 2; + } + } +} + +/** + * Filter colors in an YUV422 image + * @param[in] *input The input image to filter + * @param[out] *output The filtered output image + * @param[in] y_m The Y minimum value + * @param[in] y_M The Y maximum value + * @param[in] u_m The U minimum value + * @param[in] u_M The U maximum value + * @param[in] v_m The V minimum value + * @param[in] v_M The V maximum value + * @return The amount of filtered pixels + */ +uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, + uint8_t u_M, uint8_t v_m, uint8_t v_M) +{ + uint16_t cnt = 0; + uint8_t *source = input->buf; + uint8_t *dest = output->buf; + + // Copy the creation timestamp (stays the same) + memcpy(&output->ts, &input->ts, sizeof(struct timeval)); + + // Go trough all the pixels + for (uint16_t y = 0; y < output->h; y++) { + for (uint16_t x = 0; x < output->w; x += 2) { + // Check if the color is inside the specified values + if ( + (dest[1] >= y_m) + && (dest[1] <= y_M) + && (dest[0] >= u_m) + && (dest[0] <= u_M) + && (dest[2] >= v_m) + && (dest[2] <= v_M) + ) { + cnt ++; + // UYVY + dest[0] = 64; // U + dest[1] = source[1]; // Y + dest[2] = 255; // V + dest[3] = source[3]; // Y + } else { + // UYVY + char u = source[0] - 127; + u /= 4; + dest[0] = 127; // U + dest[1] = source[1]; // Y + u = source[2] - 127; + u /= 4; + dest[2] = 127; // V + dest[3] = source[3]; // Y + } + + // Go to the next 2 pixels + dest += 4; + source += 4; + } + } + return cnt; +} + +/** +* Simplified high-speed low CPU downsample function without averaging +* downsample factor must be 1, 2, 4, 8 ... 2^X +* image of typ UYVY expected. Only one color UV per 2 pixels +* +* we keep the UV color of the first pixel pair +* and sample the intensity evenly 1-3-5-7-... or 1-5-9-... +* +* input: u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ... +* downsample=1 u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ... +* downsample=2 u1y1v1 (skip2) y3 (skip2) u5y5v5 (skip2 y7 (skip2) ... +* downsample=4 u1y1v1 (skip6) y5 (skip6) ... 
+* @param[in] *input The input YUV422 image +* @param[out] *output The downscaled YUV422 image +* @param[in] downsample The downsampel facter (must be downsample=2^X) +*/ +void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint16_t downsample) +{ + uint8_t *source = input->buf; + uint8_t *dest = output->buf; + uint16_t pixelskip = (downsample - 1) * 2; + + // Copy the creation timestamp (stays the same) + memcpy(&output->ts, &input->ts, sizeof(struct timeval)); + + // Go trough all the pixels + for (uint16_t y = 0; y < output->h; y++) { + for (uint16_t x = 0; x < output->w; x += 2) { + // YUYV + *dest++ = *source++; // U + *dest++ = *source++; // Y + *dest++ = *source++; // V + source += pixelskip; + *dest++ = *source++; // Y + source += pixelskip; + } + // read 1 in every 'downsample' rows, so skip (downsample-1) rows after reading the first + source += (downsample-1) * input->w * 2; + } +} diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h new file mode 100644 index 00000000000..8dd8054fc4d --- /dev/null +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2015 Freek van Tienen + * + * This file is part of Paparazzi. + * + * Paparazzi is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * Paparazzi is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Paparazzi; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +/** + * @file modules/computer_vision/lib/vision/image.h + * Image helper functions like resizing, color filter, converters... 
+ */ + +#ifndef _CV_LIB_VISION_IMAGE_H +#define _CV_LIB_VISION_IMAGE_H + +#include "std.h" +#include + +/* The different type of images we currently support */ +enum image_type { + IMAGE_YUV422, //< UYVY format + IMAGE_GRAYSCALE, //< Grayscale image with only the Y part + IMAGE_JPEG //< An JPEG encoded image +}; + +/* Main image structure */ +struct image_t { + enum image_type type; //< The image type + uint16_t w; //< Image width + uint16_t h; //< Image height + struct timeval ts; //< The timestamp of creation + + uint8_t buf_idx; //< Buffer index for V4L2 freeing + uint32_t buf_size; //< The buffer size + void *buf; //< Image buffer (depending on the image_type) +}; + +/* Usefull image functions */ +void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type); +void image_free(struct image_t *img); +void image_to_grayscale(struct image_t *input, struct image_t *output); +uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, + uint8_t u_M, uint8_t v_m, uint8_t v_M); +void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint16_t downsample); + +#endif diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c similarity index 98% rename from sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c rename to sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index f27f614cb0f..af7ed226d20 100644 --- a/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -243,8 +243,8 @@ int calculateError(int *ImC, int width, int height) return error; } -int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, int *p_x, int *p_y, int n_found_points, - int imW, int imH, int *new_x, int *new_y, int *status, int half_window_size, int max_iterations) +int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint16_t *p_x, uint16_t *p_y, uint16_t n_found_points, + uint16_t imW, uint16_t imH, uint16_t *new_x, uint16_t *new_y, bool_t *status, uint16_t half_window_size, uint8_t max_iterations) { // A straightforward one-level implementation of Lucas-Kanade. 
// For all points: diff --git a/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h similarity index 88% rename from sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h rename to sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index 192b5a1bbc6..9d1be401bd4 100644 --- a/sw/airborne/modules/computer_vision/cv/opticflow/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -27,6 +27,8 @@ #ifndef OPTIC_FLOW_INT_H #define OPTIC_FLOW_INT_H +#include "std.h" + void multiplyImages(int *ImA, int *ImB, int *ImC, int width, int height); void getImageDifference(int *ImA, int *ImB, int *ImC, int width, int height); void getSubPixel_gray(int *Patch, unsigned char *frame_buf, int center_x, int center_y, int half_window_size, @@ -35,8 +37,8 @@ void getGradientPatch(int *Patch, int *DX, int *DY, int half_window_size); int getSumPatch(int *Patch, int size); int calculateG(int *G, int *DX, int *DY, int half_window_size); int calculateError(int *ImC, int width, int height); -int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, int *p_x, int *p_y, int n_found_points, - int imW, int imH, int *new_x, int *new_y, int *status, int half_window_size, int max_iterations); +int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint16_t *p_x, uint16_t *p_y, uint16_t n_found_points, + uint16_t imW, uint16_t imH, uint16_t *new_x, uint16_t *new_y, bool_t *status, uint16_t half_window_size, uint8_t max_iterations); void quick_sort(float *a, int n); void quick_sort_int(int *a, int n); void CvtYUYV2Gray(unsigned char *grayframe, unsigned char *frame, int imW, int imH); diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index ce8086abfce..43d015c0faf 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -36,8 +36,9 @@ #include "opticflow_calculator.h" // Computer Vision -#include "cv/opticflow/lucas_kanade.h" -#include "cv/opticflow/fast_rosten.h" +#include "lib/vision/image.h" +#include "lib/vision/lucas_kanade.h" +#include "lib/vision/fast_rosten.h" // ARDrone Vertical Camera Parameters #define FOV_H 0.67020643276 @@ -56,6 +57,9 @@ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishti /** * Initialize the opticflow calculator + * @param[out] *opticflow The new optical flow calculator + * @param[in] *w The image width + * @param[in] *h The image height */ void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h) { @@ -75,21 +79,24 @@ void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned /** * Run the optical flow on a new image frame + * @param[in] *opticflow The opticalflow structure that keeps track of previous images + * @param[in] *state The state of the drone + * @param[in] *img The image frame to calculate the optical flow from + * @param[out] *result The optical flow result */ -void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct v4l2_img_buf *img, struct opticflow_result_t *result) +void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result) { // Corner Tracking // Working Variables - int max_count = 25; - int borderx = 24, 
bordery = 24; - int x[MAX_COUNT], y[MAX_COUNT]; - int new_x[MAX_COUNT], new_y[MAX_COUNT]; - int status[MAX_COUNT]; + uint16_t borderx = 24, bordery = 24; + uint16_t x[MAX_COUNT], y[MAX_COUNT]; + uint16_t new_x[MAX_COUNT], new_y[MAX_COUNT]; + bool_t status[MAX_COUNT]; int dx[MAX_COUNT], dy[MAX_COUNT]; // Update FPS for information - result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->timestamp) / 1000.); - memcpy(&opticflow->prev_timestamp, &img->timestamp, sizeof(struct timeval)); + result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.); + memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval)); if (!opticflow->got_first_img) { CvtYUYV2Gray(opticflow->prev_gray_frame, img->buf, opticflow->img_w, opticflow->img_h); @@ -101,51 +108,46 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection - int fast_threshold = 20; - xyFAST *pnts_fast; - pnts_fast = fast9_detect((const byte *)opticflow->prev_gray_frame, opticflow->img_w, opticflow->img_h, opticflow->img_w, + int fast_threshold = 5; + xyFAST *pnts_fast = fast9_detect((const byte *)opticflow->prev_gray_frame, opticflow->img_w, opticflow->img_h, opticflow->img_w, fast_threshold, &result->count); if (result->count > MAX_COUNT) { result->count = MAX_COUNT; } - for (int i = 0; i < result->count; i++) { - x[i] = pnts_fast[i].x; - y[i] = pnts_fast[i].y; - } - free(pnts_fast); - // Remove neighboring corners - const float min_distance = 3; - float min_distance2 = min_distance * min_distance; - int labelmin[MAX_COUNT]; - for (int i = 0; i < result->count; i++) { - for (int j = i + 1; j < result->count; j++) { - // distance squared: - float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]); - if (distance2 < min_distance2) { - labelmin[i] = 1; - } - } - } + // Copy the points and remove neighboring corners + const float min_distance2 = 10 * 10; + bool_t remove_points[MAX_COUNT]; + uint16_t count_fil = 0; + memset(&remove_points, FALSE, sizeof(bool_t) * MAX_COUNT); - int count_fil = result->count; - for (int i = result->count - 1; i >= 0; i--) { - int remove_point = 0; + for (uint16_t i = 0; i < result->count; i++) { + if(remove_points[i]) + continue; - if (labelmin[i]) { - remove_point = 1; - } + x[count_fil] = pnts_fast[i].x; + y[count_fil++] = pnts_fast[i].y; - if (remove_point) { - for (int c = i; c < count_fil - 1; c++) { - x[c] = x[c + 1]; - y[c] = y[c + 1]; - } - count_fil--; + // Skip some if they are too close + for(uint16_t j = i+1; j < result->count; j++) { + float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]); + if(distance2 < min_distance2) + remove_points[j] = TRUE; } } - - if (count_fil > max_count) { count_fil = max_count; } + free(pnts_fast); + if(count_fil > 25) count_fil = 25; result->count = count_fil; + + uint8_t *im = (uint8_t *)img->buf; + for(int i = 0; i < result->count; i++) { + uint16_t idx = 2*y[i]*opticflow->img_w + 2*x[i]; + im[idx] = 255; + idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); + im[idx] = 255; + idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); + im[idx] = 255; + } + // ************************************************************************************* // Corner Tracking // ************************************************************************************* @@ -154,16 +156,10 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ 
opticFlowLK(opticflow->gray_frame, opticflow->prev_gray_frame, x, y, count_fil, opticflow->img_w, opticflow->img_h, new_x, new_y, status, 5, 100); - result->flow_count = count_fil; - for (int i = count_fil - 1; i >= 0; i--) { - int remove_point = 1; - - if (status[i] && !(new_x[i] < borderx || new_x[i] > (opticflow->img_w - 1 - borderx) || - new_y[i] < bordery || new_y[i] > (opticflow->img_h - 1 - bordery))) { - remove_point = 0; - } - - if (remove_point) { + // Remove points if we lost tracking + /* for (int i = count_fil - 1; i >= 0; i--) { + if (!status[i] || new_x[i] < borderx || new_x[i] > (opticflow->img_w - 1 - borderx) || + new_y[i] < bordery || new_y[i] > (opticflow->img_h - 1 - bordery)) { for (int c = i; c < result->flow_count - 1; c++) { x[c] = x[c + 1]; y[c] = y[c + 1]; @@ -172,7 +168,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } result->flow_count--; } - } + }*/ result->dx_sum = 0.0; result->dy_sum = 0.0; @@ -196,7 +192,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } // Flow Derotation - result->diff_pitch = (state->theta - opticflow->prev_pitch) * opticflow->img_h / FOV_H; + /*result->diff_pitch = (state->theta - opticflow->prev_pitch) * opticflow->img_h / FOV_H; result->diff_roll = (state->phi - opticflow->prev_roll) * opticflow->img_w / FOV_W; opticflow->prev_pitch = state->theta; opticflow->prev_roll = state->phi; @@ -242,12 +238,15 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // Next Loop Preparation // ************************************************************************************* - + */ memcpy(opticflow->prev_gray_frame, opticflow->gray_frame, opticflow->img_w * opticflow->img_h); } /** - * calculate the difference from start till finish + * Calculate the difference from start till finish + * @param[in] *starttime The start time to calculate the difference from + * @param[in] *finishtime The finish time to calculate the difference from + * @return The difference in milliseconds */ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime) { diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h index 83d9ed4b04a..a8d9538a10a 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h @@ -52,6 +52,6 @@ struct opticflow_t void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h); -void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct v4l2_img_buf *img, struct opticflow_result_t *result); +void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result); #endif /* OPTICFLOW_CALCULATOR_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index dcba3bb6b46..bd853425261 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -32,7 +32,9 @@ #include #include "state.h" #include "subsystems/abi.h" + #include "lib/v4l/v4l2.h" +#include "lib/encoding/jpeg.h" /* default sonar/agl to use in opticflow visual_estimator */ #ifndef OPTICFLOW_AGL_ID @@ 
-139,10 +141,17 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { return 0; } +#ifdef OPTICFLOW_DEBUG + // Create a new JPEG image + struct image_t img_jpeg; + image_create(&img_jpeg, opticflow_dev->w, opticflow_dev->h, IMAGE_JPEG); +#endif + /* Main loop of the optical flow calculation */ while(TRUE) { // Try to fetch an image - struct v4l2_img_buf *img = v4l2_image_get(opticflow_dev); + struct image_t img; + v4l2_image_get(opticflow_dev, &img); // Copy the state pthread_mutex_lock(&opticflow_mutex); @@ -152,7 +161,7 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { // Do the optical flow calculation struct opticflow_result_t temp_result; - opticflow_calc_frame(&opticflow, &temp_state, img, &temp_result); + opticflow_calc_frame(&opticflow, &temp_state, &img, &temp_result); // Copy the result if finished pthread_mutex_lock(&opticflow_mutex); @@ -160,9 +169,41 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { opticflow_got_result = TRUE; pthread_mutex_unlock(&opticflow_mutex); +#ifdef OPTICFLOW_DEBUG + jpeg_encode_image(&img, &img_jpeg, 99, TRUE); + + // Open process to send using netcat (in a fork because sometimes kills itself???) + pid_t pid = fork(); + + if(pid < 0) { + printf("[viewvideo] Could not create netcat fork.\n"); + } + else if(pid ==0) { + // We are the child and want to send the image + FILE *netcat = popen("nc 192.168.1.2 5000 2>/dev/null", "w"); + if (netcat != NULL) { + fwrite(img_jpeg.buf, sizeof(uint8_t), img_jpeg.buf_size, netcat); + pclose(netcat); // Ignore output, because it is too much when not connected + } else { + printf("[viewvideo] Failed to open netcat process.\n"); + } + + // Exit the program since we don't want to continue after transmitting + exit(0); + } + else { + // We want to wait until the child is finished + wait(NULL); + } +#endif + // Free the image - v4l2_image_free(opticflow_dev, img); + v4l2_image_free(opticflow_dev, &img); } + +#ifdef OPTICFLOW_DEBUG + image_free(&img_jpeg); +#endif } /** diff --git a/sw/airborne/modules/computer_vision/viewvideo.c b/sw/airborne/modules/computer_vision/viewvideo.c index 85ebd24e3e7..b79f0987119 100644 --- a/sw/airborne/modules/computer_vision/viewvideo.c +++ b/sw/airborne/modules/computer_vision/viewvideo.c @@ -42,9 +42,9 @@ // Video #include "lib/v4l/v4l2.h" -#include "cv/resize.h" -#include "cv/encoding/jpeg.h" -#include "cv/encoding/rtp.h" +#include "lib/vision/image.h" +#include "lib/encoding/jpeg.h" +#include "lib/encoding/rtp.h" // Threaded computer vision #include @@ -136,19 +136,17 @@ static void *viewvideo_thread(void *data __attribute__((unused))) } // Resize image if needed - struct img_struct small; - small.w = viewvideo.dev->w / viewvideo.downsize_factor; - small.h = viewvideo.dev->h / viewvideo.downsize_factor; - if (viewvideo.downsize_factor != 1) { - small.buf = (uint8_t *)malloc(small.w * small.h * 2); - } else { - small.buf = NULL; - } + struct image_t img_small; + image_create(&img_small, + viewvideo.dev->w/viewvideo.downsize_factor, + viewvideo.dev->h/viewvideo.downsize_factor, + IMAGE_YUV422); - // JPEG compression (8.25 bits are required for a 100% quality image, margin of ~0.55) - uint8_t *jpegbuf = (uint8_t *)malloc(ceil(small.w * small.h * 1.1)); + // Create the JPEG encoded image + struct image_t img_jpeg; + image_create(&img_jpeg, img_small.w, img_small.h, IMAGE_JPEG); - // time + // Initialize timing uint32_t microsleep = (uint32_t)(1000000. 
/ (float)viewvideo.fps); struct timeval last_time; gettimeofday(&last_time, NULL); @@ -169,14 +167,15 @@ static void *viewvideo_thread(void *data __attribute__((unused))) last_time = vision_thread_sleep_time; // Wait for a new frame (blocking) - struct v4l2_img_buf *img = v4l2_image_get(viewvideo.dev); + struct image_t img; + v4l2_image_get(viewvideo.dev, &img); // Check if we need to take a shot if (viewvideo.take_shot) { // Create a high quality image (99% JPEG encoded) - uint8_t *jpegbuf_hr = (uint8_t *)malloc(ceil(viewvideo.dev->w * viewvideo.dev->h * 1.1)); - uint8_t *end = jpeg_encode_image(img->buf, jpegbuf_hr, 99, FOUR_TWO_TWO, viewvideo.dev->w, viewvideo.dev->h, TRUE); - uint32_t size = end - (jpegbuf_hr); + struct image_t jpeg_hr; + image_create(&jpeg_hr, img.w, img.h, IMAGE_JPEG); + jpeg_encode_image(&img, &jpeg_hr, 99, TRUE); // Search for a file where we can write to char save_name[128]; @@ -189,7 +188,7 @@ static void *viewvideo_thread(void *data __attribute__((unused))) printf("[viewvideo-thread] Could not write shot %s.\n", save_name); } else { // Save it to the file and close it - fwrite(jpegbuf_hr, sizeof(uint8_t), size, fp); + fwrite(jpeg_hr.buf, sizeof(uint8_t), jpeg_hr.buf_size, fp); fclose(fp); } @@ -199,25 +198,18 @@ static void *viewvideo_thread(void *data __attribute__((unused))) } // We finished the shot - free(jpegbuf_hr); + image_free(&jpeg_hr); viewvideo.take_shot = FALSE; } // Only resize when needed if (viewvideo.downsize_factor != 1) { - struct img_struct input; - input.buf = img->buf; - input.w = viewvideo.dev->w; - input.h = viewvideo.dev->h; - resize_uyuv(&input, &small, viewvideo.downsize_factor); + image_yuv422_downsample(&img, &img_small, viewvideo.downsize_factor); + jpeg_encode_image(&img_small, &img_jpeg, VIEWVIDEO_QUALITY_FACTOR, VIEWVIDEO_USE_NETCAT); } else { - small.buf = img->buf; + jpeg_encode_image(&img, &img_jpeg, VIEWVIDEO_QUALITY_FACTOR, VIEWVIDEO_USE_NETCAT); } - // JPEG encode the image: - uint8_t *end = jpeg_encode_image(small.buf, jpegbuf, VIEWVIDEO_QUALITY_FACTOR, FOUR_TWO_TWO, small.w, small.h, VIEWVIDEO_USE_NETCAT); - uint32_t size = end - (jpegbuf); - #if VIEWVIDEO_USE_NETCAT // Open process to send using netcat (in a fork because sometimes kills itself???) 
pid_t pid = fork(); @@ -246,8 +238,7 @@ static void *viewvideo_thread(void *data __attribute__((unused))) // Send image with RTP rtp_frame_send( &VIEWVIDEO_DEV, // UDP device - jpegbuf, size, // JPEG - small.w, small.h, // Img Size + &img_jpeg, 0, // Format 422 VIEWVIDEO_QUALITY_FACTOR, // Jpeg-Quality 0, // DRI Header @@ -264,13 +255,12 @@ static void *viewvideo_thread(void *data __attribute__((unused))) #endif // Free the image - v4l2_image_free(viewvideo.dev, img); + v4l2_image_free(viewvideo.dev, &img); } // Free all buffers - free(jpegbuf); - if (viewvideo.downsize_factor != 1) - free(small.buf); + image_free(&img_jpeg); + image_free(&img_small); return 0; } From b5a2500bc9503f14651c702ac86ef374378b030d Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Thu, 12 Mar 2015 12:17:00 +0100 Subject: [PATCH 03/17] [vision] More cleanup but still needs testing --- .../modules/computer_vision/lib/v4l/v4l2.c | 2 + .../computer_vision/lib/vision/fast_rosten.c | 3801 +---------------- .../computer_vision/lib/vision/fast_rosten.h | 15 +- .../computer_vision/lib/vision/image.c | 26 +- .../computer_vision/lib/vision/image.h | 7 + .../computer_vision/lib/vision/lucas_kanade.c | 116 - .../computer_vision/lib/vision/lucas_kanade.h | 3 - .../opticflow/inter_thread_data.h | 5 +- .../opticflow/opticflow_calculator.c | 36 +- .../opticflow/opticflow_calculator.h | 8 +- .../opticflow/stabilization_opticflow.c | 2 +- 11 files changed, 120 insertions(+), 3901 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c index 077e4dfc61b..12d81959972 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c @@ -308,6 +308,7 @@ void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) { img->w = dev->w; img->h = dev->h; img->buf_idx = img_idx; + img->buf_size = dev->buffers[img_idx].length; img->buf = dev->buffers[img_idx].buf; memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval)); } @@ -340,6 +341,7 @@ bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) { img->w = dev->w; img->h = dev->h; img->buf_idx = img_idx; + img->buf_size = dev->buffers[img_idx].length; img->buf = dev->buffers[img_idx].buf; memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval)); return TRUE; diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index d5f03a94acf..0ff567e62bc 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -34,3748 +34,41 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include "fast_rosten.h" -#define Compare(X, Y) ((X)>=(Y)) +static void fast_make_offsets(int32_t *pixel, uint16_t row_stride); -xyFAST *fast9_detect_nonmax(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners) +/** + * Do a FAST9 corner detection + * @param[in] *img The image to do the corner detection on + * @param[in] threshold The threshold which we use for FAST9 + * @param[out] *num_corner The amount of corners found + * @return The corners found + */ +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *num_corners) { - xyFAST *corners; - int num_corners; - int *scores; - xyFAST *nonmax; - - corners = fast9_detect(im, xsize, ysize, stride, b, &num_corners); - scores = fast9_score(im, stride, corners, num_corners, b); - nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners); - - free(corners); - free(scores); - - return nonmax; -} - -xyFAST *nonmax_suppression(const xyFAST *corners, const int *scores, int num_corners, int *ret_num_nonmax) -{ - int num_nonmax = 0; - int last_row; - int *row_start; - int i, j; - xyFAST *ret_nonmax; - const int sz = (int)num_corners; - - /*Point above points (roughly) to the pixel above the one of interest, if there - is a feature there.*/ - int point_above = 0; - int point_below = 0; - - - if (num_corners < 1) { - *ret_num_nonmax = 0; - return 0; - } - - ret_nonmax = (xyFAST *)malloc(num_corners * sizeof(xyFAST)); - - /* Find where each row begins - (the corners are output in raster scan order). A beginning of -1 signifies - that there are no corners on that row. */ - last_row = corners[num_corners - 1].y; - row_start = (int *)malloc((last_row + 1) * sizeof(int)); - - for (i = 0; i < last_row + 1; i++) { - row_start[i] = -1; - } - - { - int prev_row = -1; - for (i = 0; i < num_corners; i++) - if (corners[i].y != prev_row) { - row_start[corners[i].y] = i; - prev_row = corners[i].y; - } - } - - - - for (i = 0; i < sz; i++) { - int score = scores[i]; - xyFAST pos = corners[i]; - - /*Check left */ - if (i > 0) - if (corners[i - 1].x == pos.x - 1 && corners[i - 1].y == pos.y && Compare(scores[i - 1], score)) { - continue; - } - - /*Check right*/ - if (i < (sz - 1)) - if (corners[i + 1].x == pos.x + 1 && corners[i + 1].y == pos.y && Compare(scores[i + 1], score)) { - continue; - } - - /*Check above (if there is a valid row above)*/ - if (pos.y != 0 && row_start[pos.y - 1] != -1) { - /*Make sure that current point_above is one - row above.*/ - if (corners[point_above].y < pos.y - 1) { - point_above = row_start[pos.y - 1]; - } - - /*Make point_above point to the first of the pixels above the current point, - if it exists.*/ - for (; corners[point_above].y < pos.y && corners[point_above].x < pos.x - 1; point_above++) - {} - - - for (j = point_above; corners[j].y < pos.y && corners[j].x <= pos.x + 1; j++) { - int x = corners[j].x; - if ((x == pos.x - 1 || x == pos.x || x == pos.x + 1) && Compare(scores[j], score)) { - goto cont; - } - } - - } - - /*Check below (if there is anything below)*/ - if (pos.y != last_row && row_start[pos.y + 1] != -1 && point_below < sz) { /*Nothing below*/ - if (corners[point_below].y < pos.y + 1) { - point_below = row_start[pos.y + 1]; - } - - /* Make point below point to one of the pixels belowthe current point, if it - exists.*/ - for (; point_below < sz && corners[point_below].y == pos.y + 1 && corners[point_below].x < pos.x - 1; point_below++) - {} - - for (j = point_below; j < sz && corners[j].y == pos.y + 1 && corners[j].x <= pos.x + 1; j++) { - int 
x = corners[j].x; - if ((x == pos.x - 1 || x == pos.x || x == pos.x + 1) && Compare(scores[j], score)) { - goto cont; - } - } - } - - ret_nonmax[num_nonmax++] = corners[i]; -cont: - ; - } - - free(row_start); - *ret_num_nonmax = num_nonmax; - return ret_nonmax; -} - -int fast9_corner_score(const byte *p, const int pixel[], int bstart) -{ - int bmin = bstart; - int bmax = 255; - int b = (bmax + bmin) / 2; - - /*Compute the score using binary search*/ - for (;;) { - int cb = *p + b; - int c_b = *p - b; - - - if (p[pixel[0]] > cb) - if (p[pixel[1]] > cb) - if (p[pixel[2]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[7]] < c_b) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[6]] < c_b) - if (p[pixel[15]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - 
else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[14]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[6]] < c_b) { - goto is_a_corner; - } else if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[6]] < c_b) { - goto is_a_corner; - } else if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto 
is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[13]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) { - goto is_a_corner; - } else if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto 
is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) { - goto is_a_corner; - } else if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) { - goto is_a_corner; - } else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - 
if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) { - goto is_a_corner; - } else if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] < c_b) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] 
< c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[3]] < c_b) { - goto is_a_corner; - } else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[3]] < c_b) { - goto is_a_corner; - } else if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if 
(p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[1]] < c_b) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[3]] < c_b) - if (p[pixel[2]] < c_b) { - goto is_a_corner; - } else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto 
is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[3]] < c_b) - if (p[pixel[2]] < c_b) { - goto is_a_corner; - } else if (p[pixel[11]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[0]] < c_b) - if (p[pixel[1]] > cb) - if (p[pixel[8]] > cb) 
- if (p[pixel[7]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[2]] > cb) { - goto is_a_corner; - } else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] < c_b) - if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[1]] < c_b) - if (p[pixel[2]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[6]] > 
cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[3]] > cb) { - goto is_a_corner; - } else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] < c_b) - if (p[pixel[3]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) { - goto is_a_corner; - } else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } 
- else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[4]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) { - goto is_a_corner; - } else if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[11]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto 
is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[6]] > cb) { - goto is_a_corner; - } else if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[12]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[6]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] > cb) - if (p[pixel[15]] < c_b) - if (p[pixel[13]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - 
else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[6]] < c_b) - if (p[pixel[7]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[6]] > cb) { - goto is_a_corner; - } else if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } 
- else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) { - goto is_a_corner; - } else if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) { - goto is_a_corner; - } else if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto 
is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[3]] > cb) { - goto is_a_corner; - } else if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] > cb) - if (p[pixel[7]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[10]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[2]] > cb) { - goto is_a_corner; - } else if (p[pixel[11]] > cb) { - goto is_a_corner; - 
} else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[2]] < c_b) - if (p[pixel[3]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[7]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[7]] > cb) - if (p[pixel[8]] > cb) - if (p[pixel[9]] > cb) - if (p[pixel[6]] > cb) - if (p[pixel[5]] > cb) - if (p[pixel[4]] > cb) - if (p[pixel[3]] > cb) - if (p[pixel[2]] > cb) - if (p[pixel[1]] > cb) { - goto is_a_corner; - } else if (p[pixel[10]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto 
is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] > cb) - if (p[pixel[11]] > cb) - if (p[pixel[12]] > cb) - if (p[pixel[13]] > cb) - if (p[pixel[14]] > cb) - if (p[pixel[15]] > cb) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[7]] < c_b) - if (p[pixel[8]] < c_b) - if (p[pixel[9]] < c_b) - if (p[pixel[6]] < c_b) - if (p[pixel[5]] < c_b) - if (p[pixel[4]] < c_b) - if (p[pixel[3]] < c_b) - if (p[pixel[2]] < c_b) - if (p[pixel[1]] < c_b) { - goto is_a_corner; - } else if (p[pixel[10]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else if (p[pixel[10]] < c_b) - if (p[pixel[11]] < c_b) - if (p[pixel[12]] < c_b) - if (p[pixel[13]] < c_b) - if (p[pixel[14]] < c_b) - if (p[pixel[15]] < c_b) { - goto is_a_corner; - } else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - else { - goto is_not_a_corner; - } - -is_a_corner: - bmin = b; - goto end_if; - -is_not_a_corner: - bmax = b; - goto end_if; - -end_if: - - if (bmin == bmax - 1 || bmin == bmax) { - return bmin; - } - b = (bmin + bmax) / 2; - } -} - -static void make_offsets(int pixel[], int row_stride) -{ - pixel[0] = 0 + row_stride * 3; - pixel[1] = 1 + row_stride * 3; - pixel[2] = 2 + row_stride * 2; - pixel[3] = 3 + row_stride * 1; - pixel[4] = 3 + row_stride * 0; - pixel[5] = 3 + row_stride * -1; - pixel[6] = 2 + row_stride * -2; - pixel[7] = 1 + row_stride * -3; - pixel[8] = 0 + row_stride * -3; - pixel[9] = -1 + 
row_stride * -3; - pixel[10] = -2 + row_stride * -2; - pixel[11] = -3 + row_stride * -1; - pixel[12] = -3 + row_stride * 0; - pixel[13] = -3 + row_stride * 1; - pixel[14] = -2 + row_stride * 2; - pixel[15] = -1 + row_stride * 3; -} - - - -int *fast9_score(const byte *i, int stride, xyFAST *corners, int num_corners, int b) -{ - int *scores = (int *)malloc(sizeof(int) * num_corners); - int n; - - int pixel[16]; - make_offsets(pixel, stride); - - for (n = 0; n < num_corners; n++) { - scores[n] = fast9_corner_score(i + corners[n].y * stride + corners[n].x, pixel, b); - } - - return scores; -} - - -xyFAST *fast9_detect(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners) -{ - int num_corners = 0; - xyFAST *ret_corners; + int corner_cnt = 0; int rsize = 512; int pixel[16]; int x, y; + struct point_t *ret_corners = malloc(sizeof(struct point_t) * rsize); + + // Set the pixel size + uint8_t pixel_size = 1; + if(img->type == IMAGE_YUV422) + pixel_size = 2; - ret_corners = (xyFAST *)malloc(sizeof(xyFAST) * rsize); - make_offsets(pixel, stride); + // Calculate the pixel offsets + fast_make_offsets(pixel, img->w*pixel_size); - for (y = 3; y < ysize - 3; y++) - for (x = 3; x < xsize - 3; x++) { - const byte *p = im + y * stride + x; + // Go trough all the pixels (minus the borders) + for (y = 3; y < img->h - 3; y++) + for (x = 3; x < img->w - 3; x++) { + const uint8_t *p = (uint8_t *)img->buf + y * img->w * pixel_size + x + pixel_size/2; - int cb = *p + b; - int c_b = *p - b; + // Calculate the threshold values + int16_t cb = *p + threshold; + int16_t c_b = *p - threshold; + + // Do the checks if it is a corner if (p[pixel[0]] > cb) if (p[pixel[1]] > cb) if (p[pixel[2]] > cb) @@ -7303,18 +3596,44 @@ xyFAST *fast9_detect(const byte *im, int xsize, int ysize, int stride, int b, in else { continue; } - if (num_corners == rsize) { + + // When we have more corner than allocted space reallocate + if (corner_cnt == rsize) { rsize *= 2; - ret_corners = (xyFAST *)realloc(ret_corners, sizeof(xyFAST) * rsize); + ret_corners = realloc(ret_corners, sizeof(struct point_t) * rsize); } - ret_corners[num_corners].x = x; - ret_corners[num_corners].y = y; - num_corners++; + + ret_corners[corner_cnt].x = x; + ret_corners[corner_cnt].y = y; + corner_cnt++; } - *ret_num_corners = num_corners; + *num_corners = corner_cnt; return ret_corners; - } +/** + * Make offsets for FAST9 calculation + * @param[out] *pixel The offset array of the different pixels + * @param[in] row_stride The row stride in the image + */ +static void fast_make_offsets(int32_t *pixel, uint16_t row_stride) +{ + pixel[0] = 0 + row_stride * 3; + pixel[1] = 1 + row_stride * 3; + pixel[2] = 2 + row_stride * 2; + pixel[3] = 3 + row_stride * 1; + pixel[4] = 3 + row_stride * 0; + pixel[5] = 3 + row_stride * -1; + pixel[6] = 2 + row_stride * -2; + pixel[7] = 1 + row_stride * -3; + pixel[8] = 0 + row_stride * -3; + pixel[9] = -1 + row_stride * -3; + pixel[10] = -2 + row_stride * -2; + pixel[11] = -3 + row_stride * -1; + pixel[12] = -3 + row_stride * 0; + pixel[13] = -3 + row_stride * 1; + pixel[14] = -2 + row_stride * 2; + pixel[15] = -1 + row_stride * 3; +} diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h index 683bee37c78..3ec414890ac 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h @@ -34,18 +34,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 
DAMAGE. #ifndef FAST_H #define FAST_H -typedef struct { int x, y; } xyFAST; -typedef unsigned char byte; - -int fast9_corner_score(const byte *p, const int pixel[], int bstart); - -xyFAST *fast9_detect(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners); - -int *fast9_score(const byte *i, int stride, xyFAST *corners, int num_corners, int b); - -xyFAST *fast9_detect_nonmax(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners); - -xyFAST *nonmax_suppression(const xyFAST *corners, const int *scores, int num_corners, int *ret_num_nonmax); +#include "std.h" +#include "lib/vision/image.h" +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *num_corners); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 086c3d195c4..9c4b0a8e03f 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -44,11 +44,13 @@ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum ima // Depending on the type the size differs if(type == IMAGE_YUV422) - img->buf = malloc(sizeof(uint8_t)*2 * width * height); + img->buf_size = sizeof(uint8_t)*2 * width * height; else if(type == IMAGE_JPEG) - img->buf = malloc(sizeof(uint8_t)*1.1 * width * height); // At maximum quality this is enough + img->buf_size = sizeof(uint8_t)*1.1 * width * height; // At maximum quality this is enough else - img->buf = malloc(sizeof(uint8_t) * width * height); + img->buf_size = sizeof(uint8_t) * width * height; + + img->buf = malloc(img->buf_size); } /** @@ -60,6 +62,24 @@ void image_free(struct image_t *img) free(img->buf); } +/** + * Copy an image from inut to output + * This will only work if the formats are the same + * @param[in] *input The input image to copy from + * @param[out] *output The out image to copy to + */ +void image_copy(struct image_t *input, struct image_t *output) +{ + if(input->type != output->type) + return; + + output->w = input->w; + output->h = input->h; + output->buf_size = input->buf_size; + memcpy(&output->ts, &input->ts, sizeof(struct timeval)); + memcpy(input->buf, output->buf, input->buf_size); +} + /** * Convert an image to grayscale. 
* Depending on the output type the U/V bytes are removed diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index 8dd8054fc4d..ebf03b5e1bc 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -49,9 +49,16 @@ struct image_t { void *buf; //< Image buffer (depending on the image_type) }; +/* Image point structure */ +struct point_t { + uint16_t x; //< The x coordinate of the point + uint16_t y; //< The y coordinate of the point +}; + /* Usefull image functions */ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type); void image_free(struct image_t *img); +void image_copy(struct image_t *input, struct image_t *output); void image_to_grayscale(struct image_t *input, struct image_t *output); uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, uint8_t u_M, uint8_t v_m, uint8_t v_M); diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index af7ed226d20..923ddda8f59 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -404,119 +404,3 @@ int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint // no errors: return OK; } - -void quick_sort(float *a, int n) -{ - if (n < 2) { - return; - } - float p = a[n / 2]; - float *l = a; - float *r = a + n - 1; - while (l <= r) { - if (*l < p) { - l++; - continue; - } - if (*r > p) { - r--; - continue; // we need to check the condition (l <= r) every time we change the value of l or r - } - float t = *l; - *l++ = *r; - *r-- = t; - } - quick_sort(a, r - a + 1); - quick_sort(l, a + n - l); -} - -void quick_sort_int(int *a, int n) -{ - if (n < 2) { - return; - } - int p = a[n / 2]; - int *l = a; - int *r = a + n - 1; - while (l <= r) { - if (*l < p) { - l++; - continue; - } - if (*r > p) { - r--; - continue; - } - int t = *l; - *l++ = *r; - *r-- = t; - } - quick_sort_int(a, r - a + 1); - quick_sort_int(l, a + n - l); -} - -void CvtYUYV2Gray(unsigned char *grayframe, unsigned char *frame, int imW, int imH) -{ - int x, y; - unsigned char *Y, *gray; - for (y = 0; y < imH; y++) { - Y = frame + (imW * 2 * y) + 1; - gray = grayframe + (imW * y); - for (x = 0; x < imW; x += 2) { - gray[x] = *Y; - Y += 2; - gray[x + 1] = *Y; - Y += 2; - } - } -} - -unsigned int OF_buf_point = 0; -unsigned int OF_buf_point2 = 0; -float x_avg, y_avg, x_buf[24], y_buf[24], x_buf2[24], y_buf2[24]; - -void OFfilter(float *OFx, float *OFy, float dx, float dy, int count, int OF_FilterType) -{ - - if (OF_FilterType == 1) { //1. moving average 2. 
moving median - - x_avg = 0.0; - y_avg = 0.0; - - if (count) { - x_buf[OF_buf_point] = dx; - y_buf[OF_buf_point] = dy; - } else { - x_buf[OF_buf_point] = 0.0; - y_buf[OF_buf_point] = 0.0; - } - OF_buf_point = (OF_buf_point + 1) % 20; - - for (int i = 0; i < 20; i++) { - x_avg += x_buf[i] * 0.05; - y_avg += y_buf[i] * 0.05; - } - - *OFx = x_avg; - *OFy = y_avg; - - } else if (OF_FilterType == 2) { - if (count) { - x_buf2[OF_buf_point2] = dx; - y_buf2[OF_buf_point2] = dy; - } else { - x_buf2[OF_buf_point2] = 0.0; - y_buf2[OF_buf_point2] = 0.0; - } - OF_buf_point2 = (OF_buf_point2 + 1) % 11; // 11 - - quick_sort(x_buf2, 11); // 11 - quick_sort(y_buf2, 11); // 11 - - *OFx = x_buf2[6]; // 6 - *OFy = y_buf2[6]; // 6 - } else { - printf("no filter type selected!\n"); - } -} - diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index 9d1be401bd4..0f9ac7ed0f5 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -39,9 +39,6 @@ int calculateG(int *G, int *DX, int *DY, int half_window_size); int calculateError(int *ImC, int width, int height); int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint16_t *p_x, uint16_t *p_y, uint16_t n_found_points, uint16_t imW, uint16_t imH, uint16_t *new_x, uint16_t *new_y, bool_t *status, uint16_t half_window_size, uint8_t max_iterations); -void quick_sort(float *a, int n); -void quick_sort_int(int *a, int n); -void CvtYUYV2Gray(unsigned char *grayframe, unsigned char *frame, int imW, int imH); void OFfilter(float *OFx, float *OFy, float dx, float dy, int count, int OF_FilterType); #endif /* OPTIC_FLOW_INT_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h index 87ab62f99b3..74c9ebb3972 100644 --- a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h +++ b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h @@ -31,14 +31,15 @@ /* The result calculated from the opticflow */ struct opticflow_result_t { - int cnt; // Number of processed frames + uint16_t cnt; //< Number of processed frames + uint32_t corner_cnt; //< The amount of coners found by FAST9 + float Velx; // Velocity as measured by camera float Vely; int flow_count; float cam_h; // Debug parameters - int count; float OFx, OFy, dx_sum, dy_sum; float diff_roll; float diff_pitch; diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 43d015c0faf..1d9ccc7e360 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -61,15 +61,15 @@ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishti * @param[in] *w The image width * @param[in] *h The image height */ -void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h) +void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h) { /* Set the width/height of the image */ opticflow->img_w = w; opticflow->img_h = h; /* Create the image buffers */ - opticflow->gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); - opticflow->prev_gray_frame = (unsigned char *) calloc(w * h, sizeof(uint8_t)); + image_create(&opticflow->img_gray, w, h, IMAGE_GRAYSCALE); + 
image_create(&opticflow->prev_img_gray, w, h, IMAGE_GRAYSCALE); /* Set the previous values */ opticflow->got_first_img = FALSE; @@ -98,8 +98,9 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.); memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval)); + // Conver the first image to gray if (!opticflow->got_first_img) { - CvtYUYV2Gray(opticflow->prev_gray_frame, img->buf, opticflow->img_w, opticflow->img_h); + image_to_grayscale(img, &opticflow->prev_img_gray); opticflow->got_first_img = TRUE; } @@ -107,13 +108,10 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Corner detection // ************************************************************************************* - // FAST corner detection - int fast_threshold = 5; - xyFAST *pnts_fast = fast9_detect((const byte *)opticflow->prev_gray_frame, opticflow->img_w, opticflow->img_h, opticflow->img_w, - fast_threshold, &result->count); - if (result->count > MAX_COUNT) { result->count = MAX_COUNT; } + // FAST corner detection (TODO: non fixed threashold) + struct point_t *pnts_fast = fast9_detect(&opticflow->prev_img_gray, 5, &result->corner_cnt); - // Copy the points and remove neighboring corners + /*// Copy the points and remove neighboring corners const float min_distance2 = 10 * 10; bool_t remove_points[MAX_COUNT]; uint16_t count_fil = 0; @@ -135,12 +133,12 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } free(pnts_fast); if(count_fil > 25) count_fil = 25; - result->count = count_fil; + result->count = count_fil;*/ uint8_t *im = (uint8_t *)img->buf; - for(int i = 0; i < result->count; i++) { - uint16_t idx = 2*y[i]*opticflow->img_w + 2*x[i]; + for(int i = 0; i < result->corner_cnt; i++) { + uint16_t idx = 2*pnts_fast[i].y*opticflow->img_w + 2*pnts_fast[i].x; im[idx] = 255; idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); im[idx] = 255; @@ -151,10 +149,10 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // Corner Tracking // ************************************************************************************* - CvtYUYV2Gray(opticflow->gray_frame, img->buf, opticflow->img_w, opticflow->img_h); + //CvtYUYV2Gray(opticflow->gray_frame, img->buf, opticflow->img_w, opticflow->img_h); - opticFlowLK(opticflow->gray_frame, opticflow->prev_gray_frame, x, y, - count_fil, opticflow->img_w, opticflow->img_h, new_x, new_y, status, 5, 100); + //opticFlowLK(opticflow->gray_frame, opticflow->prev_gray_frame, x, y, + // count_fil, opticflow->img_w, opticflow->img_h, new_x, new_y, status, 5, 100); // Remove points if we lost tracking /* for (int i = count_fil - 1; i >= 0; i--) { @@ -170,7 +168,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } }*/ - result->dx_sum = 0.0; + /*result->dx_sum = 0.0; result->dy_sum = 0.0; // Optical Flow Computation @@ -189,7 +187,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } else { result->dx_sum = 0.0; result->dy_sum = 0.0; - } + }*/ // Flow Derotation /*result->diff_pitch = (state->theta - opticflow->prev_pitch) * opticflow->img_h / FOV_H; @@ -239,7 +237,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Next Loop Preparation // 
************************************************************************************* */ - memcpy(opticflow->prev_gray_frame, opticflow->gray_frame, opticflow->img_w * opticflow->img_h); + image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); } /** diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h index a8d9538a10a..a704d46b34c 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h @@ -31,16 +31,16 @@ #include "std.h" #include "inter_thread_data.h" +#include "lib/vision/image.h" #include "lib/v4l/v4l2.h" -#include struct opticflow_t { unsigned int img_w; //< The image width unsigned int img_h; //< The image width - uint8_t *gray_frame; //< Current gray image frame - uint8_t *prev_gray_frame; //< Previous gray image frame + struct image_t img_gray; //< Current gray image frame + struct image_t prev_img_gray; //< Previous gray image frame bool_t got_first_img; //< If we got a image to work with @@ -51,7 +51,7 @@ struct opticflow_t }; -void opticflow_calc_init(struct opticflow_t *opticflow, unsigned int w, unsigned int h); +void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h); void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result); #endif /* OPTICFLOW_CALCULATOR_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c index f5a1f168c49..36ddb52e465 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -137,7 +137,7 @@ void stabilization_opticflow_update(struct opticflow_result_t* result) &result->diff_roll, &result->diff_pitch, &result->Velx, &result->Vely, &test, &test, - &result->cam_h, &result->count); + &result->cam_h, &result->corner_cnt); /* Check if we are in the correct AP_MODE before setting commands */ if (autopilot_mode != AP_MODE_MODULE) { From 715b925d89a53a87e6f48cdc148af0186dd0f6fa Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Thu, 12 Mar 2015 14:32:05 +0100 Subject: [PATCH 04/17] [vision] Optic flow debugging and small fixes --- .../computer_vision/lib/vision/fast_rosten.c | 40 +++++++++---------- .../computer_vision/lib/vision/image.c | 2 +- .../opticflow/opticflow_calculator.c | 17 +++++--- 3 files changed, 32 insertions(+), 27 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index 0ff567e62bc..fbdb16d1531 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -34,7 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include "fast_rosten.h" -static void fast_make_offsets(int32_t *pixel, uint16_t row_stride); +static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel_size); /** * Do a FAST9 corner detection @@ -57,12 +57,12 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *n pixel_size = 2; // Calculate the pixel offsets - fast_make_offsets(pixel, img->w*pixel_size); + fast_make_offsets(pixel, img->w, pixel_size); // Go trough all the pixels (minus the borders) for (y = 3; y < img->h - 3; y++) for (x = 3; x < img->w - 3; x++) { - const uint8_t *p = (uint8_t *)img->buf + y * img->w * pixel_size + x + pixel_size/2; + const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size/2; // Calculate the threshold values int16_t cb = *p + threshold; @@ -3618,22 +3618,22 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *n * @param[out] *pixel The offset array of the different pixels * @param[in] row_stride The row stride in the image */ -static void fast_make_offsets(int32_t *pixel, uint16_t row_stride) +static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel_size) { - pixel[0] = 0 + row_stride * 3; - pixel[1] = 1 + row_stride * 3; - pixel[2] = 2 + row_stride * 2; - pixel[3] = 3 + row_stride * 1; - pixel[4] = 3 + row_stride * 0; - pixel[5] = 3 + row_stride * -1; - pixel[6] = 2 + row_stride * -2; - pixel[7] = 1 + row_stride * -3; - pixel[8] = 0 + row_stride * -3; - pixel[9] = -1 + row_stride * -3; - pixel[10] = -2 + row_stride * -2; - pixel[11] = -3 + row_stride * -1; - pixel[12] = -3 + row_stride * 0; - pixel[13] = -3 + row_stride * 1; - pixel[14] = -2 + row_stride * 2; - pixel[15] = -1 + row_stride * 3; + pixel[0] = 0*pixel_size + row_stride * 3*pixel_size; + pixel[1] = 1*pixel_size + row_stride * 3*pixel_size; + pixel[2] = 2*pixel_size + row_stride * 2*pixel_size; + pixel[3] = 3*pixel_size + row_stride * 1*pixel_size; + pixel[4] = 3*pixel_size + row_stride * 0*pixel_size; + pixel[5] = 3*pixel_size + row_stride * -1*pixel_size; + pixel[6] = 2*pixel_size + row_stride * -2*pixel_size; + pixel[7] = 1*pixel_size + row_stride * -3*pixel_size; + pixel[8] = 0*pixel_size + row_stride * -3*pixel_size; + pixel[9] = -1*pixel_size + row_stride * -3*pixel_size; + pixel[10] = -2*pixel_size + row_stride * -2*pixel_size; + pixel[11] = -3*pixel_size + row_stride * -1*pixel_size; + pixel[12] = -3*pixel_size + row_stride * 0*pixel_size; + pixel[13] = -3*pixel_size + row_stride * 1*pixel_size; + pixel[14] = -2*pixel_size + row_stride * 2*pixel_size; + pixel[15] = -1*pixel_size + row_stride * 3*pixel_size; } diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 9c4b0a8e03f..e9b9b66c1a9 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -77,7 +77,7 @@ void image_copy(struct image_t *input, struct image_t *output) output->h = input->h; output->buf_size = input->buf_size; memcpy(&output->ts, &input->ts, sizeof(struct timeval)); - memcpy(input->buf, output->buf, input->buf_size); + memcpy(output->buf, input->buf, input->buf_size); } /** diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 1d9ccc7e360..65f3e88d5b1 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ 
b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -98,9 +98,12 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.); memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval)); - // Conver the first image to gray + // Convert image to grayscale + image_to_grayscale(img, &opticflow->img_gray); + + // Copy to previous image if not set if (!opticflow->got_first_img) { - image_to_grayscale(img, &opticflow->prev_img_gray); + image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); opticflow->got_first_img = TRUE; } @@ -109,7 +112,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection (TODO: non fixed threashold) - struct point_t *pnts_fast = fast9_detect(&opticflow->prev_img_gray, 5, &result->corner_cnt); + struct point_t *pnts_fast = fast9_detect(img, 20, &result->corner_cnt); /*// Copy the points and remove neighboring corners const float min_distance2 = 10 * 10; @@ -136,15 +139,17 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ result->count = count_fil;*/ + image_to_grayscale(img, img); uint8_t *im = (uint8_t *)img->buf; for(int i = 0; i < result->corner_cnt; i++) { - uint16_t idx = 2*pnts_fast[i].y*opticflow->img_w + 2*pnts_fast[i].x; + uint32_t idx = 2*pnts_fast[i].y*opticflow->img_w + pnts_fast[i].x*2; im[idx] = 255; - idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); + /*idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); im[idx] = 255; idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); - im[idx] = 255; + im[idx] = 255;*/ } + free(pnts_fast); // ************************************************************************************* // Corner Tracking From 54145a2e8819084a513ad5ca7650defa6c39d7a7 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Thu, 12 Mar 2015 15:13:10 +0100 Subject: [PATCH 05/17] [vision] Better FAST9 corner detection --- conf/modules/cv_opticflow.xml | 19 ++++++++++ .../computer_vision/lib/vision/fast_rosten.c | 26 +++++++++++--- .../computer_vision/lib/vision/fast_rosten.h | 2 +- .../opticflow/opticflow_calculator.c | 2 +- .../computer_vision/opticflow_module.c | 35 ++++++------------- 5 files changed, 53 insertions(+), 31 deletions(-) diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index 199e3c6462d..05a7828cfcd 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -64,6 +64,25 @@ + + + VIEWVIDEO_DEV ?= UDP1 + VIEWVIDEO_HOST ?= $(MODEM_HOST) + VIEWVIDEO_PORT_OUT ?= 5000 + VIEWVIDEO_PORT_IN ?= 4999 + VIEWVIDEO_BROADCAST ?= $(MODEM_BROADCAST) + VIEWVIDEO_DEV_LOWER = $(shell echo $(VIEWVIDEO_DEV) | tr A-Z a-z) + + VIEWVID_G_CFLAGS = -DVIEWVIDEO_HOST=\"$(VIEWVIDEO_HOST)\" -DVIEWVIDEO_PORT_OUT=$(VIEWVIDEO_PORT_OUT) + VIEWVID_CFLAGS = -DUSE_$(VIEWVIDEO_DEV) -DVIEWVIDEO_DEV=$(VIEWVIDEO_DEV_LOWER) + VIEWVID_CFLAGS += -D$(VIEWVIDEO_DEV)_PORT_OUT=$(VIEWVIDEO_PORT_OUT) -D$(VIEWVIDEO_DEV)_PORT_IN=$(VIEWVIDEO_PORT_IN) + VIEWVID_CFLAGS += -D$(VIEWVIDEO_DEV)_BROADCAST=$(VIEWVIDEO_BROADCAST) -D$(VIEWVIDEO_DEV)_HOST=\"$(VIEWVIDEO_HOST)\" + ifeq ($(VIEWVIDEO_USE_NC),) + ap.CFLAGS += $(VIEWVID_G_CFLAGS) $(VIEWVID_CFLAGS) + else + ap.CFLAGS += $(VIEWVID_G_CFLAGS) -DVIEWVIDEO_USE_NC + endif + diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c 
b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index fbdb16d1531..50c17f740c9 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -40,15 +40,16 @@ static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel * Do a FAST9 corner detection * @param[in] *img The image to do the corner detection on * @param[in] threshold The threshold which we use for FAST9 + * @param[in] min_dist The minimum distance in pixels between detections * @param[out] *num_corner The amount of corners found * @return The corners found */ -struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *num_corners) +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint32_t *num_corners) { - int corner_cnt = 0; - int rsize = 512; + uint32_t corner_cnt = 0; + uint16_t rsize = 512; int pixel[16]; - int x, y; + uint16_t x, y, i; struct point_t *ret_corners = malloc(sizeof(struct point_t) * rsize); // Set the pixel size @@ -62,6 +63,21 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *n // Go trough all the pixels (minus the borders) for (y = 3; y < img->h - 3; y++) for (x = 3; x < img->w - 3; x++) { + // First check if we aren't in range vertical (TODO: fix less intensive way) + bool_t need_skip = FALSE; + for(i = 0; i < corner_cnt; i++) { + if(x-min_dist < ret_corners[i].x && ret_corners[i].x < x+min_dist + && y-min_dist < ret_corners[i].y && ret_corners[i].y < y+min_dist) { + need_skip = TRUE; + break; + } + } + + if(need_skip) { + x += min_dist; + continue; + } + const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size/2; // Calculate the threshold values @@ -3607,6 +3623,8 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *n ret_corners[corner_cnt].y = y; corner_cnt++; + // Skip some in the width direction + x += min_dist; } *num_corners = corner_cnt; diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h index 3ec414890ac..08357649458 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h @@ -37,6 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
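For reference, a minimal caller sketch of the reworked detector follows; it is not taken from the patch itself. It assumes the include path "lib/vision/fast_rosten.h" (mirroring the "lib/vision/image.h" include used by the header) and a frame already filled by the capture code, and it reuses the threshold (20) and minimum corner spacing (10 pixels) that opticflow_calculator.c passes in the hunk above.

#include <stdlib.h>
#include "lib/vision/fast_rosten.h"   /* assumed include path, matching "lib/vision/image.h" */

/* Illustrative only: detect corners on one frame and release the result. */
static void example_fast9_usage(struct image_t *frame)
{
  uint32_t corner_cnt = 0;

  /* Threshold 20, keep detections at least 10 pixels apart
   * (the values used by opticflow_calculator.c in this patch). */
  struct point_t *corners = fast9_detect(frame, 20, 10, &corner_cnt);

  for (uint32_t i = 0; i < corner_cnt; i++) {
    /* corners[i].x and corners[i].y are pixel coordinates inside frame */
  }

  /* fast9_detect allocates the result with malloc/realloc, so the caller frees it,
   * just as opticflow_calculator.c does with free(pnts_fast). */
  free(corners);
}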
#include "std.h" #include "lib/vision/image.h" -struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint32_t *num_corners); +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint32_t *num_corners); #endif diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 65f3e88d5b1..4002df2a602 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -112,7 +112,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection (TODO: non fixed threashold) - struct point_t *pnts_fast = fast9_detect(img, 20, &result->corner_cnt); + struct point_t *pnts_fast = fast9_detect(img, 20, 10, &result->corner_cnt); /*// Copy the points and remove neighboring corners const float min_distance2 = 10 * 10; diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index bd853425261..32f836f5c49 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -35,6 +35,7 @@ #include "lib/v4l/v4l2.h" #include "lib/encoding/jpeg.h" +#include "lib/encoding/rtp.h" /* default sonar/agl to use in opticflow visual_estimator */ #ifndef OPTICFLOW_AGL_ID @@ -170,31 +171,15 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { pthread_mutex_unlock(&opticflow_mutex); #ifdef OPTICFLOW_DEBUG - jpeg_encode_image(&img, &img_jpeg, 99, TRUE); - - // Open process to send using netcat (in a fork because sometimes kills itself???) 
- pid_t pid = fork(); - - if(pid < 0) { - printf("[viewvideo] Could not create netcat fork.\n"); - } - else if(pid ==0) { - // We are the child and want to send the image - FILE *netcat = popen("nc 192.168.1.2 5000 2>/dev/null", "w"); - if (netcat != NULL) { - fwrite(img_jpeg.buf, sizeof(uint8_t), img_jpeg.buf_size, netcat); - pclose(netcat); // Ignore output, because it is too much when not connected - } else { - printf("[viewvideo] Failed to open netcat process.\n"); - } - - // Exit the program since we don't want to continue after transmitting - exit(0); - } - else { - // We want to wait until the child is finished - wait(NULL); - } + jpeg_encode_image(&img, &img_jpeg, 50, FALSE); + rtp_frame_send( + &VIEWVIDEO_DEV, // UDP device + &img_jpeg, + 0, // Format 422 + 50, // Jpeg-Quality + 0, // DRI Header + 0 // 90kHz time increment + ); #endif // Free the image From 263b9eea1d5260000630de5d3308b1134cd8ee5d Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Sun, 15 Mar 2015 17:30:35 +0100 Subject: [PATCH 06/17] [vision] Optical flow rewrite (still needs testing) --- .../computer_vision/lib/vision/fast_rosten.c | 28 +- .../computer_vision/lib/vision/image.c | 185 ++++++++ .../computer_vision/lib/vision/image.h | 15 +- .../computer_vision/lib/vision/lucas_kanade.c | 406 ++++-------------- .../computer_vision/lib/vision/lucas_kanade.h | 14 +- .../opticflow/opticflow_calculator.c | 57 +-- 6 files changed, 303 insertions(+), 402 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index 50c17f740c9..272f589ff14 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -64,23 +64,27 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi for (y = 3; y < img->h - 3; y++) for (x = 3; x < img->w - 3; x++) { // First check if we aren't in range vertical (TODO: fix less intensive way) - bool_t need_skip = FALSE; - for(i = 0; i < corner_cnt; i++) { - if(x-min_dist < ret_corners[i].x && ret_corners[i].x < x+min_dist - && y-min_dist < ret_corners[i].y && ret_corners[i].y < y+min_dist) { - need_skip = TRUE; - break; + if(min_dist > 0) { + bool_t need_skip = FALSE; + + // Go trough all the previous corners + for(i = 0; i < corner_cnt; i++) { + if(x-min_dist < ret_corners[i].x && ret_corners[i].x < x+min_dist + && y-min_dist < ret_corners[i].y && ret_corners[i].y < y+min_dist) { + need_skip = TRUE; + break; + } } - } - if(need_skip) { - x += min_dist; - continue; + // Skip the box if we found a pixel nearby + if(need_skip) { + x += min_dist; + continue; + } } - const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size/2; - // Calculate the threshold values + const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size/2; int16_t cb = *p + threshold; int16_t c_b = *p - threshold; diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index e9b9b66c1a9..823073a580e 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -47,6 +47,8 @@ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum ima img->buf_size = sizeof(uint8_t)*2 * width * height; else if(type == IMAGE_JPEG) img->buf_size = sizeof(uint8_t)*1.1 * width * height; // At maximum quality this is enough 
+ else if(type == IMAGE_GRADIENT) + img->buf_size = sizeof(int16_t) * width * height; else img->buf_size = sizeof(uint8_t) * width * height; @@ -206,3 +208,186 @@ void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint source += (downsample-1) * input->w * 2; } } + +/** + * This outputs a subpixel window image in grayscale + * Currently only works with Grayscale images as input but could be upgraded to + * also support YUV422 images. + * @param[in] *input Input image (grayscale only) + * @param[out] *output Window output (width and height is used to calculate the window size) + * @param[in] *center Center point in subpixel coordinates + * @param[in] subpixel_factor The subpixel factor per pixel + */ +void image_subpixel_window(struct image_t *input, struct image_t *output, struct point_t *center, uint16_t subpixel_factor) +{ + uint8_t *input_buf = (uint8_t *)input->buf; + uint8_t *output_buf = (uint8_t *)output->buf; + + // Calculate the window size + uint16_t half_window = output->w / 2; + uint16_t subpixel_w = output->w * subpixel_factor; + uint16_t subpixel_h = output->h * subpixel_factor; + + // Go through the whole window size in normal coordinates + for(uint16_t i = 0; i < output->w; i++) { + for(uint16_t j = 0; j < output->w; j++) { + // Calculate the subpixel coordinate + uint16_t x = center->x + (i - half_window) * subpixel_factor; + uint16_t y = center->y + (j - half_window) * subpixel_factor; + Bound(x, 0, subpixel_w); + Bound(y, 0, subpixel_h); + + // Calculate the original pixel coordinate + uint16_t orig_x = x / subpixel_factor; + uint16_t orig_y = y / subpixel_factor; + + // Calculate top left (in subpixel coordinates) + uint16_t tl_x = orig_x * subpixel_factor; + uint16_t tl_y = orig_y * subpixel_factor; + + // Check if it is the top left pixel + uint32_t output_idx = output->w*y + x; + if(tl_x == x && tl_y == y) { + output_buf[output_idx] = input_buf[input->w*orig_y + orig_x]; + } + else { + // Calculate the difference from the top left + uint16_t alpha_x = (x - tl_x); + uint16_t alpha_y = (y - tl_y); + + // Blend from the 4 surrounding pixels + uint32_t blend = (subpixel_factor - alpha_x) * (subpixel_factor - alpha_y) * input_buf[input->w*orig_y + orig_x]; + blend += alpha_x * (subpixel_factor - alpha_y) * input_buf[input->w*orig_y + (orig_x+1)]; + blend += (subpixel_factor - alpha_x) * alpha_y * input_buf[input->w*(orig_y+1) + orig_x]; + blend += alpha_x * alpha_y * input_buf[input->w*(orig_y+1) + (orig_x+1)]; + + // Set the normalized pixel blend + output_buf[output_idx] = blend / (subpixel_factor * subpixel_factor); + } + } + } +} + +/** + * Calculate the gradients using the following matrix: + * [0 0 0; -1 0 1; 0 0 0] + * @param[in] *input Input grayscale image + * @param[out] *dx Output gradient in the X direction (dx->w = input->w-2, dx->h = input->h-2) + * @param[out] *dy Output gradient in the Y direction (dx->w = input->w-2, dx->h = input->h-2) + */ +void image_gradients(struct image_t *input, struct image_t *dx, struct image_t *dy) +{ + // Fetch the buffers in the correct format + uint8_t *input_buf = (uint8_t *)input->buf; + int16_t *dx_buf = (int16_t *)dx->buf; + int16_t *dy_buf = (int16_t *)dx->buf; + + // Go trough all pixels except the borders + for(uint16_t x = 1; x < input->w-1; x++) { + for(uint16_t y = 1; y < input->h-1; x++) { + dx_buf[(y-1)*dx->w + (x-1)] = -input_buf[y*input->w + x-1] + input_buf[y*input->w + x+1]; + dy_buf[(y-1)*dx->w + (x-1)] = -input_buf[(y-1)*input->w + x] + input_buf[(y+1)*input->w + x]; + } + } +} + 
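As a side note, the bilinear blend that image_subpixel_window() performs for every window pixel can be read in isolation. The sketch below is not part of the patch; it only restates the same weighting for a single sample, with buf, w and the coordinates as placeholder names and the subpixel factor passed in as f.

#include <stdint.h>

/* One subpixel sample, blended from the four surrounding pixels and
 * normalized by f*f, as in the window loop above. The caller must stay
 * at least one pixel away from the right and bottom image borders. */
static uint8_t subpixel_sample(const uint8_t *buf, uint16_t w,
                               uint32_t x_sub, uint32_t y_sub, uint16_t f)
{
  uint16_t x = x_sub / f;                  /* top-left pixel column */
  uint16_t y = y_sub / f;                  /* top-left pixel row */
  uint16_t alpha_x = x_sub - x * f;        /* subpixel offset in x (0 .. f-1) */
  uint16_t alpha_y = y_sub - y * f;        /* subpixel offset in y (0 .. f-1) */

  uint32_t blend = (f - alpha_x) * (f - alpha_y) * buf[y * w + x]
                 + alpha_x       * (f - alpha_y) * buf[y * w + (x + 1)]
                 + (f - alpha_x) * alpha_y       * buf[(y + 1) * w + x]
                 + alpha_x       * alpha_y       * buf[(y + 1) * w + (x + 1)];

  return blend / (f * f);
}

With f = 10 (the subpixel_factor the Lucas-Kanade code in this series uses), subpixel_sample(gray, w, 1234, 567, 10) would return the blended intensity at position (123.4, 56.7) of a w-pixel-wide grayscale buffer; the arguments here are purely illustrative.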
+/** + * Calculate the G vector of an image gradient + * This is used for optical flow calculation. + * @param[in] *dx The gradient in the X direction + * @param[in] *dy The gradient in the Y direction + * @param[out] *g The G[4] vector devided by 255 to keep in range + */ +void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g) +{ + int32_t sum_dxx = 0, sum_dxy = 0, sum_dyy = 0; + + // Fetch the buffers in the correct format + int16_t *dx_buf = (int16_t *)dx->buf; + int16_t *dy_buf = (int16_t *)dx->buf; + + // Calculate the different sums + for(uint16_t x = 0; x < dx->w; x++) { + for(uint16_t y = 0; y < dy->h; y++) { + sum_dxx += (dx_buf[y*dx->w + x] * dx_buf[y*dx->w + x]); + sum_dxy += (dx_buf[y*dx->w + x] * dy_buf[y*dy->w + x]); + sum_dyy += (dy_buf[y*dy->w + x] * dy_buf[y*dy->w + x]); + } + } + + // ouput the G vector + g[0] = sum_dxx / 255; + g[1] = sum_dxy / 255; + g[2] = sum_dxy / 255; + g[3] = sum_dyy / 255; +} + +/** + * Calculate the difference between two images and return the error + * This will only work with grayscale images + * @param[in] *img_a The image to substract from + * @param[in] *img_b The image to substract from img_a + * @param[out] *diff The image difference (if not needed can be NULL) + * @return The squared difference summed + */ +uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff) +{ + uint32_t sum_diff2 = 0; + int16_t *diff_buf = NULL; + + // Fetch the buffers in the correct format + uint8_t *img_a_buf = (uint8_t *)img_a->buf; + int16_t *img_b_buf = (int16_t *)img_b->buf; + + // If we want the difference image back + if(diff != NULL) + diff_buf = (int16_t *)diff->buf; + + // Go trough the imagge pixels and calculate the difference + for(uint16_t x = 0; x < img_a->w; x++) { + for(uint16_t y = 0; y < img_a->h; y++) { + int16_t diff_c = img_a_buf[y*img_a->w +x] - img_b_buf[y*img_b->w +x]; + sum_diff2 += diff_c*diff_c; + + // Set the difference image + if(diff_buf != NULL) + diff_buf[y*diff->w + x] = diff_c; + } + } + + return sum_diff2; +} + +/** + * Calculate the multiplication between two images and return the error + * This will only work with image gradients + * @param[in] *img_a The image to multiply + * @param[in] *img_b The image to multiply with + * @param[out] *mult The image multiplication (if not needed can be NULL) + * @return The sum of the multiplcation + */ +int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult) +{ + int32_t sum = 0; + int16_t *img_a_buf = (int16_t *)img_a->buf; + int16_t *img_b_buf = (int16_t *)img_b->buf; + int16_t *mult_buf = NULL; + + // When we want an output + if(mult != NULL) + mult_buf = (int16_t *)mult->buf; + + // Calculate the multiplication + for(uint16_t x = 0; x < img_a->w; x++) { + for(uint16_t y = 0; y < img_a->h; y++) { + int16_t mult_c = img_a_buf[y*img_a->w +x] * img_b_buf[y*img_b->w +x]; + sum += mult_c; + + // Set the difference image + if(mult_buf != NULL) + mult_buf[y*mult->w + x] = mult_c; + } + } + + return sum; +} diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index ebf03b5e1bc..db88db5b5b2 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -32,9 +32,10 @@ /* The different type of images we currently support */ enum image_type { - IMAGE_YUV422, //< UYVY format - IMAGE_GRAYSCALE, //< Grayscale image with only the Y part - IMAGE_JPEG //< An JPEG encoded 
image + IMAGE_YUV422, //< UYVY format (uint16 per pixel) + IMAGE_GRAYSCALE, //< Grayscale image with only the Y part (uint8 per pixel) + IMAGE_JPEG, //< An JPEG encoded image (not per pixel encoded) + IMAGE_GRADIENT //< An image gradient (int16 per pixel) }; /* Main image structure */ @@ -60,8 +61,12 @@ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum ima void image_free(struct image_t *img); void image_copy(struct image_t *input, struct image_t *output); void image_to_grayscale(struct image_t *input, struct image_t *output); -uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, - uint8_t u_M, uint8_t v_m, uint8_t v_M); +uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, uint8_t u_M, uint8_t v_m, uint8_t v_M); void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint16_t downsample); +void image_subpixel_window(struct image_t *input, struct image_t *output, struct point_t *center, uint16_t subpixel_factor); +void image_gradients(struct image_t *input, struct image_t *dx, struct image_t *dy); +void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g); +uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff); +int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index 923ddda8f59..6896e85acc7 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -33,218 +33,9 @@ #include #include "lucas_kanade.h" -#define int_index(x,y) (y * IMG_WIDTH + x) -#define uint_index(xx, yy) (((yy * IMG_WIDTH + xx) * 2) & 0xFFFFFFFC) -#define NO_MEMORY -1 -#define OK 0 -#define N_VISUAL_INPUTS 51 -#define N_ACTIONS 3 -#define MAX_COUNT_PT 50 -unsigned int IMG_WIDTH, IMG_HEIGHT; - -void multiplyImages(int *ImA, int *ImB, int *ImC, int width, int height) -{ - int x, y; - unsigned int ix; - - // printf("W = %d, H = %d\n\r", IMG_WIDTH, IMG_HEIGHT); - - for (x = 0; x < width; x++) { - for (y = 0; y < height; y++) { - ix = (y * width + x); - ImC[ix] = ImA[ix] * ImB[ix]; - // If we want to keep the values in [0, 255]: - // ImC[ix] /= 255; - } - } -} - -void getImageDifference(int *ImA, int *ImB, int *ImC, int width, int height) -{ - int x, y; - unsigned int ix; - - // printf("W = %d, H = %d\n\r", IMG_WIDTH, IMG_HEIGHT); - - for (x = 0; x < width; x++) { - for (y = 0; y < height; y++) { - ix = (y * width + x); - ImC[ix] = ImA[ix] - ImB[ix]; - } - } - -} - -void getSubPixel_gray(int *Patch, unsigned char *frame_buf, int center_x, int center_y, int half_window_size, - int subpixel_factor) -{ - int x, y, x_0, y_0, x_0_or, y_0_or, i, j, window_size, alpha_x, alpha_y, max_x, max_y; - unsigned int ix1, ix2, Y; - window_size = half_window_size * 2 + 1; - max_x = (IMG_WIDTH - 1) * subpixel_factor; - max_y = (IMG_HEIGHT - 1) * subpixel_factor; - - for (i = 0; i < window_size; i++) { - for (j = 0; j < window_size; j++) { - // index for this position in the patch: - ix1 = (j * window_size + i); - - // determine subpixel coordinates of the current pixel: - x = center_x + (i - half_window_size) * subpixel_factor; - if (x < 0) { x = 0; } - if (x > max_x) { x = max_x; } - y = center_y + (j - half_window_size) * subpixel_factor; - if 
(y < 0) { y = 0; } - if (y > max_y) { y = max_y; } - // pixel to the top left: - x_0_or = (x / subpixel_factor); - x_0 = x_0_or * subpixel_factor; - y_0_or = (y / subpixel_factor); - y_0 = y_0_or * subpixel_factor; - - - if (x == x_0 && y == y_0) { - ix2 = y_0_or * IMG_WIDTH + x_0_or; - Y = (unsigned int)frame_buf[ix2 + 1]; - Patch[ix1] = (int) Y; - } else { - // blending according to how far the subpixel coordinates are from the pixel coordinates - alpha_x = (x - x_0); - alpha_y = (y - y_0); - - // the patch pixel is a blend from the four surrounding pixels: - ix2 = y_0_or * IMG_WIDTH + x_0_or; - Y = (unsigned int)frame_buf[ix2 + 1]; - Patch[ix1] = (subpixel_factor - alpha_x) * (subpixel_factor - alpha_y) * ((int) Y); - - ix2 = y_0_or * IMG_WIDTH + (x_0_or + 1); - Y = (unsigned int)frame_buf[ix2 + 1]; - Patch[ix1] += alpha_x * (subpixel_factor - alpha_y) * ((int) Y); - - ix2 = (y_0_or + 1) * IMG_WIDTH + x_0_or; - Y = (unsigned int)frame_buf[ix2 + 1]; - Patch[ix1] += (subpixel_factor - alpha_x) * alpha_y * ((int) Y); - - ix2 = (y_0_or + 1) * IMG_WIDTH + (x_0_or + 1); - Y = (unsigned int)frame_buf[ix2 + 1]; - Patch[ix1] += alpha_x * alpha_y * ((int) Y); - - // normalize patch value - Patch[ix1] /= (subpixel_factor * subpixel_factor); - } - } - } - - return; -} - -void getGradientPatch(int *Patch, int *DX, int *DY, int half_window_size) -{ - unsigned int ix1, ix2; - int x, y, padded_patch_size, patch_size, Y1, Y2; - // int printed; printed = 0; - - padded_patch_size = 2 * (half_window_size + 1) + 1; - patch_size = 2 * half_window_size + 1; - // currently we use [0 0 0; -1 0 1; 0 0 0] as mask for dx - for (x = 1; x < padded_patch_size - 1; x++) { - for (y = 1; y < padded_patch_size - 1; y++) { - // index in DX, DY: - ix2 = (unsigned int)((y - 1) * patch_size + (x - 1)); - - ix1 = (unsigned int)(y * padded_patch_size + x - 1); - Y1 = Patch[ix1]; - ix1 = (unsigned int)(y * padded_patch_size + x + 1); - Y2 = Patch[ix1]; - DX[ix2] = (Y2 - Y1) / 2; - - ix1 = (unsigned int)((y - 1) * padded_patch_size + x); - Y1 = Patch[ix1]; - ix1 = (unsigned int)((y + 1) * padded_patch_size + x); - Y2 = Patch[ix1]; - DY[ix2] = (Y2 - Y1) / 2; - - - } - } - - return; -} - -int getSumPatch(int *Patch, int size) -{ - int x, y, sum; // , threshold - unsigned int ix; - - // in order to keep the sum within range: - //threshold = 50000; // typical values are far below this threshold - sum = 0; - for (x = 0; x < size; x++) { - for (y = 0; y < size; y++) { - ix = (y * size) + x; - sum += Patch[ix]; // do not check thresholds - } - } - - return sum; -} - -int calculateG(int *G, int *DX, int *DY, int half_window_size) -{ - int patch_size; - int *DXX; int *DXY; int *DYY; - - patch_size = 2 * half_window_size + 1; - - // allocate memory: - DXX = (int *) malloc(patch_size * patch_size * sizeof(int)); - DXY = (int *) malloc(patch_size * patch_size * sizeof(int)); - DYY = (int *) malloc(patch_size * patch_size * sizeof(int)); - - if (DXX == 0 || DXY == 0 || DYY == 0) { - return NO_MEMORY; - } - - // then determine the second order gradients - multiplyImages(DX, DX, DXX, patch_size, patch_size); - multiplyImages(DX, DY, DXY, patch_size, patch_size); - multiplyImages(DY, DY, DYY, patch_size, patch_size); - - // calculate G: - G[0] = getSumPatch(DXX, patch_size); - G[1] = getSumPatch(DXY, patch_size); - G[2] = G[1]; - G[3] = getSumPatch(DYY, patch_size); - - // free memory: - free((char *) DXX); free((char *) DXY); free((char *) DYY); - - // no errors: - return OK; -} - - - -int calculateError(int *ImC, int width, int height) -{ - 
int x, y, error; - unsigned int ix; - - error = 0; - - for (x = 0; x < width; x++) { - for (y = 0; y < height; y++) { - ix = (y * width + x); - error += ImC[ix] * ImC[ix]; - } - } - - return error; -} - -int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint16_t *p_x, uint16_t *p_y, uint16_t n_found_points, - uint16_t imW, uint16_t imH, uint16_t *new_x, uint16_t *new_y, bool_t *status, uint16_t half_window_size, uint8_t max_iterations) +void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt, + struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold) { // A straightforward one-level implementation of Lucas-Kanade. // For all points: @@ -256,151 +47,106 @@ int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint // [b] determine the image difference between the two neighborhoods // [c] calculate the 'b'-vector // [d] calculate the additional flow step and possibly terminate the iteration - int p, subpixel_factor, x, y, it, step_threshold, step_x, step_y, v_x, v_y, Det; - int b_x, b_y, patch_size, padded_patch_size, error; - unsigned int ix1, ix2; - int *I_padded_neighborhood; int *I_neighborhood; int *J_neighborhood; - int *DX; int *DY; int *ImDiff; int *IDDX; int *IDDY; - int G[4]; - int error_threshold; - // set the image width and height - IMG_WIDTH = imW; - IMG_HEIGHT = imH; - // spatial resolution of flow is 1 / subpixel_factor - subpixel_factor = 10; - // determine patch sizes and initialize neighborhoods - patch_size = (2 * half_window_size + 1); - error_threshold = (25 * 25) * (patch_size * patch_size); - padded_patch_size = (2 * half_window_size + 3); - I_padded_neighborhood = (int *) malloc(padded_patch_size * padded_patch_size * sizeof(int)); - I_neighborhood = (int *) malloc(patch_size * patch_size * sizeof(int)); - J_neighborhood = (int *) malloc(patch_size * patch_size * sizeof(int)); - if (I_padded_neighborhood == 0 || I_neighborhood == 0 || J_neighborhood == 0) { - return NO_MEMORY; - } - - DX = (int *) malloc(patch_size * patch_size * sizeof(int)); - DY = (int *) malloc(patch_size * patch_size * sizeof(int)); - IDDX = (int *) malloc(patch_size * patch_size * sizeof(int)); - IDDY = (int *) malloc(patch_size * patch_size * sizeof(int)); - ImDiff = (int *) malloc(patch_size * patch_size * sizeof(int)); - if (DX == 0 || DY == 0 || ImDiff == 0 || IDDX == 0 || IDDY == 0) { - return NO_MEMORY; - } + // spatial resolution of flow is 1 / subpixel_factor (TODO: set ourself) + uint16_t subpixel_factor = 10; - for (p = 0; p < n_found_points; p++) { - // status: point is not yet lost: - status[p] = 1; - - // We want to be able to take steps in the image of 1 / subpixel_factor: - p_x[p] *= subpixel_factor; - p_y[p] *= subpixel_factor; - - // if the pixel is outside the ROI in the image, do not track it: - if (!(p_x[p] > ((half_window_size + 1) * subpixel_factor) && p_x[p] < (IMG_WIDTH - half_window_size) * subpixel_factor - && p_y[p] > ((half_window_size + 1) * subpixel_factor) && p_y[p] < (IMG_HEIGHT - half_window_size)*subpixel_factor)) { - status[p] = 0; + // determine patch sizes and initialize neighborhoods + uint16_t patch_size = 2 * half_window_size; + uint32_t error_threshold = (25 * 25) * (patch_size * patch_size); + uint16_t padded_patch_size = patch_size + 2; + + // Create the window images + struct image_t window_I, window_J, window_DX, window_DY, window_diff; + image_create(&window_I, padded_patch_size, 
padded_patch_size, IMAGE_GRAYSCALE); + image_create(&window_J, patch_size, patch_size, IMAGE_GRAYSCALE); + image_create(&window_DX, patch_size, patch_size, IMAGE_GRADIENT); + image_create(&window_DY, patch_size, patch_size, IMAGE_GRADIENT); + image_create(&window_diff, patch_size, patch_size, IMAGE_GRADIENT); + + for (uint16_t p = 0; p < points_cnt; p++) { + // Update the status that the point isn't lost yet + status[p] = TRUE; + + // If the pixel is outside ROI, do not track it + if(points[p].x < half_window_size || (old_img->w - points[p].x) < half_window_size + || points[p].y < half_window_size || (old_img->h - points[p].y) < half_window_size) + { + status[p] = FALSE; + continue; } - // (1) determine the subpixel neighborhood in the old image - // we determine a padded neighborhood with the aim of subsequent gradient processing: - getSubPixel_gray(I_padded_neighborhood, old_image_buf, p_x[p], p_y[p], half_window_size + 1, subpixel_factor); + // Convert the point to a subpixel coordinate + points[p].x *= subpixel_factor; + points[p].y *= subpixel_factor; - // Also get the original-sized neighborhood - for (x = 1; x < padded_patch_size - 1; x++) { - for (y = 1; y < padded_patch_size - 1; y++) { - ix1 = (y * padded_patch_size + x); - ix2 = ((y - 1) * patch_size + (x - 1)); - I_neighborhood[ix2] = I_padded_neighborhood[ix1]; - } - } + // (1) determine the subpixel neighborhood in the old image + image_subpixel_window(old_img, &window_I, &points[p], subpixel_factor); // (2) get the x- and y- gradients - getGradientPatch(I_padded_neighborhood, DX, DY, half_window_size); + image_gradients(&window_I, &window_DX, &window_DY); // (3) determine the 'G'-matrix [sum(Axx) sum(Axy); sum(Axy) sum(Ayy)], where sum is over the window - error = calculateG(G, DX, DY, half_window_size); - if (error == NO_MEMORY) { return NO_MEMORY; } + int32_t G[4]; + image_calculate_g(&window_DX, &window_DY, G); - for (it = 0; it < 4; it++) { - G[it] /= 255; // to keep values in range - } - // calculate G's determinant: - Det = G[0] * G[3] - G[1] * G[2]; - Det = Det / subpixel_factor; // so that the steps will be expressed in subpixel units + // calculate G's determinant in subpixel units: + int32_t Det = (G[0] * G[3] - G[1] * G[2]) / subpixel_factor; + + // Check if the determinant is bigger than 1 if (Det < 1) { - status[p] = 0; + status[p] = FALSE; + continue; } // (4) iterate over taking steps in the image to minimize the error: - it = 0; - step_threshold = 2; // 0.2 as smallest step (L1) - v_x = 0; - v_y = 0; - step_x = step_threshold + 1; - step_y = step_threshold + 1; - - while (status[p] == 1 && it < max_iterations && (abs(step_x) >= step_threshold || abs(step_y) >= step_threshold)) { - // if the pixel goes outside the ROI in the image, stop tracking: - if (!(p_x[p] + v_x > ((half_window_size + 1) * subpixel_factor) - && p_x[p] + v_x < ((int)IMG_WIDTH - half_window_size) * subpixel_factor - && p_y[p] + v_y > ((half_window_size + 1) * subpixel_factor) - && p_y[p] + v_y < ((int)IMG_HEIGHT - half_window_size)*subpixel_factor)) { - status[p] = 0; + memcpy(&new_points[p], &points[p], sizeof(struct point_t)); + for(uint8_t it = 0; it < max_iterations; it++) + { + // If the pixel is outside ROI, do not track it + if(new_points[p].x/subpixel_factor < half_window_size || (old_img->w - new_points[p].x/subpixel_factor) < half_window_size + || new_points[p].y/subpixel_factor < half_window_size || (old_img->h - new_points[p].y/subpixel_factor) < half_window_size) + { + status[p] = FALSE; break; } // [a] get the subpixel 
neighborhood in the new image - // clear J: - for (x = 0; x < patch_size; x++) { - for (y = 0; y < patch_size; y++) { - ix2 = (y * patch_size + x); - J_neighborhood[ix2] = 0; - } - } - + image_subpixel_window(&window_J, new_img, &new_points[p], subpixel_factor); - getSubPixel_gray(J_neighborhood, new_image_buf, p_x[p] + v_x, p_y[p] + v_y, half_window_size, subpixel_factor); // [b] determine the image difference between the two neighborhoods - getImageDifference(I_neighborhood, J_neighborhood, ImDiff, patch_size, patch_size); - error = calculateError(ImDiff, patch_size, patch_size) / 255; - + uint32_t error = image_difference(&window_I, &window_J, &window_diff); if (error > error_threshold && it > max_iterations / 2) { - status[p] = 0; + status[p] = FALSE; break; } - multiplyImages(ImDiff, DX, IDDX, patch_size, patch_size); - b_x = getSumPatch(IDDX, patch_size) / 255; - b_y = getSumPatch(IDDY, patch_size) / 255; - //printf("b_x = %d; b_y = %d;\n\r", b_x, b_y); - // [d] calculate the additional flow step and possibly terminate the iteration - step_x = (G[3] * b_x - G[1] * b_y) / Det; - step_y = (G[0] * b_y - G[2] * b_x) / Det; - v_x += step_x; - v_y += step_y; // - (?) since the origin in the image is in the top left of the image, with y positive pointing down - // next iteration - it++; - } // iteration to find the right window in the new image - new_x[p] = (p_x[p] + v_x) / subpixel_factor; - new_y[p] = (p_y[p] + v_y) / subpixel_factor; - p_x[p] /= subpixel_factor; - p_y[p] /= subpixel_factor; - } + int32_t b_x = image_multiply(&window_diff, &window_DX, NULL) / 255; + int32_t b_y = image_multiply(&window_diff, &window_DY, NULL) / 255; + // [d] calculate the additional flow step and possibly terminate the iteration + int16_t step_x = (G[3] * b_x - G[1] * b_y) / Det; + int16_t step_y = (G[0] * b_y - G[2] * b_x) / Det; + new_points[p].x += step_x; + new_points[p].y += step_y; + // Check if we exceeded the treshold + if(abs(step_x) < step_threshold && abs(step_y) < step_threshold) + break; + } + + // Convert the point back to the original coordinate (TODO: maybe round it as it is closer to the original) + new_points[p].x /= subpixel_factor; + new_points[p].y /= subpixel_factor; + points[p].x /= subpixel_factor; + points[p].y /= subpixel_factor; + } - // free all allocated variables: - free((int *) I_padded_neighborhood); - free((int *) I_neighborhood); - free((int *) J_neighborhood); - free((int *) DX); - free((int *) DY); - free((int *) ImDiff); - free((int *) IDDX); - free((int *) IDDY); - // no errors: - return OK; + // Free the images + image_free(&window_I); + image_free(&window_J); + image_free(&window_DX); + image_free(&window_DY); + image_free(&window_diff); } diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index 0f9ac7ed0f5..63ca1305dc7 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -28,17 +28,9 @@ #define OPTIC_FLOW_INT_H #include "std.h" +#include "image.h" -void multiplyImages(int *ImA, int *ImB, int *ImC, int width, int height); -void getImageDifference(int *ImA, int *ImB, int *ImC, int width, int height); -void getSubPixel_gray(int *Patch, unsigned char *frame_buf, int center_x, int center_y, int half_window_size, - int subpixel_factor); -void getGradientPatch(int *Patch, int *DX, int *DY, int half_window_size); -int getSumPatch(int *Patch, int size); -int calculateG(int *G, int *DX, int *DY, 
int half_window_size); -int calculateError(int *ImC, int width, int height); -int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, uint16_t *p_x, uint16_t *p_y, uint16_t n_found_points, - uint16_t imW, uint16_t imH, uint16_t *new_x, uint16_t *new_y, bool_t *status, uint16_t half_window_size, uint8_t max_iterations); -void OFfilter(float *OFx, float *OFy, float dx, float dy, int count, int OF_FilterType); +void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt, + struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold); #endif /* OPTIC_FLOW_INT_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 4002df2a602..7d546d3e600 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -86,14 +86,6 @@ void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h) */ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result) { - // Corner Tracking - // Working Variables - uint16_t borderx = 24, bordery = 24; - uint16_t x[MAX_COUNT], y[MAX_COUNT]; - uint16_t new_x[MAX_COUNT], new_y[MAX_COUNT]; - bool_t status[MAX_COUNT]; - int dx[MAX_COUNT], dy[MAX_COUNT]; - // Update FPS for information result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.); memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval)); @@ -112,52 +104,26 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection (TODO: non fixed threashold) - struct point_t *pnts_fast = fast9_detect(img, 20, 10, &result->corner_cnt); - - /*// Copy the points and remove neighboring corners - const float min_distance2 = 10 * 10; - bool_t remove_points[MAX_COUNT]; - uint16_t count_fil = 0; - memset(&remove_points, FALSE, sizeof(bool_t) * MAX_COUNT); - - for (uint16_t i = 0; i < result->count; i++) { - if(remove_points[i]) - continue; - - x[count_fil] = pnts_fast[i].x; - y[count_fil++] = pnts_fast[i].y; - - // Skip some if they are too close - for(uint16_t j = i+1; j < result->count; j++) { - float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]); - if(distance2 < min_distance2) - remove_points[j] = TRUE; - } - } - free(pnts_fast); - if(count_fil > 25) count_fil = 25; - result->count = count_fil;*/ - + struct point_t *fast9_points = fast9_detect(img, 20, 5, &result->corner_cnt); - image_to_grayscale(img, img); + /*image_to_grayscale(img, img); uint8_t *im = (uint8_t *)img->buf; for(int i = 0; i < result->corner_cnt; i++) { - uint32_t idx = 2*pnts_fast[i].y*opticflow->img_w + pnts_fast[i].x*2; - im[idx] = 255; - /*idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); + uint32_t idx = 2*fast9_points[i].y*opticflow->img_w + fast9_points[i].x*2; im[idx] = 255; idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); - im[idx] = 255;*/ - } - free(pnts_fast); + im[idx] = 255; + }*/ + // ************************************************************************************* // Corner Tracking // ************************************************************************************* - //CvtYUYV2Gray(opticflow->gray_frame, 
img->buf, opticflow->img_w, opticflow->img_h); - //opticFlowLK(opticflow->gray_frame, opticflow->prev_gray_frame, x, y, - // count_fil, opticflow->img_w, opticflow->img_h, new_x, new_y, status, 5, 100); + struct point_t *new_points = malloc(sizeof(struct point_t) * result->corner_cnt); + bool_t *tracked_points = malloc(sizeof(bool_t) * result->corner_cnt); + opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, fast9_points, result->corner_cnt, + new_points, tracked_points, 5, 100, 2); // Remove points if we lost tracking /* for (int i = count_fil - 1; i >= 0; i--) { @@ -242,6 +208,9 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Next Loop Preparation // ************************************************************************************* */ + free(fast9_points); + free(new_points); + free(tracked_points); image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); } From da7f29ceffba9a34cee048f0d218fd3195d21030 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Sun, 15 Mar 2015 20:25:38 +0100 Subject: [PATCH 07/17] [vision] Add some more documentation --- .../modules/computer_vision/lib/v4l/v4l2.c | 47 ++++++++++++------- .../computer_vision/lib/vision/image.c | 25 ++++++++++ .../computer_vision/lib/vision/image.h | 1 + .../computer_vision/lib/vision/lucas_kanade.c | 21 +++++++-- .../computer_vision/lib/vision/lucas_kanade.h | 10 ++-- .../opticflow/opticflow_calculator.c | 12 ++--- .../opticflow/stabilization_opticflow.c | 4 +- .../computer_vision/opticflow_module.c | 2 + 8 files changed, 88 insertions(+), 34 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c index 12d81959972..5b28588f57c 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c @@ -46,6 +46,9 @@ static void *v4l2_capture_thread(void *data); * The main capturing thread * This thread handles the queue and dequeue of buffers, to make sure only the latest * image buffer is preserved for image processing. + * @param[in] *data The Video 4 Linux 2 device pointer + * @return 0 on succes, -1 if it isn able to fetch an image, + * -2 on timeout of taking an image, -3 on failing buffer dequeue */ static void *v4l2_capture_thread(void *data) { @@ -116,12 +119,13 @@ static void *v4l2_capture_thread(void *data) /** * Initialize a V4L2 subdevice. - * The subdevice name should be something like '/dev/v4l-subdev0' - * The pad and which indicate the way the subdevice should communicate - * with the real device. Which pad it should take. - * Code should be something like V4L2_MBUS_FMT_UYVY8_2X8. See the V4l2 - * manual for available codes. - * Width and height are the amount of pixels this subdevice must cover. + * @param[in] *subdev_name The subdevice name (like /dev/v4l-subdev0) + * @param[in] pad,which The way the subdevice should comminicate and be + * connected to the real device. 
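As a side note, a minimal usage sketch of this V4L2 wrapper may help when reading these docs; the device/subdevice names, the pad/which values and the 320x240 size below are illustrative assumptions, not taken from this patch:

#include "lib/v4l/v4l2.h"

static struct v4l2_device *cam;

bool_t example_camera_open(void)
{
  /* Configure the sensor subdevice first (UYVY bus format), then open the
   * video node with a handful of memory-mapped buffers and start streaming. */
  if (!v4l2_init_subdev("/dev/v4l-subdev1", 0, 1, V4L2_MBUS_FMT_UYVY8_2X8, 320, 240)) {
    return FALSE;
  }
  cam = v4l2_init("/dev/video2", 320, 240, 10);
  if (cam == NULL) {
    return FALSE;
  }
  return v4l2_start_capture(cam);
}

Frames would then be fetched with v4l2_image_get(cam, &img) and released again with v4l2_image_free(cam, &img), as described further down.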
+ * @param[in] code The encoding the subdevice uses (like V4L2_MBUS_FMT_UYVY8_2X8, + * see the V4L2 manual for available encodings) + * @param[in] width,height The width and height of the images + * @return Whether the subdevice was successfully initialized */ bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t code, uint16_t width, uint16_t height) { @@ -163,11 +167,12 @@ bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t } /** - * Initialize a V4L2(Video for Linux 2) device - * The device name should be something like "/dev/video1" - * The subdevice name can be empty if there is no subdevice - * The buffer_cnt are the amount of buffers used in memory mapping - * Note that you need to close this device at the end of you program! + * Initialize a V4L2(Video for Linux 2) device. + * Note that the device must be closed with v4l2_close(dev) at the end. + * @param[in] device_name The video device name (like /dev/video1) + * @param[in] width,height The width and height of the images + * @param[in] buffer_cnt The amount of buffers used for mapping + * @return The newly create V4L2 device */ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height, uint8_t buffers_cnt) { uint8_t i; @@ -280,7 +285,7 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height * Get the latest image buffer and lock it (Thread safe, BLOCKING) * This functions blocks until image access is granted. This should not take that long, because * it is only locked while enqueueing an image. - * Make sure you free the image after processing! + * Make sure you free the image after processing with v4l2_image_free()! * @param[in] *dev The V4L2 video device we want to get an image from * @param[out] *img The image that we got from the video device */ @@ -316,7 +321,7 @@ void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) { /** * Get the latest image and lock it (Thread safe, NON BLOCKING) * This function returns NULL if it can't get access to the current image. - * Make sure you free the image after processing! + * Make sure you free the image after processing with v4l2_image_free())! * @param[in] *dev The V4L2 video device we want to get an image from * @param[out] *img The image that we got from the video device * @return Whether we got an image or not @@ -351,6 +356,8 @@ bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) { /** * Free the image and enqueue the buffer (Thread safe) * This must be done after processing the image, because else all buffers are locked + * @param[in] *dev The video for linux device which the image is from + * @param[in] *img The image to free */ void v4l2_image_free(struct v4l2_device *dev, struct image_t *img) { @@ -368,8 +375,10 @@ void v4l2_image_free(struct v4l2_device *dev, struct image_t *img) /** * Start capturing images in streaming mode (Thread safe) - * Returns true when successfully started capturing. Not that it also returns - * FALSE when it already is in capturing mode. + * @param[in] *dev The video for linux device to start capturing from + * @return It resturns TRUE if it successfully started capture, + * but keep in mind that if it is already started it will + * return FALSE. */ bool_t v4l2_start_capture(struct v4l2_device *dev) { @@ -425,9 +434,10 @@ bool_t v4l2_start_capture(struct v4l2_device *dev) /** * Stop capturing of the image stream (Thread safe) - * Returns TRUE if it successfully stopped capturing. 
Note that it also returns FALSE - * when the capturing is already stopped. This function is blocking until capturing - * thread is closed. + * This function is blocking until capturing thread is closed. + * @param[in] *dev The video for linux device to stop capturing + * @return TRUE if it successfully stopped capturing. Note that it also returns FALSE + * when the capturing is already stopped. */ bool_t v4l2_stop_capture(struct v4l2_device *dev) { @@ -462,6 +472,7 @@ bool_t v4l2_stop_capture(struct v4l2_device *dev) * Close the V4L2 device (Thread safe) * This needs to be preformed to clean up all the buffers and close the device. * Note that this also stops the capturing if it is still capturing. + * @param[in] *dev The video for linux device to close(cleanup) */ void v4l2_close(struct v4l2_device *dev) { diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 823073a580e..e3e8b912a38 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -391,3 +391,28 @@ int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct imag return sum; } + +/** + * Show points in an image by coloring them through giving + * the pixels the maximum value. + * @param[in,out] *img The image to place the points on + * @param[in] *points The points to sohw + * @param[in] *points_cnt The amount of points to show + */ +void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt) +{ + uint8_t *img_buf = (uint8_t *)img->buf; + uint8_t pixel_width = (img->type == IMAGE_YUV422)? 2 : 1; + + // Go trough all points and color them + for(int i = 0; i < points_cnt; i++) { + uint32_t idx = pixel_width*points[i].y*img->w + points[i].x*pixel_width; + img_buf[idx] = 255; + + // YUV422 consists of 2 pixels + if(img->type == IMAGE_YUV422) { + idx++; + img_buf[idx] = 255; + } + } +} diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index db88db5b5b2..0eb8d9c6fe5 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -68,5 +68,6 @@ void image_gradients(struct image_t *input, struct image_t *dx, struct image_t * void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g); uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff); int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult); +void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index 6896e85acc7..6d6131d935a 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -1,5 +1,6 @@ /* - * Copyright (C) 2014 + * Copyright (C) 2014 G. de Croon + * 2015 Freek van Tienen * * This file is part of Paparazzi. * @@ -19,8 +20,8 @@ */ /** - * @file modules/computer_vision/cv/opticflow/lucas_kanade.c - * @brief efficient fixed-point optical-flow + * @file modules/computer_vision/lib/vision/lucas_kanade.c + * @brief efficient fixed-point optical-flow calculation * * - Initial fixed-point C implementation by G. 
de Croon * - Algorithm: Lucas-Kanade by Yves Bouguet @@ -34,6 +35,20 @@ #include "lucas_kanade.h" +/** + * Compute the optical flow of several points using the Lucas-Kanade algorithm by Yves Bouguet + * The initial fixed-point implementation is doen by G. de Croon and is adapted by + * Freek van Tienen for the implementation in Paparazzi. + * @param[in] *new_img The newest grayscale image (TODO: fix YUV422 support) + * @param[in] *old_img The old grayscale image (TODO: fix YUV422 support) + * @param[in] *points Points to start tracking from + * @param[in] points_cnt The amount of points + * @param[out] *new_points The new locations of the points + * @param[out] *status Whether the point was tracked or not + * @param[in] half_window_size Half the window size (in both x and y direction) to search inside + * @param[in] max_iteration Maximum amount of iterations to find the new point + * @param[in] step_threshold The threshold at which the iterations should stop + */ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt, struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold) { diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index 63ca1305dc7..bdae85af829 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -1,5 +1,6 @@ /* - * Copyright (C) 2014 + * Copyright (C) 2014 G. de Croon + * 2015 Freek van Tienen * * This file is part of Paparazzi. * @@ -19,9 +20,12 @@ */ /** - * @file modules/computer_vision/cv/opticflow/lucas_kanade.h - * @brief efficient fixed-point optical-flow + * @file modules/computer_vision/lib/vision/lucas_kanade.c + * @brief efficient fixed-point optical-flow calculation * + * - Initial fixed-point C implementation by G. 
de Croon + * - Algorithm: Lucas-Kanade by Yves Bouguet + * - Publication: http://robots.stanford.edu/cs223b04/algo_tracking.pdf */ #ifndef OPTIC_FLOW_INT_H diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 7d546d3e600..128f2ce5402 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -106,15 +106,9 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // FAST corner detection (TODO: non fixed threashold) struct point_t *fast9_points = fast9_detect(img, 20, 5, &result->corner_cnt); - /*image_to_grayscale(img, img); - uint8_t *im = (uint8_t *)img->buf; - for(int i = 0; i < result->corner_cnt; i++) { - uint32_t idx = 2*fast9_points[i].y*opticflow->img_w + fast9_points[i].x*2; - im[idx] = 255; - idx = idx+1 % (opticflow->img_w*opticflow->img_h*2); - im[idx] = 255; - }*/ - +#if OPTICFLOW_SHOW_CORNERS + image_show_points(img, fast9_points, result->corner_cnt); +#endif // ************************************************************************************* // Corner Tracking diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c index 36ddb52e465..9714c453119 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -115,6 +115,7 @@ void guidance_h_module_read_rc(void) /** * Main guidance loop + * @param[in] in_flight Whether we are in flight or not */ void guidance_h_module_run(bool_t in_flight) { @@ -124,8 +125,9 @@ void guidance_h_module_run(bool_t in_flight) /** * Update the controls based on a vision result + * @param[in] *result The opticflow calculation result used for control */ -void stabilization_opticflow_update(struct opticflow_result_t* result) +void stabilization_opticflow_update(struct opticflow_result_t *result) { // ************************************************************************************* // Downlink Message diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index 32f836f5c49..9c088a7dee9 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -193,6 +193,8 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { /** * Get the altitude above ground of the drone + * @param[in] sender_id The id that send the ABI message (unused) + * @param[in] distance The distance above ground level in meters */ static void opticflow_agl_cb(uint8_t sender_id __attribute__((unused)), float distance) { From 10d7ce4a05974c772f05e0177b7cd5d4be51c080 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Mon, 16 Mar 2015 15:28:59 +0100 Subject: [PATCH 08/17] [vision] Update some stuff --- .../computer_vision/lib/encoding/jpeg.c | 10 +- .../computer_vision/lib/vision/image.c | 130 ++++++++++++++---- .../computer_vision/lib/vision/image.h | 2 + .../computer_vision/lib/vision/lucas_kanade.c | 4 + .../opticflow/opticflow_calculator.c | 11 +- .../computer_vision/opticflow_module.c | 30 +++- 6 files changed, 157 insertions(+), 30 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c index 
c0ede9eebcb..469f7c6977d 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c @@ -427,12 +427,16 @@ void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality uint16_t i, j; uint8_t *output_ptr = out->buf; uint8_t *input_ptr = in->buf; + uint32_t image_format = FOUR_ZERO_ZERO; + + if(in->type == IMAGE_YUV422) + image_format = FOUR_TWO_TWO; JPEG_ENCODER_STRUCTURE JpegStruct; JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure = &JpegStruct; /* Initialization of JPEG control structure */ - jpeg_initialization(jpeg_encoder_structure, FOUR_TWO_TWO, in->w, in->h); + jpeg_initialization(jpeg_encoder_structure, image_format, in->w, in->h); /* Quantization Table Initialization */ //jpeg_initialize_quantization_tables (quality_factor); @@ -441,7 +445,7 @@ void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality /* Writing Marker Data */ if (add_dri_header) { - output_ptr = jpeg_write_markers(output_ptr, FOUR_TWO_TWO, in->w, in->h); + output_ptr = jpeg_write_markers(output_ptr, image_format, in->w, in->h); } for (i = 1; i <= jpeg_encoder_structure->vertical_mcus; i++) { @@ -463,7 +467,7 @@ void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality read_format(jpeg_encoder_structure, input_ptr); /* Encode the data in MCU */ - output_ptr = jpeg_encodeMCU(jpeg_encoder_structure, FOUR_TWO_TWO, output_ptr); + output_ptr = jpeg_encodeMCU(jpeg_encoder_structure, image_format, output_ptr); input_ptr += jpeg_encoder_structure->mcu_width_size; } diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index e3e8b912a38..65e4280483c 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -225,12 +225,12 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct // Calculate the window size uint16_t half_window = output->w / 2; - uint16_t subpixel_w = output->w * subpixel_factor; - uint16_t subpixel_h = output->h * subpixel_factor; + uint16_t subpixel_w = input->w * subpixel_factor; + uint16_t subpixel_h = input->h * subpixel_factor; // Go through the whole window size in normal coordinates for(uint16_t i = 0; i < output->w; i++) { - for(uint16_t j = 0; j < output->w; j++) { + for(uint16_t j = 0; j < output->h; j++) { // Calculate the subpixel coordinate uint16_t x = center->x + (i - half_window) * subpixel_factor; uint16_t y = center->y + (j - half_window) * subpixel_factor; @@ -246,9 +246,8 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct uint16_t tl_y = orig_y * subpixel_factor; // Check if it is the top left pixel - uint32_t output_idx = output->w*y + x; if(tl_x == x && tl_y == y) { - output_buf[output_idx] = input_buf[input->w*orig_y + orig_x]; + output_buf[output->w*j + i] = input_buf[input->w*orig_y + orig_x]; } else { // Calculate the difference from the top left @@ -262,7 +261,7 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct blend += alpha_x * alpha_y * input_buf[input->w*(orig_y+1) + (orig_x+1)]; // Set the normalized pixel blend - output_buf[output_idx] = blend / (subpixel_factor * subpixel_factor); + output_buf[output->w*j + i] = blend / (subpixel_factor * subpixel_factor); } } } @@ -270,7 +269,7 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct /** * Calculate the gradients using the 
following matrix: - * [0 0 0; -1 0 1; 0 0 0] + * [0 -1 0; -1 0 1; 0 1 0] * @param[in] *input Input grayscale image * @param[out] *dx Output gradient in the X direction (dx->w = input->w-2, dx->h = input->h-2) * @param[out] *dy Output gradient in the Y direction (dx->w = input->w-2, dx->h = input->h-2) @@ -280,13 +279,13 @@ void image_gradients(struct image_t *input, struct image_t *dx, struct image_t * // Fetch the buffers in the correct format uint8_t *input_buf = (uint8_t *)input->buf; int16_t *dx_buf = (int16_t *)dx->buf; - int16_t *dy_buf = (int16_t *)dx->buf; + int16_t *dy_buf = (int16_t *)dy->buf; // Go trough all pixels except the borders for(uint16_t x = 1; x < input->w-1; x++) { - for(uint16_t y = 1; y < input->h-1; x++) { - dx_buf[(y-1)*dx->w + (x-1)] = -input_buf[y*input->w + x-1] + input_buf[y*input->w + x+1]; - dy_buf[(y-1)*dx->w + (x-1)] = -input_buf[(y-1)*input->w + x] + input_buf[(y+1)*input->w + x]; + for(uint16_t y = 1; y < input->h-1; y++) { + dx_buf[(y-1)*dx->w + (x-1)] = (int16_t)input_buf[y*input->w + x+1] - (int16_t)input_buf[y*input->w + x-1]; + dy_buf[(y-1)*dy->w + (x-1)] = (int16_t)input_buf[(y+1)*input->w + x] - (int16_t)input_buf[(y-1)*input->w + x]; } } } @@ -304,22 +303,22 @@ void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g) // Fetch the buffers in the correct format int16_t *dx_buf = (int16_t *)dx->buf; - int16_t *dy_buf = (int16_t *)dx->buf; + int16_t *dy_buf = (int16_t *)dy->buf; // Calculate the different sums for(uint16_t x = 0; x < dx->w; x++) { for(uint16_t y = 0; y < dy->h; y++) { - sum_dxx += (dx_buf[y*dx->w + x] * dx_buf[y*dx->w + x]); - sum_dxy += (dx_buf[y*dx->w + x] * dy_buf[y*dy->w + x]); - sum_dyy += (dy_buf[y*dy->w + x] * dy_buf[y*dy->w + x]); + sum_dxx += ((int32_t)dx_buf[y*dx->w + x] * dx_buf[y*dx->w + x]); + sum_dxy += ((int32_t)dx_buf[y*dx->w + x] * dy_buf[y*dy->w + x]); + sum_dyy += ((int32_t)dy_buf[y*dy->w + x] * dy_buf[y*dy->w + x]); } } // ouput the G vector - g[0] = sum_dxx / 255; - g[1] = sum_dxy / 255; - g[2] = sum_dxy / 255; - g[3] = sum_dyy / 255; + g[0] = sum_dxx; + g[1] = sum_dxy; + g[2] = g[1]; + g[3] = sum_dyy; } /** @@ -337,16 +336,16 @@ uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct i // Fetch the buffers in the correct format uint8_t *img_a_buf = (uint8_t *)img_a->buf; - int16_t *img_b_buf = (int16_t *)img_b->buf; + uint8_t *img_b_buf = (uint8_t *)img_b->buf; // If we want the difference image back if(diff != NULL) diff_buf = (int16_t *)diff->buf; // Go trough the imagge pixels and calculate the difference - for(uint16_t x = 0; x < img_a->w; x++) { - for(uint16_t y = 0; y < img_a->h; y++) { - int16_t diff_c = img_a_buf[y*img_a->w +x] - img_b_buf[y*img_b->w +x]; + for(uint16_t x = 0; x < img_b->w; x++) { + for(uint16_t y = 0; y < img_b->h; y++) { + int16_t diff_c = img_a_buf[(y+1)*img_a->w +(x+1)] - img_b_buf[y*img_b->w +x]; sum_diff2 += diff_c*diff_c; // Set the difference image @@ -395,6 +394,7 @@ int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct imag /** * Show points in an image by coloring them through giving * the pixels the maximum value. 
+ * This works with YUV422 and grayscale images * @param[in,out] *img The image to place the points on * @param[in] *points The points to sohw * @param[in] *points_cnt The amount of points to show @@ -416,3 +416,87 @@ void image_show_points(struct image_t *img, struct point_t *points, uint16_t poi } } } + +/** + * Shows the flow from a specific point to a new point + * This works on YUV422 and Grayscale images + * @param[in,out] *img The image to show the flow on + * @param[in] *points The initial point location + * @param[in] *new_points The new point locations + * @param[in] *points_cnt The amount of points to show + * @param[in] *status_points The status of the specific point (TRUE is tracked, FALSE is untracked) + */ +void image_show_flow(struct image_t *img, struct point_t *points, struct point_t *new_points, uint16_t points_cnt, bool_t *status_points) +{ + // Go through all the points + for(uint16_t i = 0; i < points_cnt; i++) { + // Check if we are still tracking + if(!status_points[i]) + continue; + + //printf("Drawing line\n"); + + // Draw a line from points[i] to new_points[i] + image_draw_line(img, &points[i], &new_points[i]); + } +} + +/** + * Draw a line on the image + * @param[in,out] *img The image to show the line on + * @param[in] *from The point to draw from + * @param[in] *to The point to draw to + */ +void image_draw_line(struct image_t *img, struct point_t *from, struct point_t *to) +{ + int xerr=0, yerr=0; + uint8_t *img_buf = (uint8_t *)img->buf; + uint8_t pixel_width = (img->type == IMAGE_YUV422)? 2 : 1; + uint16_t startx = from->x; + uint16_t starty = from->y; + + /* compute the distances in both directions */ + int32_t delta_x = from->x - to->x; + int32_t delta_y = from->y - to->y; + + /* Compute the direction of the increment, + an increment of 0 means either a horizontal or vertical + line. 
+ */ + int8_t incx, incy; + if(delta_x>0) incx=1; + else if(delta_x==0) incx=0; + else incx=-1; + + if(delta_y>0) incy=1; + else if(delta_y==0) incy=0; + else incy=-1; + + /* determine which distance is greater */ + uint16_t distance = 0; + delta_x = abs(delta_x); + delta_y = abs(delta_y); + if(delta_x > delta_y) distance = delta_x*20; + else distance = delta_y*20; + + /* draw the line */ + for(uint16_t t = 0; t <= distance+1; t++) { + img_buf[img->w*pixel_width*starty + startx*pixel_width] = 255; + + if(img->type == IMAGE_YUV422) { + img_buf[img->w*pixel_width*starty + startx*pixel_width] = 255; + img_buf[img->w*pixel_width*starty + startx*pixel_width +1] = 255; + } + + xerr += delta_x; + yerr += delta_y; + if(xerr > distance) { + xerr -= distance; + startx += incx; + } + if(yerr > distance) { + yerr -= distance; + starty += incy; + } + } +} diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index 0eb8d9c6fe5..a86499f83ca 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -69,5 +69,7 @@ void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g); uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff); int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult); void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt); +void image_show_flow(struct image_t *img, struct point_t *points, struct point_t *new_points, uint16_t points_cnt, bool_t *status_points); +void image_draw_line(struct image_t *img, struct point_t *from, struct point_t *to); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index 6d6131d935a..ea095d20b2b 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -114,6 +114,7 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ status[p] = FALSE; continue; } + //printf("G[0]: %d, G[1]: %d, G[2]: %d, G[3]: %d, Det: %d\n", G[0], G[1], G[2], G[3], Det); // (4) iterate over taking steps in the image to minimize the error: memcpy(&new_points[p], &points[p], sizeof(struct point_t)); @@ -151,6 +152,9 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ break; } + //if(status[p]) + // printf("Got flow...\n"); + // Convert the point back to the original coordinate (TODO: maybe round it as it is closer to the original) new_points[p].x /= subpixel_factor; new_points[p].y /= subpixel_factor; diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 128f2ce5402..aa0a98a049d 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -106,9 +106,15 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // FAST corner detection (TODO: non fixed threashold) struct point_t *fast9_points = fast9_detect(img, 20, 5, &result->corner_cnt); -#if OPTICFLOW_SHOW_CORNERS +//#if OPTICFLOW_SHOW_CORNERS image_show_points(img, fast9_points, result->corner_cnt); -#endif +//#endif + + if(result->corner_cnt < 1) { + free(fast9_points); + image_copy(&opticflow->img_gray, 
&opticflow->prev_img_gray); + return; + } // ************************************************************************************* // Corner Tracking @@ -118,6 +124,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ bool_t *tracked_points = malloc(sizeof(bool_t) * result->corner_cnt); opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, fast9_points, result->corner_cnt, new_points, tracked_points, 5, 100, 2); + image_show_flow(img, fast9_points, new_points, result->corner_cnt, tracked_points); // Remove points if we lost tracking /* for (int i = count_fil - 1; i >= 0; i--) { diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index 9c088a7dee9..c841189ba21 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -171,15 +171,41 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { pthread_mutex_unlock(&opticflow_mutex); #ifdef OPTICFLOW_DEBUG - jpeg_encode_image(&img, &img_jpeg, 50, FALSE); + jpeg_encode_image(&img, &img_jpeg, 80, FALSE); rtp_frame_send( &VIEWVIDEO_DEV, // UDP device &img_jpeg, 0, // Format 422 - 50, // Jpeg-Quality + 80, // Jpeg-Quality 0, // DRI Header 0 // 90kHz time increment ); + + // Open process to send using netcat (in a fork because sometimes kills itself???) + /*pid_t pid = fork(); + + if(pid < 0) { + printf("[viewvideo] Could not create netcat fork.\n"); + } + else if(pid ==0) { + // We are the child and want to send the image + FILE *netcat = popen("nc 192.168.1.2 5000 2>/dev/null", "w"); + if (netcat != NULL) { + fwrite(img_jpeg.buf, sizeof(uint8_t), img_jpeg.buf_size, netcat); + pclose(netcat); // Ignore output, because it is too much when not connected + } else { + printf("[viewvideo] Failed to open netcat process.\n"); + } + + // Exit the program since we don't want to continue after transmitting + exit(0); + } + else { + // We want to wait until the child is finished + wait(NULL); + }*/ + + #endif // Free the image From 712264cbee5a4050136ec9a361afc15ee66a46ed Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Sun, 22 Mar 2015 17:06:58 +0100 Subject: [PATCH 09/17] [vision] Fix optic flow calculations --- conf/messages.xml | 39 ++-- conf/modules/cv_opticflow.xml | 9 +- sw/airborne/boards/ardrone/navdata.c | 2 +- .../computer_vision/lib/vision/fast_rosten.c | 8 +- .../computer_vision/lib/vision/fast_rosten.h | 2 +- .../computer_vision/lib/vision/image.c | 42 +++-- .../computer_vision/lib/vision/image.h | 9 +- .../computer_vision/lib/vision/lucas_kanade.c | 81 ++++---- .../computer_vision/lib/vision/lucas_kanade.h | 4 +- .../opticflow/inter_thread_data.h | 26 ++- .../opticflow/opticflow_calculator.c | 176 ++++++++---------- .../opticflow/opticflow_calculator.h | 14 +- .../opticflow/stabilization_opticflow.c | 67 +++---- .../opticflow/stabilization_opticflow.h | 17 +- .../computer_vision/opticflow_module.c | 34 +--- 15 files changed, 233 insertions(+), 297 deletions(-) diff --git a/conf/messages.xml b/conf/messages.xml index 42c451aba92..f4d873b9c6c 100644 --- a/conf/messages.xml +++ b/conf/messages.xml @@ -1972,30 +1972,21 @@ - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index 05a7828cfcd..5113ee0d89b 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -59,12 +59,6 @@ - - - - - - VIEWVIDEO_DEV ?= UDP1 
VIEWVIDEO_HOST ?= $(MODEM_HOST) @@ -82,6 +76,9 @@ else ap.CFLAGS += $(VIEWVID_G_CFLAGS) -DVIEWVIDEO_USE_NC endif + + ap.CFLAGS += -DGUIDANCE_V_MODE_MODULE_SETTING=GUIDANCE_V_MODE_HOVER + ap.CFLAGS += -DGUIDANCE_H_MODE_MODULE_SETTING=GUIDANCE_H_MODE_MODULE diff --git a/sw/airborne/boards/ardrone/navdata.c b/sw/airborne/boards/ardrone/navdata.c index 408910ef637..a453e42fada 100644 --- a/sw/airborne/boards/ardrone/navdata.c +++ b/sw/airborne/boards/ardrone/navdata.c @@ -292,10 +292,10 @@ static void *navdata_read(void *data __attribute__((unused))) if (pint != NULL) { memmove(navdata_buffer, pint, NAVDATA_PACKET_SIZE - (pint - navdata_buffer)); buffer_idx = pint - navdata_buffer; + fprintf(stderr, "[navdata] sync error, startbyte not found, resetting...\n"); } else { buffer_idx = 0; } - fprintf(stderr, "[navdata] sync error, startbyte not found, resetting...\n"); continue; } diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index 272f589ff14..1218b5932b9 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -41,10 +41,12 @@ static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel * @param[in] *img The image to do the corner detection on * @param[in] threshold The threshold which we use for FAST9 * @param[in] min_dist The minimum distance in pixels between detections + * @param[in] x_padding The padding in the x direction to not scan for corners + * @param[in] y_padding The padding in the y direction to not scan for corners * @param[out] *num_corner The amount of corners found * @return The corners found */ -struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint32_t *num_corners) +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners) { uint32_t corner_cnt = 0; uint16_t rsize = 512; @@ -61,8 +63,8 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi fast_make_offsets(pixel, img->w, pixel_size); // Go trough all the pixels (minus the borders) - for (y = 3; y < img->h - 3; y++) - for (x = 3; x < img->w - 3; x++) { + for (y = 3 + y_padding; y < img->h - 3 - y_padding; y++) + for (x = 3 + x_padding; x < img->w - 3 - x_padding; x++) { // First check if we aren't in range vertical (TODO: fix less intensive way) if(min_dist > 0) { bool_t need_skip = FALSE; diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h index 08357649458..47df527a722 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.h @@ -37,6 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
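As a quick illustration of the padded detector interface documented above, a hedged usage sketch (the threshold, minimum distance and 20 pixel padding are example values; only the call shape follows from this patch):

uint16_t corner_cnt = 0;
/* Detect FAST9 corners while skipping a border, so that a tracking window
 * around each returned corner can stay inside the image. */
struct point_t *corners = fast9_detect(img, 20, 10, 20, 20, &corner_cnt);

/* ... track or draw the corners ... */

free(corners);  /* fast9_detect allocates the point list, the caller frees it */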
#include "std.h" #include "lib/vision/image.h" -struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint32_t *num_corners); +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 65e4280483c..65597a30eec 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -315,10 +315,10 @@ void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g) } // ouput the G vector - g[0] = sum_dxx; - g[1] = sum_dxy; + g[0] = sum_dxx / 255; + g[1] = sum_dxy / 255; g[2] = g[1]; - g[3] = sum_dyy; + g[3] = sum_dyy / 255; } /** @@ -421,23 +421,23 @@ void image_show_points(struct image_t *img, struct point_t *points, uint16_t poi * Shows the flow from a specific point to a new point * This works on YUV422 and Grayscale images * @param[in,out] *img The image to show the flow on - * @param[in] *points The initial point location - * @param[in] *new_points The new point locations - * @param[in] *points_cnt The amount of points to show - * @param[in] *status_points The status of the specific point (TRUE is tracked, FALSE is untracked) + * @param[in] *vectors The flow vectors to show + * @param[in] *points_cnt The amount of points and vectors to show */ -void image_show_flow(struct image_t *img, struct point_t *points, struct point_t *new_points, uint16_t points_cnt, bool_t *status_points) +void image_show_flow(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor) { // Go through all the points for(uint16_t i = 0; i < points_cnt; i++) { - // Check if we are still tracking - if(!status_points[i]) - continue; - - //printf("Drawing line\n"); - - // Draw a line from points[i] to new_points[i] - image_draw_line(img, &points[i], &new_points[i]); + // Draw a line from the original position with the flow vector + struct point_t from = { + vectors[i].pos.x / subpixel_factor, + vectors[i].pos.y / subpixel_factor + }; + struct point_t to = { + (vectors[i].pos.x + vectors[i].flow_x) / subpixel_factor, + (vectors[i].pos.y + vectors[i].flow_y) / subpixel_factor + }; + image_draw_line(img, &from, &to); } } @@ -480,12 +480,16 @@ void image_draw_line(struct image_t *img, struct point_t *from, struct point_t * else distance = delta_y*20; /* draw the line */ - for(uint16_t t = 0; t <= distance+1; t++) { - img_buf[img->w*pixel_width*starty + startx*pixel_width] = 255; + for(uint16_t t = 0; starty >= 0 && starty < img->h && startx >= 0 && startx < img->w && t <= distance+1; t++) { + img_buf[img->w*pixel_width*starty + startx*pixel_width] = (t <= 3)? 0 : 255; if(img->type == IMAGE_YUV422) { - img_buf[img->w*pixel_width*starty + startx*pixel_width] = 255; img_buf[img->w*pixel_width*starty + startx*pixel_width +1] = 255; + + if(startx+1 < img->w) { + img_buf[img->w*pixel_width*starty + startx*pixel_width +2] = (t <= 3)? 
0 : 255; + img_buf[img->w*pixel_width*starty + startx*pixel_width +3] = 255; + } } xerr += delta_x; diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index a86499f83ca..3c1f679513c 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -56,6 +56,13 @@ struct point_t { uint16_t y; //< The y coordinate of the point }; +/* Vector structure for point differences */ +struct flow_t { + struct point_t pos; //< The original position the flow comes from + int16_t flow_x; //< The x direction flow in subpixels + int16_t flow_y; //< The y direction flow in subpixels +}; + /* Usefull image functions */ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type); void image_free(struct image_t *img); @@ -69,7 +76,7 @@ void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g); uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff); int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult); void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt); -void image_show_flow(struct image_t *img, struct point_t *points, struct point_t *new_points, uint16_t points_cnt, bool_t *status_points); +void image_show_flow(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor); void image_draw_line(struct image_t *img, struct point_t *from, struct point_t *to); #endif diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c index ea095d20b2b..5db2f789943 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c @@ -42,15 +42,16 @@ * @param[in] *new_img The newest grayscale image (TODO: fix YUV422 support) * @param[in] *old_img The old grayscale image (TODO: fix YUV422 support) * @param[in] *points Points to start tracking from - * @param[in] points_cnt The amount of points - * @param[out] *new_points The new locations of the points - * @param[out] *status Whether the point was tracked or not + * @param[in/out] points_cnt The amount of points and it returns the amount of points tracked * @param[in] half_window_size Half the window size (in both x and y direction) to search inside + * @param[in] subpixel_factor The subpixel factor which calculations should be based on * @param[in] max_iteration Maximum amount of iterations to find the new point * @param[in] step_threshold The threshold at which the iterations should stop + * @param[in] max_point The maximum amount of points to track, we skip x points and then take a point. + * @return The vectors from the original *points in subpixels */ -void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt, - struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold) +struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt, + uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points) { // A straightforward one-level implementation of Lucas-Kanade. 
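For orientation, a sketch of how a caller might consume this vector-based interface; the image and corner variables are assumed to exist in the caller, and the half window size (5), subpixel factor (10), iteration limit (10), step threshold (2) and point budget (100) are example values:

uint16_t tracked_cnt = corner_cnt;
struct flow_t *vectors = opticFlowLK(&img_gray, &prev_img_gray, corners, &tracked_cnt,
                                     5, 10, 10, 2, 100);

/* Average the flow over the points that were actually tracked;
 * flow_x and flow_y are expressed in subpixels. */
int32_t sum_x = 0, sum_y = 0;
for (uint16_t i = 0; i < tracked_cnt; i++) {
  sum_x += vectors[i].flow_x;
  sum_y += vectors[i].flow_y;
}
int16_t mean_flow_x = (tracked_cnt > 0) ? (sum_x / tracked_cnt) : 0;
int16_t mean_flow_y = (tracked_cnt > 0) ? (sum_y / tracked_cnt) : 0;

free(vectors);  /* the vector array is malloc'd inside opticFlowLK */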
// For all points: @@ -63,9 +64,11 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ // [c] calculate the 'b'-vector // [d] calculate the additional flow step and possibly terminate the iteration - - // spatial resolution of flow is 1 / subpixel_factor (TODO: set ourself) - uint16_t subpixel_factor = 10; + // Allocate some memory for returning the vectors + struct flow_t *vectors = malloc(sizeof(struct flow_t) * max_points); + uint16_t new_p = 0; + uint16_t points_orig = *points_cnt; + *points_cnt = 0; // determine patch sizes and initialize neighborhoods uint16_t patch_size = 2 * half_window_size; @@ -80,24 +83,28 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ image_create(&window_DY, patch_size, patch_size, IMAGE_GRADIENT); image_create(&window_diff, patch_size, patch_size, IMAGE_GRADIENT); - for (uint16_t p = 0; p < points_cnt; p++) { - // Update the status that the point isn't lost yet - status[p] = TRUE; + // Calculate the amount of points to skip + float skip_points = (points_orig > max_points)? points_orig/max_points : 1; + + // Go trough all points + for (uint16_t i = 0; i < max_points && i < points_orig; i++) { + uint16_t p = i * skip_points; // If the pixel is outside ROI, do not track it if(points[p].x < half_window_size || (old_img->w - points[p].x) < half_window_size || points[p].y < half_window_size || (old_img->h - points[p].y) < half_window_size) { - status[p] = FALSE; continue; } // Convert the point to a subpixel coordinate - points[p].x *= subpixel_factor; - points[p].y *= subpixel_factor; + vectors[new_p].pos.x = points[p].x*subpixel_factor; + vectors[new_p].pos.y = points[p].y*subpixel_factor; + vectors[new_p].flow_x = 0; + vectors[new_p].flow_y = 0; // (1) determine the subpixel neighborhood in the old image - image_subpixel_window(old_img, &window_I, &points[p], subpixel_factor); + image_subpixel_window(old_img, &window_I, &vectors[new_p].pos, subpixel_factor); // (2) get the x- and y- gradients image_gradients(&window_I, &window_DX, &window_DY); @@ -111,30 +118,36 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ // Check if the determinant is bigger than 1 if (Det < 1) { - status[p] = FALSE; continue; } - //printf("G[0]: %d, G[1]: %d, G[2]: %d, G[3]: %d, Det: %d\n", G[0], G[1], G[2], G[3], Det); + + // a * (Ax - Bx) + (1-a) * (Ax+1 - Bx+1) + // a * Ax - a * Bx + (1-a) * Ax+1 - (1-a) * Bx+1 + // (a * Ax + (1-a) * Ax+1) - (a * Bx + (1-a) * Bx+1) // (4) iterate over taking steps in the image to minimize the error: - memcpy(&new_points[p], &points[p], sizeof(struct point_t)); + bool_t tracked = TRUE; for(uint8_t it = 0; it < max_iterations; it++) { + struct point_t new_point = { + vectors[new_p].pos.x + vectors[new_p].flow_x, + vectors[new_p].pos.y + vectors[new_p].flow_y + }; // If the pixel is outside ROI, do not track it - if(new_points[p].x/subpixel_factor < half_window_size || (old_img->w - new_points[p].x/subpixel_factor) < half_window_size - || new_points[p].y/subpixel_factor < half_window_size || (old_img->h - new_points[p].y/subpixel_factor) < half_window_size) + if(new_point.x/subpixel_factor < half_window_size || (old_img->w - new_point.x/subpixel_factor) < half_window_size + || new_point.y/subpixel_factor < half_window_size || (old_img->h - new_point.y/subpixel_factor) < half_window_size) { - status[p] = FALSE; + tracked = FALSE; break; } // [a] get the subpixel neighborhood in the new image - image_subpixel_window(&window_J, new_img, &new_points[p], 
subpixel_factor); + image_subpixel_window(new_img, &window_J, &new_point, subpixel_factor); // [b] determine the image difference between the two neighborhoods uint32_t error = image_difference(&window_I, &window_J, &window_diff); if (error > error_threshold && it > max_iterations / 2) { - status[p] = FALSE; + tracked = FALSE; break; } @@ -144,22 +157,19 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ // [d] calculate the additional flow step and possibly terminate the iteration int16_t step_x = (G[3] * b_x - G[1] * b_y) / Det; int16_t step_y = (G[0] * b_y - G[2] * b_x) / Det; - new_points[p].x += step_x; - new_points[p].y += step_y; + vectors[new_p].flow_x += step_x; + vectors[new_p].flow_y += step_y; // Check if we exceeded the treshold - if(abs(step_x) < step_threshold && abs(step_y) < step_threshold) + if((abs(step_x) + abs(step_y)) < step_threshold) break; } - //if(status[p]) - // printf("Got flow...\n"); - - // Convert the point back to the original coordinate (TODO: maybe round it as it is closer to the original) - new_points[p].x /= subpixel_factor; - new_points[p].y /= subpixel_factor; - points[p].x /= subpixel_factor; - points[p].y /= subpixel_factor; + // If we tracked the point we update the index and the count + if(tracked) { + new_p++; + (*points_cnt)++; + } } // Free the images @@ -168,4 +178,7 @@ void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_ image_free(&window_DX); image_free(&window_DY); image_free(&window_diff); + + // Return the vectors + return vectors; } diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index bdae85af829..2a71ebf48bd 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -34,7 +34,7 @@ #include "std.h" #include "image.h" -void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt, - struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold); +struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt, + uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points); #endif /* OPTIC_FLOW_INT_H */ diff --git a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h index 74c9ebb3972..f1195d24ec3 100644 --- a/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h +++ b/sw/airborne/modules/computer_vision/opticflow/inter_thread_data.h @@ -31,26 +31,24 @@ /* The result calculated from the opticflow */ struct opticflow_result_t { - uint16_t cnt; //< Number of processed frames - uint32_t corner_cnt; //< The amount of coners found by FAST9 + float fps; //< Frames per second of the optical flow calculation + uint16_t corner_cnt; //< The amount of coners found by FAST9 + uint16_t tracked_cnt; //< The amount of tracked corners + int16_t flow_x; //< Flow in x direction from the camera (in subpixels) + int16_t flow_y; //< Flow in y direction from the camera (in subpixels) + int16_t flow_der_x; //< The derotated flow calculation in the x direction (in subpixels) + int16_t flow_der_y; //< The derotated flow calculation in the y direction (in subpixels) - float Velx; // Velocity as measured by camera - 
float Vely; - int flow_count; - - float cam_h; // Debug parameters - float OFx, OFy, dx_sum, dy_sum; - float diff_roll; - float diff_pitch; - float fps; //< Frames per second of the optical flow calculation + float vel_x; //< The velocity in the x direction + float vel_y; //< The velocity in the y direction }; /* The state of the drone when it took an image */ struct opticflow_state_t { - float phi; // roll [rad] - float theta; // pitch [rad] - float agl; // height above ground [m] + float phi; //< roll [rad] + float theta; //< pitch [rad] + float agl; //< height above ground [m] }; #endif diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index aa0a98a049d..72d0c8b3083 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -46,14 +46,9 @@ #define Fx_ARdrone 343.1211 #define Fy_ARdrone 348.5053 -// Corner Detection -#define MAX_COUNT 100 - -// Flow Derotation -#define FLOW_DEROTATION - -/* Usefull for calculating FPS */ +/* Functions only used here */ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime); +static int cmp_flow(const void *a, const void *b); /** * Initialize the opticflow calculator @@ -63,18 +58,14 @@ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishti */ void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h) { - /* Set the width/height of the image */ - opticflow->img_w = w; - opticflow->img_h = h; - /* Create the image buffers */ image_create(&opticflow->img_gray, w, h, IMAGE_GRAYSCALE); image_create(&opticflow->prev_img_gray, w, h, IMAGE_GRAYSCALE); /* Set the previous values */ opticflow->got_first_img = FALSE; - opticflow->prev_pitch = 0.0; - opticflow->prev_roll = 0.0; + opticflow->prev_phi = 0.0; + opticflow->prev_theta = 0.0; } /** @@ -104,14 +95,22 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection (TODO: non fixed threashold) - struct point_t *fast9_points = fast9_detect(img, 20, 5, &result->corner_cnt); + static uint8_t threshold = 20; + struct point_t *corners = fast9_detect(img, threshold, 10, 20, 20, &result->corner_cnt); -//#if OPTICFLOW_SHOW_CORNERS - image_show_points(img, fast9_points, result->corner_cnt); -//#endif + // Adaptive threshold + if(result->corner_cnt < 40 && threshold > 5) + threshold--; + else if(result->corner_cnt > 50 && threshold < 60) + threshold++; +#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS + image_show_points(img, corners, result->corner_cnt); +#endif + + // Check if we found some corners to track if(result->corner_cnt < 1) { - free(fast9_points); + free(corners); image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); return; } @@ -120,98 +119,59 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Corner Tracking // ************************************************************************************* - struct point_t *new_points = malloc(sizeof(struct point_t) * result->corner_cnt); - bool_t *tracked_points = malloc(sizeof(bool_t) * result->corner_cnt); - opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, fast9_points, result->corner_cnt, - new_points, tracked_points, 5, 100, 2); - image_show_flow(img, fast9_points, new_points, result->corner_cnt, 
tracked_points); - - // Remove points if we lost tracking - /* for (int i = count_fil - 1; i >= 0; i--) { - if (!status[i] || new_x[i] < borderx || new_x[i] > (opticflow->img_w - 1 - borderx) || - new_y[i] < bordery || new_y[i] > (opticflow->img_h - 1 - bordery)) { - for (int c = i; c < result->flow_count - 1; c++) { - x[c] = x[c + 1]; - y[c] = y[c + 1]; - new_x[c] = new_x[c + 1]; - new_y[c] = new_y[c + 1]; - } - result->flow_count--; - } - }*/ - - /*result->dx_sum = 0.0; - result->dy_sum = 0.0; - - // Optical Flow Computation - for (int i = 0; i < result->flow_count; i++) { - dx[i] = new_x[i] - x[i]; - dy[i] = new_y[i] - y[i]; - } - - // Median Filter - if (result->flow_count) { - quick_sort_int(dx, result->flow_count); // 11 - quick_sort_int(dy, result->flow_count); // 11 - - result->dx_sum = (float) dx[result->flow_count / 2]; - result->dy_sum = (float) dy[result->flow_count / 2]; - } else { - result->dx_sum = 0.0; - result->dy_sum = 0.0; - }*/ +#define MAX_TRACK_CORNERS 25 +#define HALF_WINDOW_SIZE 5 +#define SUBPIXEL_FACTOR 10 +#define MAX_ITERATIONS 10 +#define THRESHOLD_VEC 2 + // Execute a Lucas Kanade optical flow + result->tracked_cnt = result->corner_cnt; + struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt, + HALF_WINDOW_SIZE, SUBPIXEL_FACTOR, MAX_ITERATIONS, THRESHOLD_VEC, MAX_TRACK_CORNERS); + +#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW + image_show_flow(img, vectors, result->tracked_cnt, SUBPIXEL_FACTOR); +#endif - // Flow Derotation - /*result->diff_pitch = (state->theta - opticflow->prev_pitch) * opticflow->img_h / FOV_H; - result->diff_roll = (state->phi - opticflow->prev_roll) * opticflow->img_w / FOV_W; - opticflow->prev_pitch = state->theta; - opticflow->prev_roll = state->phi; - - float OFx_trans, OFy_trans; -#ifdef FLOW_DEROTATION - if (result->flow_count) { - OFx_trans = result->dx_sum - result->diff_roll; - OFy_trans = result->dy_sum - result->diff_pitch; - - if ((OFx_trans <= 0) != (result->dx_sum <= 0)) { - OFx_trans = 0; - OFy_trans = 0; - } + // Get the median flow + qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow); + if(result->tracked_cnt == 0) { + // We got no flow + result->flow_x = 0; + result->flow_y = 0; + } else if(result->tracked_cnt > 3) { + // Take the average of the 3 median points + result->flow_x = vectors[result->tracked_cnt/2 -1].flow_x; + result->flow_y = vectors[result->tracked_cnt/2 -1].flow_y; + result->flow_x += vectors[result->tracked_cnt/2].flow_x; + result->flow_y += vectors[result->tracked_cnt/2].flow_y; + result->flow_x += vectors[result->tracked_cnt/2 +1].flow_x; + result->flow_y += vectors[result->tracked_cnt/2 +1].flow_y; + result->flow_x /= 3; + result->flow_y /= 3; } else { - OFx_trans = result->dx_sum; - OFy_trans = result->dy_sum; + // Take the median point + result->flow_x = vectors[result->tracked_cnt/2].flow_x; + result->flow_y = vectors[result->tracked_cnt/2].flow_y; } -#else - OFx_trans = result->dx_sum; - OFy_trans = result->dy_sum; -#endif - // Average Filter - OFfilter(&result->OFx, &result->OFy, OFx_trans, OFy_trans, result->flow_count, 1); - - // Velocity Computation - if (state->agl < 0.01) { - result->cam_h = 0.01; - } - else { - result->cam_h = state->agl; - } + // Flow Derotation + float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / FOV_W; + float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / FOV_H; + result->flow_der_x = result->flow_x - diff_flow_x * SUBPIXEL_FACTOR; + result->flow_der_y = 
result->flow_y - diff_flow_y * SUBPIXEL_FACTOR; + opticflow->prev_phi = state->phi; + opticflow->prev_theta = state->theta; - if (result->flow_count) { - result->Velx = result->OFy * result->cam_h * result->fps / Fy_ARdrone + 0.05; - result->Vely = -result->OFx * result->cam_h * result->fps / Fx_ARdrone - 0.1; - } else { - result->Velx = 0.0; - result->Vely = 0.0; - } + // Velocity calculation + result->vel_x = -result->flow_der_x * result->fps / SUBPIXEL_FACTOR * img->w / Fx_ARdrone; + result->vel_y = result->flow_der_y * result->fps / SUBPIXEL_FACTOR * img->h / Fy_ARdrone; // ************************************************************************************* // Next Loop Preparation // ************************************************************************************* - */ - free(fast9_points); - free(new_points); - free(tracked_points); + free(corners); + free(vectors); image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); } @@ -228,3 +188,17 @@ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishti msec+=(finishtime->tv_usec-starttime->tv_usec)/1000; return msec; } + +/** + * Compare two flow vectors based on flow distance + * Used for sorting. + * @param[in] *a The first flow vector (should be a struct flow_t) + * @param[in] *b The second flow vector (should be a struct flow_t) + * @return Negative if b has more flow than a, 0 if the same and positive if a has more flow than b + */ +static int cmp_flow(const void *a, const void *b) +{ + const struct flow_t *a_p = (const struct flow_t *)a; + const struct flow_t *b_p = (const struct flow_t *)b; + return (a_p->flow_x*a_p->flow_x + a_p->flow_y*a_p->flow_y) - (b_p->flow_x*b_p->flow_x + b_p->flow_y*b_p->flow_y); +} diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h index a704d46b34c..f010fc8284e 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h @@ -36,18 +36,12 @@ struct opticflow_t { - unsigned int img_w; //< The image width - unsigned int img_h; //< The image width - + bool_t got_first_img; //< If we got an image to work with + float prev_phi; //< Phi from the previous image frame + float prev_theta; //< Theta from the previous image frame struct image_t img_gray; //< Current gray image frame struct image_t prev_img_gray; //< Previous gray image frame - - bool_t got_first_img; //< If we got a image to work with - - // Store previous values - float prev_pitch; - float prev_roll; - struct timeval prev_timestamp; //< Frames per second of the optical flow calculation + struct timeval prev_timestamp; //< Timestamp of the previous frame, used for FPS calculation }; diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c index 9714c453119..fa150d8bb22 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -39,22 +39,22 @@ #define CMD_OF_SAT 1500 // 40 deg = 2859.1851 #ifndef VISION_PHI_PGAIN -#define VISION_PHI_PGAIN 500 +#define VISION_PHI_PGAIN 400 #endif PRINT_CONFIG_VAR(VISION_PHI_PGAIN); #ifndef VISION_PHI_IGAIN -#define VISION_PHI_IGAIN 10 +#define VISION_PHI_IGAIN 20 #endif PRINT_CONFIG_VAR(VISION_PHI_IGAIN); #ifndef VISION_THETA_PGAIN -#define VISION_THETA_PGAIN 500 +#define VISION_THETA_PGAIN 
400 #endif PRINT_CONFIG_VAR(VISION_THETA_PGAIN); #ifndef VISION_THETA_IGAIN -#define VISION_THETA_IGAIN 10 +#define VISION_THETA_IGAIN 20 #endif PRINT_CONFIG_VAR(VISION_THETA_IGAIN); @@ -92,17 +92,14 @@ struct opticflow_stab_t opticflow_stab = { */ void guidance_h_module_enter(void) { - // Set the errors to 0 - opticflow_stab.err_vx = 0; + /* Reset the integrated errors */ opticflow_stab.err_vx_int = 0; - opticflow_stab.err_vy = 0; opticflow_stab.err_vy_int = 0; - // Set the euler command to 0 - INT_EULERS_ZERO(opticflow_stab.cmd); - - // GUIDANCE: Set Hover-z-hold (1 meter???) - guidance_v_z_sp = -1; + /* Set roll/pitch to 0 degrees and psi to current heading */ + opticflow_stab.cmd.phi = 0; + opticflow_stab.cmd.theta = 0; + opticflow_stab.cmd.psi = stateGetNedToBodyEulers_i()->psi; } /** @@ -119,7 +116,10 @@ void guidance_h_module_read_rc(void) */ void guidance_h_module_run(bool_t in_flight) { - // Run the default attitude stabilization + /* Update the setpoint */ + stabilization_attitude_set_rpy_setpoint_i(&opticflow_stab.cmd); + + /* Run the default attitude stabilization */ stabilization_attitude_run(in_flight); } /** @@ -129,46 +129,35 @@ void guidance_h_module_run(bool_t in_flight) */ void stabilization_opticflow_update(struct opticflow_result_t *result) { - // ************************************************************************************* - // Downlink Message - // ************************************************************************************* - - float test = 1.5; - DOWNLINK_SEND_OF_HOVER(DefaultChannel, DefaultDevice, - &result->fps, &result->dx_sum, &result->dy_sum, &result->OFx, &result->OFy, - &result->diff_roll, &result->diff_pitch, - &result->Velx, &result->Vely, - &test, &test, - &result->cam_h, &result->corner_cnt); - /* Check if we are in the correct AP_MODE before setting commands */ if (autopilot_mode != AP_MODE_MODULE) { return; } /* Calculate the error if we have enough flow */ - if (result->flow_count > 0) { - opticflow_stab.err_vx = result->Velx - opticflow_stab.desired_vx; - opticflow_stab.err_vy = result->Vely - opticflow_stab.desired_vy; - } else { - opticflow_stab.err_vx = 0; - opticflow_stab.err_vy = 0; + float err_vx = 0; + float err_vy = 0; + if (result->tracked_cnt > 0) { + err_vx = opticflow_stab.desired_vx - result->vel_x; + err_vy = opticflow_stab.desired_vy - result->vel_y; } /* Calculate the integrated errors (TODO: bound??) 
*/ - opticflow_stab.err_vx_int += opticflow_stab.theta_igain * opticflow_stab.err_vx; - opticflow_stab.err_vy_int += opticflow_stab.phi_igain * opticflow_stab.err_vy; + opticflow_stab.err_vx_int += err_vx / 100; + opticflow_stab.err_vy_int += err_vy / 100; /* Calculate the commands */ - opticflow_stab.cmd.theta = (opticflow_stab.theta_pgain * opticflow_stab.err_vx + opticflow_stab.err_vx_int); - opticflow_stab.cmd.phi = -(opticflow_stab.phi_pgain * opticflow_stab.err_vy + opticflow_stab.err_vy_int); + opticflow_stab.cmd.phi = opticflow_stab.phi_pgain * err_vx / 100 + + opticflow_stab.phi_igain * opticflow_stab.err_vx_int; + opticflow_stab.cmd.theta = -(opticflow_stab.theta_pgain * err_vy / 100 + + opticflow_stab.err_vy_int * opticflow_stab.err_vy_int); /* Bound the roll and pitch commands */ BoundAbs(opticflow_stab.cmd.phi, CMD_OF_SAT); BoundAbs(opticflow_stab.cmd.theta, CMD_OF_SAT); - /* Update the setpoint */ - stabilization_attitude_set_rpy_setpoint_i(&opticflow_stab.cmd); - //DOWNLINK_SEND_VISION_STABILIZATION(DefaultChannel, DefaultDevice, &result->Velx, &result->Vely, &Velx_Int, - // &Vely_Int, &cmd_euler.phi, &cmd_euler.theta); + DOWNLINK_SEND_OPTIC_FLOW_EST(DefaultChannel, DefaultDevice, + &result->fps, &result->corner_cnt, &result->tracked_cnt, &result->flow_x, &result->flow_y, + &result->flow_der_x, &result->flow_der_y, &result->vel_x, &result->vel_y, + &opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta); } diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h index 2391bc0b524..a6a9adefcb7 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h @@ -41,24 +41,15 @@ struct opticflow_stab_t { int32_t phi_igain; //< The roll I gain on the err_vx_int int32_t theta_pgain; //< The pitch P gain on the err_vy int32_t theta_igain; //< The pitch I gain on the err_vy_int - float desired_vx; //< The desired velocity in the x direction (m/s??) - float desired_vy; //< The desired velocity in the y direction (m/s??) - - float err_vx; //< The velocity error in x direction (m/s??) - float err_vx_int; //< The integrated velocity error in x direction (m/s??) - float err_vy; //< The velocity error in y direction (m/s??) - float err_vy_int; //< The integrated velocity error in y direction (m/s??) 
+ float desired_vx; //< The desired velocity in the x direction (cm/s) + float desired_vy; //< The desired velocity in the y direction (cm/s) + float err_vx_int; //< The integrated velocity error in x direction (m/s) + float err_vy_int; //< The integrated velocity error in y direction (m/s) struct Int32Eulers cmd; //< The commands that are send to the hover loop }; extern struct opticflow_stab_t opticflow_stab; -// Vertical loop re-uses Alt-hold -#define GUIDANCE_V_MODE_MODULE_SETTING GUIDANCE_V_MODE_HOVER - -// Horizontal mode is a specific controller -#define GUIDANCE_H_MODE_MODULE_SETTING GUIDANCE_H_MODE_MODULE - // Implement own Horizontal loops extern void guidance_h_module_enter(void); extern void guidance_h_module_read_rc(void); diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index c841189ba21..7461fcd1386 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -76,7 +76,8 @@ void opticflow_module_init(void) opticflow_got_result = FALSE; /* Try to initialize the video device */ - opticflow_dev = v4l2_init("/dev/video2", 320, 240, 10); //TODO: Fix defines + //v4l2_init_subdev("/dev/v4l-subdev0", 0, 1, V4L2_MBUS_FMT_UYVY8_2X8, 320, 240); + opticflow_dev = v4l2_init("/dev/video2", 320, 240, 60); //TODO: Fix defines if (opticflow_dev == NULL) { printf("[opticflow_module] Could not initialize the video device\n"); } @@ -135,6 +136,7 @@ void opticflow_module_stop(void) * This thread passes the images trough the optical flow * calculator based on Lucas Kanade */ +#include "errno.h" static void *opticflow_module_calc(void *data __attribute__((unused))) { // Start the streaming on the V4L2 device if(!v4l2_start_capture(opticflow_dev)) { @@ -171,41 +173,15 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { pthread_mutex_unlock(&opticflow_mutex); #ifdef OPTICFLOW_DEBUG - jpeg_encode_image(&img, &img_jpeg, 80, FALSE); + jpeg_encode_image(&img, &img_jpeg, 70, FALSE); rtp_frame_send( &VIEWVIDEO_DEV, // UDP device &img_jpeg, 0, // Format 422 - 80, // Jpeg-Quality + 70, // Jpeg-Quality 0, // DRI Header 0 // 90kHz time increment ); - - // Open process to send using netcat (in a fork because sometimes kills itself???) 
- /*pid_t pid = fork(); - - if(pid < 0) { - printf("[viewvideo] Could not create netcat fork.\n"); - } - else if(pid ==0) { - // We are the child and want to send the image - FILE *netcat = popen("nc 192.168.1.2 5000 2>/dev/null", "w"); - if (netcat != NULL) { - fwrite(img_jpeg.buf, sizeof(uint8_t), img_jpeg.buf_size, netcat); - pclose(netcat); // Ignore output, because it is too much when not connected - } else { - printf("[viewvideo] Failed to open netcat process.\n"); - } - - // Exit the program since we don't want to continue after transmitting - exit(0); - } - else { - // We want to wait until the child is finished - wait(NULL); - }*/ - - #endif // Free the image From e8895ae6cd46181412fc3d0a3c7e545053fb0d43 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Sun, 22 Mar 2015 17:38:34 +0100 Subject: [PATCH 10/17] [linux] Static link LIBC library --- conf/Makefile.arm-linux | 6 +++--- conf/boards/ardrone2_raw.makefile | 4 ++++ conf/boards/bebop.makefile | 4 ++++ sw/airborne/arch/linux/udp_socket.c | 8 +++++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/conf/Makefile.arm-linux b/conf/Makefile.arm-linux index 98027327bb3..3efce866b21 100644 --- a/conf/Makefile.arm-linux +++ b/conf/Makefile.arm-linux @@ -39,12 +39,12 @@ else FLOAT_ABI ?= -mfloat-abi=softfp -mfpu=vfp endif -ARCH_FLAGS ?= -mtune=cortex-a8 -march=armv7-a +ARCH_CFLAGS ?= -mtune=cortex-a8 -march=armv7-a # add ARM specifc flags to CFLAGS, LDFLAGS -CFLAGS += $(FLOAT_ABI) $(ARCH_FLAGS) +CFLAGS += $(FLOAT_ABI) $(ARCH_CFLAGS) LDFLAGS += $(FLOAT_ABI) -CXXFLAGS += $(FLOAT_ABI) $(ARCH_FLAGS) +CXXFLAGS += $(FLOAT_ABI) $(ARCH_CFLAGS) # include the common linux Makefile (common CFLAGS, actual targets) include $(PAPARAZZI_SRC)/conf/Makefile.linux diff --git a/conf/boards/ardrone2_raw.makefile b/conf/boards/ardrone2_raw.makefile index a747b653afa..eed9e4c4154 100644 --- a/conf/boards/ardrone2_raw.makefile +++ b/conf/boards/ardrone2_raw.makefile @@ -42,6 +42,10 @@ $(TARGET).CFLAGS +=-DARDRONE2_RAW # handle linux signals by hand $(TARGET).CFLAGS += -DUSE_LINUX_SIGNAL +# Link static (Done for GLIBC) +$(TARGET).CFLAGS += -DLINUX_LINK_STATIC +$(TARGET).LDFLAGS += -static + # ----------------------------------------------------------------------- # default LED configuration diff --git a/conf/boards/bebop.makefile b/conf/boards/bebop.makefile index fbc695aac37..6f00e27cffc 100644 --- a/conf/boards/bebop.makefile +++ b/conf/boards/bebop.makefile @@ -34,6 +34,10 @@ $(TARGET).CFLAGS += -DUSE_LINUX_SIGNAL # Compile the video specific parts $(TARGET).srcs += $(SRC_BOARD)/video.c +# Link static (Done for GLIBC) +$(TARGET).CFLAGS += -DLINUX_LINK_STATIC +$(TARGET).LDFLAGS += -static + # ----------------------------------------------------------------------- # default LED configuration diff --git a/sw/airborne/arch/linux/udp_socket.c b/sw/airborne/arch/linux/udp_socket.c index ac20f53babb..64be511c2b0 100644 --- a/sw/airborne/arch/linux/udp_socket.c +++ b/sw/airborne/arch/linux/udp_socket.c @@ -40,7 +40,7 @@ /** * Create UDP socket and bind it. 
* @param[out] sock pointer to already allocated UdpSocket struct - * @param[in] host hostname/address + * @param[in] host ip address or hostname (hostname is not possible when linking statically) * @param[in] port_out output port * @param[in] port_in input port (set to < 0 to disable) * @param[in] broadcast if TRUE enable broadcasting @@ -52,6 +52,7 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port return -1; } +#ifndef LINUX_LINK_STATIC /* try to convert host ipv4 address to binary format */ struct in_addr host_ip; if (!inet_aton(host, &host_ip)) { @@ -71,6 +72,7 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port return -1; } } +#endif // Create the socket with the correct protocl sock->sockfd = socket(PF_INET, SOCK_DGRAM, 0); @@ -96,7 +98,11 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port // set the output/destination address for use in sendto later sock->addr_out.sin_family = PF_INET; sock->addr_out.sin_port = htons(port_out); +#ifndef LINUX_LINK_STATIC sock->addr_out.sin_addr.s_addr = host_ip.s_addr; +#else + sock->addr_out.sin_addr.s_addr = inet_addr(host); +#endif return 0; } From ee83a5f325326f46778a8829acab9e5099ce262d Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Mon, 23 Mar 2015 09:56:32 +0100 Subject: [PATCH 11/17] [vision] Add image switch function --- .../computer_vision/lib/vision/image.c | 20 +++++++++++++++++++ .../computer_vision/lib/vision/image.h | 1 + .../opticflow/opticflow_calculator.c | 2 +- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 65597a30eec..969dd9a9b88 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -82,6 +82,26 @@ void image_copy(struct image_t *input, struct image_t *output) memcpy(output->buf, input->buf, input->buf_size); } +/** + * This will switch images *a and *b + * This is faster than image_copy because it doesn't copy the + * whole image buffer. + * @param[in,out] *a The image to switch + * @param[in,out] *b The image to switch with + */ +void image_switch(struct image_t *a, struct image_t *b) +{ + /* Remember everything from image a */ + struct image_t old_a; + memcpy(&old_a, a, sizeof(struct image_t)); + + /* Copy everything from b to a */ + memcpy(a, b, sizeof(struct image_t)); + + /* Copy everything from the remembered a to b */ + memcpy(b, &old_a, sizeof(struct image_t)); +} + /** * Convert an image to grayscale. 
* Depending on the output type the U/V bytes are removed diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.h b/sw/airborne/modules/computer_vision/lib/vision/image.h index 3c1f679513c..270ea0a3f98 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.h +++ b/sw/airborne/modules/computer_vision/lib/vision/image.h @@ -67,6 +67,7 @@ struct flow_t { void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type); void image_free(struct image_t *img); void image_copy(struct image_t *input, struct image_t *output); +void image_switch(struct image_t *a, struct image_t *b); void image_to_grayscale(struct image_t *input, struct image_t *output); uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, uint8_t u_M, uint8_t v_m, uint8_t v_M); void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint16_t downsample); diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 72d0c8b3083..e4ff24095d3 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -172,7 +172,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* free(corners); free(vectors); - image_copy(&opticflow->img_gray, &opticflow->prev_img_gray); + image_switch(&opticflow->img_gray, &opticflow->prev_img_gray); } /** From f1b30a32ddb2687a6bd8efbbc2dd01a07df8e823 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Mon, 23 Mar 2015 10:13:07 +0100 Subject: [PATCH 12/17] [vision] Fix some comments --- .../computer_vision/lib/encoding/rtp.c | 20 ++++++++++++++++++- .../computer_vision/lib/encoding/rtp.h | 11 ++-------- .../computer_vision/lib/vision/lucas_kanade.h | 2 +- .../opticflow/stabilization_opticflow.c | 2 +- .../opticflow/stabilization_opticflow.h | 2 +- 5 files changed, 24 insertions(+), 13 deletions(-) diff --git a/sw/airborne/modules/computer_vision/lib/encoding/rtp.c b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c index 047e574d52d..5be8bcd7136 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/rtp.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c @@ -62,6 +62,7 @@ uint8_t JpegScanDataCh2B[KJpegCh2ScanDataLen] = { /** * Send a test RTP frame + * @param[in] *udp The udp connection to send the test frame over */ void rtp_frame_test(struct udp_periph *udp) { @@ -86,6 +87,12 @@ void rtp_frame_test(struct udp_periph *udp) /** * Send an RTP frame + * @param[in] *udp The UDP connection to send the frame over + * @param[in] *img The image to send over the RTP connection + * @param[in] format_code 0 for YUV422 and 1 for YUV421 + * @param[in] quality_code The JPEG encoding quality + * @param[in] has_dri_header Whether we have an DRI header or not + * @param[in] delta_t Time between images (if set to 0 or less it is calculated) */ void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code, uint8_t quality_code, uint8_t has_dri_header, uint32_t delta_t) @@ -135,7 +142,18 @@ void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_ * The RTP timestamp is in units of 90000Hz. The same timestamp MUST appear in each fragment of a given frame. The RTP marker bit MUST be set in the last packet of a frame. 
- * + * @param[in] *udp The UDP socket to send the RTP packet over + * @param[in] *Jpeg JPEG encoded image byte buffer + * @param[in] JpegLen The length of the byte buffer + * @param[in] m_SequenceNumber RTP sequence number + * @param[in] m_Timestamp Timestamp of the image + * @param[in] m_offset 3 byte fragmentation offset for fragmented images + * @param[in] marker_bit RTP marker bit + * @param[in] w The width of the JPEG image + * @param[in] h The height of the image + * @param[in] format_code 0 for YUV422 and 1 for YUV421 + * @param[in] quality_code The JPEG encoding quality + * @param[in] has_dri_header Whether we have an DRI header or not */ static void rtp_packet_send( struct udp_periph *udp, diff --git a/sw/airborne/modules/computer_vision/lib/encoding/rtp.h b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h index d099d1a1d83..69f7f714c50 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/rtp.h +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h @@ -32,15 +32,8 @@ #include "lib/vision/image.h" #include "mcu_periph/udp.h" -void rtp_frame_send( - struct udp_periph *udp, // socket - struct image_t *img, // The image to send - uint8_t format_code, // 0=422, 1=421 - uint8_t quality_code, // 0-99 of 128 for custom (include - uint8_t has_dri_header, // Does Jpeg data include Header Info? - uint32_t delta_t // time step 90kHz -); - +void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code, uint8_t quality_code, + uint8_t has_dri_header, uint32_t delta_t); void rtp_frame_test(struct udp_periph *udp); #endif /* _CV_ENCODING_RTP_H */ diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h index 2a71ebf48bd..dada79f7fdf 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h +++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h @@ -20,7 +20,7 @@ */ /** - * @file modules/computer_vision/lib/vision/lucas_kanade.c + * @file modules/computer_vision/lib/vision/lucas_kanade.h * @brief efficient fixed-point optical-flow calculation * * - Initial fixed-point C implementation by G. de Croon diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c index fa150d8bb22..f4742a00c89 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -20,7 +20,7 @@ */ /** - * @file modules/computer_vision/opticflow/hover_stabilization.c + * @file modules/computer_vision/opticflow/stabilization_opticflow.c * @brief Optical-flow based control for Linux based systems * * Control loops for optic flow based hovering. diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h index a6a9adefcb7..13eee913168 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.h @@ -20,7 +20,7 @@ */ /** - * @file modules/computer_vision/opticflow/hover_stabilization.h + * @file modules/computer_vision/opticflow/stabilization_opticflow.h * @brief Optical-flow based control for Linux based systems * * Control loops for optic flow based hovering. 
From 38f7c98ba147e4403ee557ae0d9e526598b069a0 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Tue, 31 Mar 2015 09:07:25 +0200 Subject: [PATCH 13/17] [vision] Make optic flow device adjustable --- .../computer_vision/opticflow_module.c | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index 7461fcd1386..92312b985f4 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -43,6 +43,26 @@ #endif PRINT_CONFIG_VAR(OPTICFLOW_AGL_ID); +/* The video device */ +#ifndef OPTICFLOW_DEVICE +#define OPTICFLOW_DEVICE /dev/video2 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_DEVICE); + +/* The video device size (width, height) */ +#ifndef OPTICFLOW_DEVICE_SIZE +#define OPTICFLOW_DEVICE_SIZE 320,240 +#endif +#define __SIZE_HELPER(x, y) #x", "#y +#define _SIZE_HELPER(x) __SIZE_HELPER(x) +PRINT_CONFIG_MSG("OPTICFLOW_DEVICE_SIZE = " _SIZE_HELPER(OPTICFLOW_DEVICE_SIZE)); + +/* The video device buffers (the amount of V4L2 buffers) */ +#ifndef OPTICFLOW_DEVICE_BUFFERS +#define OPTICFLOW_DEVICE_BUFFERS 15 +#endif +PRINT_CONFIG_VAR(VIEWVIDEO_DEVICE_BUFFERS); + /* The main opticflow variables */ static struct opticflow_t opticflow; //< Opticflow calculations static struct opticflow_result_t opticflow_result; //< The opticflow result @@ -75,9 +95,19 @@ void opticflow_module_init(void) opticflow_calc_init(&opticflow, 320, 240); opticflow_got_result = FALSE; +#ifdef OPTICFLOW_SUBDEV + PRINT_CONFIG_MSG("[opticflow_module] Configuring a subdevice!"); + PRINT_CONFIG_VAR(OPTICFLOW_SUBDEV); + + /* Initialize the V4L2 subdevice (TODO: fix hardcoded path, which and code) */ + if (!v4l2_init_subdev(STRINGIFY(OPTICFLOW_SUBDEV), 0, 1, V4L2_MBUS_FMT_UYVY8_2X8, OPTICFLOW_DEVICE_SIZE)) { + printf("[opticflow_module] Could not initialize the %s subdevice.\n", STRINGIFY(OPTICFLOW_SUBDEV)); + return; + } +#endif + /* Try to initialize the video device */ - //v4l2_init_subdev("/dev/v4l-subdev0", 0, 1, V4L2_MBUS_FMT_UYVY8_2X8, 320, 240); - opticflow_dev = v4l2_init("/dev/video2", 320, 240, 60); //TODO: Fix defines + opticflow_dev = v4l2_init(STRINGIFY(OPTICFLOW_DEVICE), OPTICFLOW_DEVICE_SIZE, OPTICFLOW_DEVICE_BUFFERS); if (opticflow_dev == NULL) { printf("[opticflow_module] Could not initialize the video device\n"); } From 5f49f54faa72f0a7441cfc6bca18e339031b93eb Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Tue, 31 Mar 2015 10:37:50 +0200 Subject: [PATCH 14/17] [vision] Add more settings to optical flow --- conf/modules/cv_opticflow.xml | 43 ++++++++-- .../opticflow/opticflow_calculator.c | 86 +++++++++++++++---- .../opticflow/opticflow_calculator.h | 10 +++ .../computer_vision/opticflow_module.c | 2 +- .../computer_vision/opticflow_module.h | 3 + 5 files changed, 117 insertions(+), 27 deletions(-) diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index 5113ee0d89b..a2cd0b37ae5 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -13,18 +13,31 @@
- - - - - - + + + + + + +
+ +
+ + + + + + + + + +
- + - - + + @@ -32,6 +45,18 @@ + + + + + + + + + + + + diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index e4ff24095d3..8e037fcee8d 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -46,6 +46,47 @@ #define Fx_ARdrone 343.1211 #define Fy_ARdrone 348.5053 +/* Set the default values */ +#ifndef OPTICFLOW_MAX_TRACK_CORNERS +#define OPTICFLOW_MAX_TRACK_CORNERS 25 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS); + +#ifndef OPTICFLOW_WINDOW_SIZE +#define OPTICFLOW_WINDOW_SIZE 10 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE); + +#ifndef OPTICFLOW_SUBPIXEL_FACTOR +#define OPTICFLOW_SUBPIXEL_FACTOR 10 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR); + +#ifndef OPTICFLOW_MAX_ITERATIONS +#define OPTICFLOW_MAX_ITERATIONS 10 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS); + +#ifndef OPTICFLOW_THRESHOLD_VEC +#define OPTICFLOW_THRESHOLD_VEC 2 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC); + +#ifndef OPTICFLOW_FAST9_ADAPTIVE +#define OPTICFLOW_FAST9_ADAPTIVE TRUE +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE); + +#ifndef OPTICFLOW_FAST9_THRESHOLD +#define OPTICFLOW_FAST9_THRESHOLD 20 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD); + +#ifndef OPTICFLOW_FAST9_MIN_DISTANCE +#define OPTICFLOW_FAST9_MIN_DISTANCE 10 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE); + /* Functions only used here */ static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime); static int cmp_flow(const void *a, const void *b); @@ -66,6 +107,17 @@ void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h) opticflow->got_first_img = FALSE; opticflow->prev_phi = 0.0; opticflow->prev_theta = 0.0; + + /* Set the default values */ + opticflow->max_track_corners = OPTICFLOW_MAX_TRACK_CORNERS; + opticflow->window_size = OPTICFLOW_WINDOW_SIZE; + opticflow->subpixel_factor = OPTICFLOW_SUBPIXEL_FACTOR; + opticflow->max_iterations = OPTICFLOW_MAX_ITERATIONS; + opticflow->threshold_vec = OPTICFLOW_THRESHOLD_VEC; + + opticflow->fast9_adaptive = OPTICFLOW_FAST9_ADAPTIVE; + opticflow->fast9_threshold = OPTICFLOW_FAST9_THRESHOLD; + opticflow->fast9_min_distance = OPTICFLOW_FAST9_MIN_DISTANCE; } /** @@ -95,14 +147,18 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // ************************************************************************************* // FAST corner detection (TODO: non fixed threashold) - static uint8_t threshold = 20; - struct point_t *corners = fast9_detect(img, threshold, 10, 20, 20, &result->corner_cnt); + struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance, + 20, 20, &result->corner_cnt); // Adaptive threshold - if(result->corner_cnt < 40 && threshold > 5) - threshold--; - else if(result->corner_cnt > 50 && threshold < 60) - threshold++; + if(opticflow->fast9_adaptive) { + + // Decrease and increase the threshold based on previous values + if(result->corner_cnt < 40 && opticflow->fast9_threshold > 5) + opticflow->fast9_threshold--; + else if(result->corner_cnt > 50 && opticflow->fast9_threshold < 60) + opticflow->fast9_threshold++; + } #if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS image_show_points(img, corners, result->corner_cnt); @@ -119,18 +175,14 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Corner 
Tracking // ************************************************************************************* -#define MAX_TRACK_CORNERS 25 -#define HALF_WINDOW_SIZE 5 -#define SUBPIXEL_FACTOR 10 -#define MAX_ITERATIONS 10 -#define THRESHOLD_VEC 2 // Execute a Lucas Kanade optical flow result->tracked_cnt = result->corner_cnt; struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt, - HALF_WINDOW_SIZE, SUBPIXEL_FACTOR, MAX_ITERATIONS, THRESHOLD_VEC, MAX_TRACK_CORNERS); + opticflow->window_size/2, opticflow->subpixel_factor, opticflow->max_iterations, + opticflow->threshold_vec, opticflow->max_track_corners); #if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW - image_show_flow(img, vectors, result->tracked_cnt, SUBPIXEL_FACTOR); + image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor); #endif // Get the median flow @@ -158,14 +210,14 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ // Flow Derotation float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / FOV_W; float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / FOV_H; - result->flow_der_x = result->flow_x - diff_flow_x * SUBPIXEL_FACTOR; - result->flow_der_y = result->flow_y - diff_flow_y * SUBPIXEL_FACTOR; + result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor; + result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor; opticflow->prev_phi = state->phi; opticflow->prev_theta = state->theta; // Velocity calculation - result->vel_x = -result->flow_der_x * result->fps / SUBPIXEL_FACTOR * img->w / Fx_ARdrone; - result->vel_y = result->flow_der_y * result->fps / SUBPIXEL_FACTOR * img->h / Fy_ARdrone; + result->vel_x = -result->flow_der_x * result->fps / opticflow->subpixel_factor * img->w / Fx_ARdrone; + result->vel_y = result->flow_der_y * result->fps / opticflow->subpixel_factor * img->h / Fy_ARdrone; // ************************************************************************************* // Next Loop Preparation diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h index f010fc8284e..f30c0dc566a 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h @@ -42,6 +42,16 @@ struct opticflow_t struct image_t img_gray; //< Current gray image frame struct image_t prev_img_gray; //< Previous gray image frame struct timeval prev_timestamp; //< Timestamp of the previous frame, used for FPS calculation + + uint8_t max_track_corners; //< Maximum amount of corners Lucas Kanade should track + uint16_t window_size; //< Window size of the Lucas Kanade calculation (needs to be even) + uint8_t subpixel_factor; //< The amount of subpixels per pixel + uint8_t max_iterations; //< The maximum amount of iterations the Lucas Kanade algorithm should do + uint8_t threshold_vec; //< The threshold in x, y subpixels which the algorithm should stop + + bool_t fast9_adaptive; //< Whether the FAST9 threshold should be adaptive + uint8_t fast9_threshold; //< FAST9 corner detection threshold + uint16_t fast9_min_distance; //< Minimum distance in pixels between corners }; diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index 92312b985f4..b1071a51676 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ 
b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -64,7 +64,7 @@ PRINT_CONFIG_MSG("OPTICFLOW_DEVICE_SIZE = " _SIZE_HELPER(OPTICFLOW_DEVICE_SIZE)) PRINT_CONFIG_VAR(VIEWVIDEO_DEVICE_BUFFERS); /* The main opticflow variables */ -static struct opticflow_t opticflow; //< Opticflow calculations +struct opticflow_t opticflow; //< Opticflow calculations static struct opticflow_result_t opticflow_result; //< The opticflow result static struct opticflow_state_t opticflow_state; //< State of the drone to communicate with the opticflow static struct v4l2_device *opticflow_dev; //< The opticflow camera V4L2 device diff --git a/sw/airborne/modules/computer_vision/opticflow_module.h b/sw/airborne/modules/computer_vision/opticflow_module.h index 5e583ce5a68..eb158cc83c2 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.h +++ b/sw/airborne/modules/computer_vision/opticflow_module.h @@ -32,6 +32,9 @@ #include "opticflow/opticflow_calculator.h" #include "opticflow/stabilization_opticflow.h" +// Needed for settings +extern struct opticflow_t opticflow; + // Module functions extern void opticflow_module_init(void); extern void opticflow_module_run(void); From abc2859fbe74bd59f35ca020d5b519d01b925a0d Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Tue, 31 Mar 2015 11:35:44 +0200 Subject: [PATCH 15/17] [vision] Added more documentation about optical flow --- conf/modules/cv_opticflow.xml | 25 +++++++++++--- .../opticflow/opticflow_calculator.c | 33 ++++++++++++++----- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/conf/modules/cv_opticflow.xml b/conf/modules/cv_opticflow.xml index a2cd0b37ae5..4eb1c8264ec 100644 --- a/conf/modules/cv_opticflow.xml +++ b/conf/modules/cv_opticflow.xml @@ -3,15 +3,14 @@ - Compute Optic Flow from Ardrone2 Bottom Camera - - Computes Pitch- and rollrate corrected optic flow from downward looking - ARDrone2 camera looking at a textured floor. + Hovers the drone based on optical flow made for Linux video Devices. + Computes Pitch- and roll attide from downward looking camera looking at a textured floor. - Sonar is required. - Controller can hold position +
@@ -21,14 +20,30 @@
+
+ + + + + + + + + + + + + + + @@ -37,6 +52,7 @@ + @@ -46,6 +62,7 @@ + diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c index 8e037fcee8d..75974178aad 100644 --- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c +++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c @@ -40,11 +40,26 @@ #include "lib/vision/lucas_kanade.h" #include "lib/vision/fast_rosten.h" -// ARDrone Vertical Camera Parameters -#define FOV_H 0.67020643276 -#define FOV_W 0.89360857702 -#define Fx_ARdrone 343.1211 -#define Fy_ARdrone 348.5053 +// Camera parameters (defaults are from an ARDrone 2) +#ifndef OPTICFLOW_FOV_W +#define OPTICFLOW_FOV_W 0.89360857702 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FOV_W); + +#ifndef OPTICFLOW_FOV_H +#define OPTICFLOW_FOV_H 0.67020643276 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FOV_H); + +#ifndef OPTICFLOW_FX +#define OPTICFLOW_FX 343.1211 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FX); + +#ifndef OPTICFLOW_FY +#define OPTICFLOW_FY 348.5053 +#endif +PRINT_CONFIG_VAR(OPTICFLOW_FY); /* Set the default values */ #ifndef OPTICFLOW_MAX_TRACK_CORNERS @@ -208,16 +223,16 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_ } // Flow Derotation - float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / FOV_W; - float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / FOV_H; + float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W; + float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H; result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor; result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor; opticflow->prev_phi = state->phi; opticflow->prev_theta = state->theta; // Velocity calculation - result->vel_x = -result->flow_der_x * result->fps / opticflow->subpixel_factor * img->w / Fx_ARdrone; - result->vel_y = result->flow_der_y * result->fps / opticflow->subpixel_factor * img->h / Fy_ARdrone; + result->vel_x = -result->flow_der_x * result->fps / opticflow->subpixel_factor * img->w / OPTICFLOW_FX; + result->vel_y = result->flow_der_y * result->fps / opticflow->subpixel_factor * img->h / OPTICFLOW_FY; // ************************************************************************************* // Next Loop Preparation From 0f7dc6b093ca949c79526d48204d25b26bd2e1d4 Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Mon, 6 Apr 2015 23:03:10 +0200 Subject: [PATCH 16/17] [vision] Small fixes and added periodic telemetry --- conf/telemetry/default_ardrone.xml | 1 + sw/airborne/boards/bebop/actuators.c | 33 ------------------- .../computer_vision/lib/vision/image.c | 4 +-- .../opticflow/stabilization_opticflow.c | 7 +--- .../computer_vision/opticflow_module.c | 30 +++++++++++++++-- 5 files changed, 31 insertions(+), 44 deletions(-) diff --git a/conf/telemetry/default_ardrone.xml b/conf/telemetry/default_ardrone.xml index ab3bc70dc37..7f96c175358 100644 --- a/conf/telemetry/default_ardrone.xml +++ b/conf/telemetry/default_ardrone.xml @@ -21,6 +21,7 @@ + diff --git a/sw/airborne/boards/bebop/actuators.c b/sw/airborne/boards/bebop/actuators.c index c647bcb4f24..6e725a5e628 100644 --- a/sw/airborne/boards/bebop/actuators.c +++ b/sw/airborne/boards/bebop/actuators.c @@ -144,36 +144,3 @@ static uint8_t actuators_bebop_checksum(uint8_t *bytes, uint8_t size) return checksum; } - -/*static void actuators_bebop_saturate(void) { - // 
Find the lowest and highest commands - int32_t max_cmd = 9000; // Should be gotton from airframe file per motor - int32_t min_cmd = 3000; // Should be gotton from airframe file per motor - for(int i = 0; i < 4; i++) { - if(actuators_bebop.rpm_ref[i] > max_cmd) - max_cmd = actuators_bebop.rpm_ref[i]; - if(actuators_bebop.rpm_ref[i] < min_cmd) - min_cmd = actuators_bebop.rpm_ref[i]; - } - - // Find the maximum motor command (Saturated motor or either MOTOR_MIXING_MAX_MOTOR) - int32_t max_motor = 9000; - for(int i = 0; i < 4; i++) { - if(actuators_bebop.rpm_obs[i] & (1<<15) && max_cmd > (actuators_bebop.rpm_obs[i] & ~(1<<15))) - max_motor = actuators_bebop.rpm_obs[i] & ~(1<<15); - } - - // Saturate the offsets - if(max_cmd > max_motor) { - int32_t saturation_offset = 9000 - max_cmd; - for(int i = 0; i < 4; i++) - actuators_bebop.rpm_ref[i] += saturation_offset; - motor_mixing.nb_saturation++; - } - else if(min_cmd < 3000) { - int32_t saturation_offset = 3000 - min_cmd; - for(int i = 0; i < 4; i++) - actuators_bebop.rpm_ref[i] += saturation_offset; - motor_mixing.nb_saturation++; - } -}*/ diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 969dd9a9b88..752afcf2f2c 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -476,8 +476,8 @@ void image_draw_line(struct image_t *img, struct point_t *from, struct point_t * uint16_t starty = from->y; /* compute the distances in both directions */ - int32_t delta_x = from->x - to->x; - int32_t delta_y = from->y - to->y; + int32_t delta_x = to->x - from->x; + int32_t delta_y = to->y - from->y; /* Compute the direction of the increment, an increment of 0 means either a horizontal or vertical diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c index f4742a00c89..cff2aba7842 100644 --- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c +++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c @@ -150,14 +150,9 @@ void stabilization_opticflow_update(struct opticflow_result_t *result) opticflow_stab.cmd.phi = opticflow_stab.phi_pgain * err_vx / 100 + opticflow_stab.phi_igain * opticflow_stab.err_vx_int; opticflow_stab.cmd.theta = -(opticflow_stab.theta_pgain * err_vy / 100 - + opticflow_stab.err_vy_int * opticflow_stab.err_vy_int); + + opticflow_stab.theta_igain * opticflow_stab.err_vy_int); /* Bound the roll and pitch commands */ BoundAbs(opticflow_stab.cmd.phi, CMD_OF_SAT); BoundAbs(opticflow_stab.cmd.theta, CMD_OF_SAT); - - DOWNLINK_SEND_OPTIC_FLOW_EST(DefaultChannel, DefaultDevice, - &result->fps, &result->corner_cnt, &result->tracked_cnt, &result->flow_x, &result->flow_y, - &result->flow_der_x, &result->flow_der_y, &result->vel_x, &result->vel_y, - &opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta); } diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c index b1071a51676..654e5416737 100644 --- a/sw/airborne/modules/computer_vision/opticflow_module.c +++ b/sw/airborne/modules/computer_vision/opticflow_module.c @@ -77,6 +77,26 @@ static pthread_mutex_t opticflow_mutex; //< Mutex lock fo thread saf static void *opticflow_module_calc(void *data); //< The main optical flow calculation thread static void opticflow_agl_cb(uint8_t sender_id, float distance); //< Callback function of the 
ground altitude +#if PERIODIC_TELEMETRY +#include "subsystems/datalink/telemetry.h" +/** + * Send optical flow telemetry information + * @param[in] *trans The transport structure to send the information over + * @param[in] *dev The link to send the data over + */ +static void opticflow_telem_send(struct transport_tx *trans, struct link_device *dev) +{ + pthread_mutex_lock(&opticflow_mutex); + pprz_msg_send_OPTIC_FLOW_EST(trans, dev, AC_ID, + &opticflow_result.fps, &opticflow_result.corner_cnt, + &opticflow_result.tracked_cnt, &opticflow_result.flow_x, + &opticflow_result.flow_y, &opticflow_result.flow_der_x, + &opticflow_result.flow_der_y, &opticflow_result.vel_x, + &opticflow_result.vel_y, + &opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta); + pthread_mutex_unlock(&opticflow_mutex); +} +#endif /** * Initialize the optical flow module for the bottom camera @@ -111,6 +131,10 @@ void opticflow_module_init(void) if (opticflow_dev == NULL) { printf("[opticflow_module] Could not initialize the video device\n"); } + +#if PERIODIC_TELEMETRY + register_periodic_telemetry(DefaultPeriodic, "OPTIC_FLOW_EST", opticflow_telem_send); +#endif } /** @@ -174,7 +198,7 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { return 0; } -#ifdef OPTICFLOW_DEBUG +#if OPTICFLOW_DEBUG // Create a new JPEG image struct image_t img_jpeg; image_create(&img_jpeg, opticflow_dev->w, opticflow_dev->h, IMAGE_JPEG); @@ -202,7 +226,7 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { opticflow_got_result = TRUE; pthread_mutex_unlock(&opticflow_mutex); -#ifdef OPTICFLOW_DEBUG +#if OPTICFLOW_DEBUG jpeg_encode_image(&img, &img_jpeg, 70, FALSE); rtp_frame_send( &VIEWVIDEO_DEV, // UDP device @@ -218,7 +242,7 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) { v4l2_image_free(opticflow_dev, &img); } -#ifdef OPTICFLOW_DEBUG +#if OPTICFLOW_DEBUG image_free(&img_jpeg); #endif } From dea8152f26e44fa13b2e9e1b2f1212cf37b6160b Mon Sep 17 00:00:00 2001 From: Freek van Tienen Date: Mon, 6 Apr 2015 23:18:27 +0200 Subject: [PATCH 17/17] [vision] Fix code style formatting --- sw/airborne/arch/linux/udp_socket.c | 5 +- sw/airborne/boards/ardrone/navdata.c | 3 +- .../computer_vision/lib/encoding/jpeg.c | 5 +- .../computer_vision/lib/encoding/rtp.c | 2 +- .../computer_vision/lib/encoding/rtp.h | 2 +- .../modules/computer_vision/lib/v4l/v4l2.c | 8 +- .../computer_vision/lib/vision/fast_rosten.c | 454 +++++++++--------- .../computer_vision/lib/vision/image.c | 160 +++--- .../computer_vision/lib/vision/lucas_kanade.c | 29 +- .../computer_vision/lib/vision/lucas_kanade.h | 2 +- .../opticflow/opticflow_calculator.c | 41 +- .../opticflow/opticflow_calculator.h | 3 +- .../opticflow/stabilization_opticflow.c | 4 +- .../computer_vision/opticflow_module.c | 23 +- .../modules/computer_vision/viewvideo.c | 14 +- 15 files changed, 379 insertions(+), 376 deletions(-) diff --git a/sw/airborne/arch/linux/udp_socket.c b/sw/airborne/arch/linux/udp_socket.c index 64be511c2b0..014a7d6febf 100644 --- a/sw/airborne/arch/linux/udp_socket.c +++ b/sw/airborne/arch/linux/udp_socket.c @@ -67,8 +67,7 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port if (hp->h_addrtype == AF_INET && hp->h_length == 4) { /* simply use first address */ memcpy(&host_ip.s_addr, hp->h_addr_list[0], hp->h_length); - } - else { + } else { return -1; } } @@ -120,7 +119,7 @@ int udp_socket_send(struct UdpSocket *sock, uint8_t *buffer, uint16_t len) } ssize_t bytes_sent = sendto(sock->sockfd, 
buffer, len, 0, - (struct sockaddr *)&sock->addr_out, sizeof(sock->addr_out)); + (struct sockaddr *)&sock->addr_out, sizeof(sock->addr_out)); if (bytes_sent != len) { TRACE(TRACE_ERROR, "error sending to sock %d (%d)\n", (int)bytes_sent, strerror(errno)); } diff --git a/sw/airborne/boards/ardrone/navdata.c b/sw/airborne/boards/ardrone/navdata.c index a453e42fada..1dd51271adb 100644 --- a/sw/airborne/boards/ardrone/navdata.c +++ b/sw/airborne/boards/ardrone/navdata.c @@ -387,8 +387,7 @@ void navdata_update() navdata.imu_available = TRUE; navdata.packetsRead++; - } - else { + } else { // no new packet available, still unlock mutex again pthread_mutex_unlock(&navdata_mutex); } diff --git a/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c index 469f7c6977d..6ec9e3862e9 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/jpeg.c @@ -429,8 +429,9 @@ void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality uint8_t *input_ptr = in->buf; uint32_t image_format = FOUR_ZERO_ZERO; - if(in->type == IMAGE_YUV422) + if (in->type == IMAGE_YUV422) { image_format = FOUR_TWO_TWO; + } JPEG_ENCODER_STRUCTURE JpegStruct; JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure = &JpegStruct; @@ -479,7 +480,7 @@ void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality output_ptr = jpeg_close_bitstream(output_ptr); out->w = in->w; out->h = in->h; - out->buf_size = output_ptr - (uint8_t*)out->buf; + out->buf_size = output_ptr - (uint8_t *)out->buf; } static uint8_t *jpeg_encodeMCU(JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure, uint32_t image_format, uint8_t *output_ptr) diff --git a/sw/airborne/modules/computer_vision/lib/encoding/rtp.c b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c index 5be8bcd7136..9b8529afe25 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/rtp.c +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.c @@ -123,7 +123,7 @@ void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_ } rtp_packet_send(udp, jpeg_ptr, len, packetcounter, timecounter, offset, lastpacket, img->w, img->h, format_code, - quality_code, has_dri_header); + quality_code, has_dri_header); jpeg_size -= len; jpeg_ptr += len; diff --git a/sw/airborne/modules/computer_vision/lib/encoding/rtp.h b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h index 69f7f714c50..f34fbc15268 100644 --- a/sw/airborne/modules/computer_vision/lib/encoding/rtp.h +++ b/sw/airborne/modules/computer_vision/lib/encoding/rtp.h @@ -33,7 +33,7 @@ #include "mcu_periph/udp.h" void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code, uint8_t quality_code, - uint8_t has_dri_header, uint32_t delta_t); + uint8_t has_dri_header, uint32_t delta_t); void rtp_frame_test(struct udp_periph *udp); #endif /* _CV_ENCODING_RTP_H */ diff --git a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c index 5b28588f57c..ecd6d498dd5 100644 --- a/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c +++ b/sw/airborne/modules/computer_vision/lib/v4l/v4l2.c @@ -289,7 +289,8 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height * @param[in] *dev The V4L2 video device we want to get an image from * @param[out] *img The image that we got from the video device */ -void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) { +void 
v4l2_image_get(struct v4l2_device *dev, struct image_t *img) +{ uint16_t img_idx = V4L2_IMG_NONE; // Continu to wait for an image @@ -326,7 +327,8 @@ void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) { * @param[out] *img The image that we got from the video device * @return Whether we got an image or not */ -bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) { +bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) +{ uint16_t img_idx = V4L2_IMG_NONE; // Try to get the current image @@ -341,7 +343,7 @@ bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) { if (img_idx == V4L2_IMG_NONE) { return FALSE; } else { - // Set the image + // Set the image img->type = IMAGE_YUV422; img->w = dev->w; img->h = dev->h; diff --git a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c index 1218b5932b9..d457d671e34 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c +++ b/sw/airborne/modules/computer_vision/lib/vision/fast_rosten.c @@ -46,8 +46,7 @@ static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel * @param[out] *num_corner The amount of corners found * @return The corners found */ -struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners) -{ +struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners) { uint32_t corner_cnt = 0; uint16_t rsize = 512; int pixel[16]; @@ -56,8 +55,9 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi // Set the pixel size uint8_t pixel_size = 1; - if(img->type == IMAGE_YUV422) + if (img->type == IMAGE_YUV422) { pixel_size = 2; + } // Calculate the pixel offsets fast_make_offsets(pixel, img->w, pixel_size); @@ -66,27 +66,27 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi for (y = 3 + y_padding; y < img->h - 3 - y_padding; y++) for (x = 3 + x_padding; x < img->w - 3 - x_padding; x++) { // First check if we aren't in range vertical (TODO: fix less intensive way) - if(min_dist > 0) { + if (min_dist > 0) { bool_t need_skip = FALSE; // Go trough all the previous corners - for(i = 0; i < corner_cnt; i++) { - if(x-min_dist < ret_corners[i].x && ret_corners[i].x < x+min_dist - && y-min_dist < ret_corners[i].y && ret_corners[i].y < y+min_dist) { + for (i = 0; i < corner_cnt; i++) { + if (x - min_dist < ret_corners[i].x && ret_corners[i].x < x + min_dist + && y - min_dist < ret_corners[i].y && ret_corners[i].y < y + min_dist) { need_skip = TRUE; break; } } // Skip the box if we found a pixel nearby - if(need_skip) { + if (need_skip) { x += min_dist; continue; } } // Calculate the threshold values - const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size/2; + const uint8_t *p = ((uint8_t *)img->buf) + y * img->w * pixel_size + x * pixel_size + pixel_size / 2; int16_t cb = *p + threshold; int16_t c_b = *p - threshold; @@ -100,16 +100,16 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else if (p[pixel[15]] > cb) - {} + {} else { continue; } else if (p[pixel[7]] < c_b) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -121,7 +121,7 @@ struct point_t 
*fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -148,7 +148,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi } else if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -159,7 +159,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[15]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -171,7 +171,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -204,7 +204,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -232,7 +232,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -248,7 +248,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -281,14 +281,14 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) - {} + {} else { continue; } @@ -318,7 +318,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -352,9 +352,9 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[6]] < c_b) - {} + {} else if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -387,7 +387,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -416,14 +416,14 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) - {} + {} else { continue; } @@ -457,9 +457,9 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[6]] < c_b) - {} + {} else if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -493,13 +493,13 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) - {} + {} else { continue; } @@ -521,7 +521,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) - {} + {} else { continue; } @@ -551,7 +551,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t 
threshold, uint16_t mi if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -585,15 +585,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[5]] < c_b) - {} + {} else if (p[pixel[14]] < c_b) - {} + {} else { continue; } else if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -626,7 +626,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -656,13 +656,13 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) - {} + {} else { continue; } @@ -684,7 +684,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) - {} + {} else { continue; } @@ -718,15 +718,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[5]] < c_b) - {} + {} else if (p[pixel[14]] < c_b) - {} + {} else { continue; } else if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -761,12 +761,12 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { continue; } @@ -784,7 +784,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { continue; } @@ -806,7 +806,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { continue; } @@ -839,10 +839,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[5]] < c_b) if (p[pixel[4]] < c_b) - {} + {} else if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -852,7 +852,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -866,7 +866,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -900,12 +900,12 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { continue; } @@ -923,7 +923,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { continue; } @@ -945,7 +945,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) if (p[pixel[9]] > cb) - {} + {} else { 
continue; } @@ -979,15 +979,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[5]] < c_b) if (p[pixel[4]] < c_b) - {} + {} else if (p[pixel[13]] < c_b) - {} + {} else { continue; } else if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -997,7 +997,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -1033,11 +1033,11 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1051,7 +1051,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1069,7 +1069,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1091,7 +1091,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1124,10 +1124,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[4]] < c_b) if (p[pixel[3]] < c_b) - {} + {} else if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -1137,7 +1137,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -1151,7 +1151,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -1169,7 +1169,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -1204,11 +1204,11 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1222,7 +1222,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1240,7 +1240,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1262,7 +1262,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) if (p[pixel[8]] > cb) - {} + {} else { continue; } @@ -1296,15 +1296,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[4]] < c_b) if (p[pixel[3]] < c_b) - {} + {} else if (p[pixel[12]] < c_b) - {} + {} else { continue; } else if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -1314,7 +1314,7 @@ struct point_t 
*fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -1328,7 +1328,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -1365,10 +1365,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1378,7 +1378,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1392,7 +1392,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1410,7 +1410,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1432,7 +1432,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1465,10 +1465,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[4]] < c_b) if (p[pixel[3]] < c_b) if (p[pixel[2]] < c_b) - {} + {} else if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) - {} + {} else { continue; } @@ -1478,7 +1478,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -1492,7 +1492,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -1510,7 +1510,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -1532,7 +1532,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -1568,10 +1568,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1581,7 +1581,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1595,7 +1595,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1613,7 +1613,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - {} + {} else { continue; } @@ -1635,7 +1635,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[6]] > cb) if (p[pixel[7]] > cb) - 
{} + {} else { continue; } @@ -1669,15 +1669,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[4]] < c_b) if (p[pixel[3]] < c_b) if (p[pixel[2]] < c_b) - {} + {} else if (p[pixel[11]] < c_b) - {} + {} else { continue; } else if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -1687,7 +1687,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -1701,7 +1701,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -1719,7 +1719,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -1757,10 +1757,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[4]] > cb) if (p[pixel[3]] > cb) if (p[pixel[2]] > cb) - {} + {} else if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) - {} + {} else { continue; } @@ -1770,7 +1770,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -1784,7 +1784,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -1802,7 +1802,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -1824,7 +1824,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -1857,10 +1857,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -1870,7 +1870,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -1884,7 +1884,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -1902,7 +1902,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -1924,7 +1924,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -1962,10 +1962,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[4]] > cb) if (p[pixel[3]] > cb) - {} + {} else if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -1975,7 +1975,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - 
{} + {} else { continue; } @@ -1989,7 +1989,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -2007,7 +2007,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2039,11 +2039,11 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -2057,7 +2057,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -2075,7 +2075,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -2097,7 +2097,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -2135,10 +2135,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[5]] > cb) if (p[pixel[4]] > cb) - {} + {} else if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -2148,7 +2148,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -2162,7 +2162,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2193,12 +2193,12 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -2216,7 +2216,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -2238,7 +2238,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -2277,15 +2277,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[6]] > cb) if (p[pixel[5]] > cb) - {} + {} else if (p[pixel[14]] > cb) - {} + {} else { continue; } else if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2319,7 +2319,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[9]] > cb) if (p[pixel[10]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -2345,13 +2345,13 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) - {} + {} else { 
continue; } @@ -2373,7 +2373,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) - {} + {} else { continue; } @@ -2406,7 +2406,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -2442,9 +2442,9 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[6]] > cb) - {} + {} else if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2478,7 +2478,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -2503,14 +2503,14 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) - {} + {} else { continue; } @@ -2543,7 +2543,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -2579,7 +2579,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -2603,7 +2603,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi } else if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -2618,7 +2618,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -2653,7 +2653,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2677,7 +2677,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi } else if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -2686,15 +2686,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi } else if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else if (p[pixel[15]] < c_b) - {} + {} else { continue; } else if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -2710,7 +2710,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2738,7 +2738,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -2757,9 +2757,9 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[6]] > cb) - {} + {} else if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2788,14 +2788,14 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if 
(p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) - {} + {} else { continue; } @@ -2832,15 +2832,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[6]] > cb) if (p[pixel[5]] > cb) - {} + {} else if (p[pixel[14]] > cb) - {} + {} else { continue; } else if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2870,13 +2870,13 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) - {} + {} else { continue; } @@ -2898,7 +2898,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) if (p[pixel[10]] < c_b) - {} + {} else { continue; } @@ -2935,15 +2935,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] > cb) if (p[pixel[5]] > cb) if (p[pixel[4]] > cb) - {} + {} else if (p[pixel[13]] > cb) - {} + {} else { continue; } else if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -2953,7 +2953,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -2984,12 +2984,12 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -3007,7 +3007,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -3029,7 +3029,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) if (p[pixel[9]] < c_b) - {} + {} else { continue; } @@ -3066,15 +3066,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] > cb) if (p[pixel[4]] > cb) if (p[pixel[3]] > cb) - {} + {} else if (p[pixel[12]] > cb) - {} + {} else { continue; } else if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -3084,7 +3084,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -3098,7 +3098,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -3130,11 +3130,11 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -3148,7 +3148,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -3166,7 +3166,7 @@ struct point_t *fast9_detect(struct image_t *img, 
uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -3188,7 +3188,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) if (p[pixel[8]] < c_b) - {} + {} else { continue; } @@ -3225,15 +3225,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[4]] > cb) if (p[pixel[3]] > cb) if (p[pixel[2]] > cb) - {} + {} else if (p[pixel[11]] > cb) - {} + {} else { continue; } else if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -3243,7 +3243,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -3257,7 +3257,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -3275,7 +3275,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -3308,10 +3308,10 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -3321,7 +3321,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -3335,7 +3335,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -3353,7 +3353,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -3375,7 +3375,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[5]] < c_b) if (p[pixel[6]] < c_b) if (p[pixel[7]] < c_b) - {} + {} else { continue; } @@ -3412,15 +3412,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[3]] > cb) if (p[pixel[2]] > cb) if (p[pixel[1]] > cb) - {} + {} else if (p[pixel[10]] > cb) - {} + {} else { continue; } else if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) - {} + {} else { continue; } @@ -3430,7 +3430,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[10]] > cb) if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) - {} + {} else { continue; } @@ -3444,7 +3444,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] > cb) if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) - {} + {} else { continue; } @@ -3462,7 +3462,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] > cb) if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) - {} + {} else { continue; } @@ -3484,7 +3484,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] > cb) if (p[pixel[14]] > cb) if (p[pixel[15]] > cb) - {} + {} else { continue; } @@ -3518,15 +3518,15 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, 
uint16_t mi if (p[pixel[3]] < c_b) if (p[pixel[2]] < c_b) if (p[pixel[1]] < c_b) - {} + {} else if (p[pixel[10]] < c_b) - {} + {} else { continue; } else if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) - {} + {} else { continue; } @@ -3536,7 +3536,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi else if (p[pixel[10]] < c_b) if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) - {} + {} else { continue; } @@ -3550,7 +3550,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[11]] < c_b) if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) - {} + {} else { continue; } @@ -3568,7 +3568,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[12]] < c_b) if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) - {} + {} else { continue; } @@ -3590,7 +3590,7 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi if (p[pixel[13]] < c_b) if (p[pixel[14]] < c_b) if (p[pixel[15]] < c_b) - {} + {} else { continue; } @@ -3644,20 +3644,20 @@ struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t mi */ static void fast_make_offsets(int32_t *pixel, uint16_t row_stride, uint8_t pixel_size) { - pixel[0] = 0*pixel_size + row_stride * 3*pixel_size; - pixel[1] = 1*pixel_size + row_stride * 3*pixel_size; - pixel[2] = 2*pixel_size + row_stride * 2*pixel_size; - pixel[3] = 3*pixel_size + row_stride * 1*pixel_size; - pixel[4] = 3*pixel_size + row_stride * 0*pixel_size; - pixel[5] = 3*pixel_size + row_stride * -1*pixel_size; - pixel[6] = 2*pixel_size + row_stride * -2*pixel_size; - pixel[7] = 1*pixel_size + row_stride * -3*pixel_size; - pixel[8] = 0*pixel_size + row_stride * -3*pixel_size; - pixel[9] = -1*pixel_size + row_stride * -3*pixel_size; - pixel[10] = -2*pixel_size + row_stride * -2*pixel_size; - pixel[11] = -3*pixel_size + row_stride * -1*pixel_size; - pixel[12] = -3*pixel_size + row_stride * 0*pixel_size; - pixel[13] = -3*pixel_size + row_stride * 1*pixel_size; - pixel[14] = -2*pixel_size + row_stride * 2*pixel_size; - pixel[15] = -1*pixel_size + row_stride * 3*pixel_size; + pixel[0] = 0 * pixel_size + row_stride * 3 * pixel_size; + pixel[1] = 1 * pixel_size + row_stride * 3 * pixel_size; + pixel[2] = 2 * pixel_size + row_stride * 2 * pixel_size; + pixel[3] = 3 * pixel_size + row_stride * 1 * pixel_size; + pixel[4] = 3 * pixel_size + row_stride * 0 * pixel_size; + pixel[5] = 3 * pixel_size + row_stride * -1 * pixel_size; + pixel[6] = 2 * pixel_size + row_stride * -2 * pixel_size; + pixel[7] = 1 * pixel_size + row_stride * -3 * pixel_size; + pixel[8] = 0 * pixel_size + row_stride * -3 * pixel_size; + pixel[9] = -1 * pixel_size + row_stride * -3 * pixel_size; + pixel[10] = -2 * pixel_size + row_stride * -2 * pixel_size; + pixel[11] = -3 * pixel_size + row_stride * -1 * pixel_size; + pixel[12] = -3 * pixel_size + row_stride * 0 * pixel_size; + pixel[13] = -3 * pixel_size + row_stride * 1 * pixel_size; + pixel[14] = -2 * pixel_size + row_stride * 2 * pixel_size; + pixel[15] = -1 * pixel_size + row_stride * 3 * pixel_size; } diff --git a/sw/airborne/modules/computer_vision/lib/vision/image.c b/sw/airborne/modules/computer_vision/lib/vision/image.c index 752afcf2f2c..662a97b7919 100644 --- a/sw/airborne/modules/computer_vision/lib/vision/image.c +++ b/sw/airborne/modules/computer_vision/lib/vision/image.c @@ -43,14 +43,15 @@ void image_create(struct image_t *img, uint16_t width, uint16_t height, enum ima img->h = height; // Depending on the type the size 
differs - if(type == IMAGE_YUV422) - img->buf_size = sizeof(uint8_t)*2 * width * height; - else if(type == IMAGE_JPEG) - img->buf_size = sizeof(uint8_t)*1.1 * width * height; // At maximum quality this is enough - else if(type == IMAGE_GRADIENT) + if (type == IMAGE_YUV422) { + img->buf_size = sizeof(uint8_t) * 2 * width * height; + } else if (type == IMAGE_JPEG) { + img->buf_size = sizeof(uint8_t) * 1.1 * width * height; // At maximum quality this is enough + } else if (type == IMAGE_GRADIENT) { img->buf_size = sizeof(int16_t) * width * height; - else + } else { img->buf_size = sizeof(uint8_t) * width * height; + } img->buf = malloc(img->buf_size); } @@ -72,8 +73,9 @@ void image_free(struct image_t *img) */ void image_copy(struct image_t *input, struct image_t *output) { - if(input->type != output->type) + if (input->type != output->type) { return; + } output->w = input->w; output->h = input->h; @@ -120,8 +122,9 @@ void image_to_grayscale(struct image_t *input, struct image_t *output) // Copy the pixels for (int y = 0; y < output->h; y++) { for (int x = 0; x < output->w; x++) { - if(output->type == IMAGE_YUV422) - *dest++ = 127; // U / V + if (output->type == IMAGE_YUV422) { + *dest++ = 127; // U / V + } *dest++ = *source; // Y source += 2; } @@ -141,7 +144,7 @@ void image_to_grayscale(struct image_t *input, struct image_t *output) * @return The amount of filtered pixels */ uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, - uint8_t u_M, uint8_t v_m, uint8_t v_M) + uint8_t u_M, uint8_t v_m, uint8_t v_M) { uint16_t cnt = 0; uint8_t *source = input->buf; @@ -225,7 +228,7 @@ void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint source += pixelskip; } // read 1 in every 'downsample' rows, so skip (downsample-1) rows after reading the first - source += (downsample-1) * input->w * 2; + source += (downsample - 1) * input->w * 2; } } @@ -249,8 +252,8 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct uint16_t subpixel_h = input->h * subpixel_factor; // Go through the whole window size in normal coordinates - for(uint16_t i = 0; i < output->w; i++) { - for(uint16_t j = 0; j < output->h; j++) { + for (uint16_t i = 0; i < output->w; i++) { + for (uint16_t j = 0; j < output->h; j++) { // Calculate the subpixel coordinate uint16_t x = center->x + (i - half_window) * subpixel_factor; uint16_t y = center->y + (j - half_window) * subpixel_factor; @@ -266,22 +269,21 @@ void image_subpixel_window(struct image_t *input, struct image_t *output, struct uint16_t tl_y = orig_y * subpixel_factor; // Check if it is the top left pixel - if(tl_x == x && tl_y == y) { - output_buf[output->w*j + i] = input_buf[input->w*orig_y + orig_x]; - } - else { + if (tl_x == x && tl_y == y) { + output_buf[output->w * j + i] = input_buf[input->w * orig_y + orig_x]; + } else { // Calculate the difference from the top left uint16_t alpha_x = (x - tl_x); uint16_t alpha_y = (y - tl_y); // Blend from the 4 surrounding pixels - uint32_t blend = (subpixel_factor - alpha_x) * (subpixel_factor - alpha_y) * input_buf[input->w*orig_y + orig_x]; - blend += alpha_x * (subpixel_factor - alpha_y) * input_buf[input->w*orig_y + (orig_x+1)]; - blend += (subpixel_factor - alpha_x) * alpha_y * input_buf[input->w*(orig_y+1) + orig_x]; - blend += alpha_x * alpha_y * input_buf[input->w*(orig_y+1) + (orig_x+1)]; + uint32_t blend = (subpixel_factor - alpha_x) * (subpixel_factor - alpha_y) * input_buf[input->w * orig_y + orig_x]; + 
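+        // Illustrative arithmetic only: with subpixel_factor = 10, alpha_x = 3 and alpha_y = 4,
+        // the four neighbouring pixels get weights 7*6 = 42, 3*6 = 18, 7*4 = 28 and 3*4 = 12,
+        // which sum to 100 = subpixel_factor * subpixel_factor, hence the normalising division below.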
blend += alpha_x * (subpixel_factor - alpha_y) * input_buf[input->w * orig_y + (orig_x + 1)]; + blend += (subpixel_factor - alpha_x) * alpha_y * input_buf[input->w * (orig_y + 1) + orig_x]; + blend += alpha_x * alpha_y * input_buf[input->w * (orig_y + 1) + (orig_x + 1)]; // Set the normalized pixel blend - output_buf[output->w*j + i] = blend / (subpixel_factor * subpixel_factor); + output_buf[output->w * j + i] = blend / (subpixel_factor * subpixel_factor); } } } @@ -302,10 +304,10 @@ void image_gradients(struct image_t *input, struct image_t *dx, struct image_t * int16_t *dy_buf = (int16_t *)dy->buf; // Go trough all pixels except the borders - for(uint16_t x = 1; x < input->w-1; x++) { - for(uint16_t y = 1; y < input->h-1; y++) { - dx_buf[(y-1)*dx->w + (x-1)] = (int16_t)input_buf[y*input->w + x+1] - (int16_t)input_buf[y*input->w + x-1]; - dy_buf[(y-1)*dy->w + (x-1)] = (int16_t)input_buf[(y+1)*input->w + x] - (int16_t)input_buf[(y-1)*input->w + x]; + for (uint16_t x = 1; x < input->w - 1; x++) { + for (uint16_t y = 1; y < input->h - 1; y++) { + dx_buf[(y - 1)*dx->w + (x - 1)] = (int16_t)input_buf[y * input->w + x + 1] - (int16_t)input_buf[y * input->w + x - 1]; + dy_buf[(y - 1)*dy->w + (x - 1)] = (int16_t)input_buf[(y + 1) * input->w + x] - (int16_t)input_buf[(y - 1) * input->w + x]; } } } @@ -326,11 +328,11 @@ void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g) int16_t *dy_buf = (int16_t *)dy->buf; // Calculate the different sums - for(uint16_t x = 0; x < dx->w; x++) { - for(uint16_t y = 0; y < dy->h; y++) { - sum_dxx += ((int32_t)dx_buf[y*dx->w + x] * dx_buf[y*dx->w + x]); - sum_dxy += ((int32_t)dx_buf[y*dx->w + x] * dy_buf[y*dy->w + x]); - sum_dyy += ((int32_t)dy_buf[y*dy->w + x] * dy_buf[y*dy->w + x]); + for (uint16_t x = 0; x < dx->w; x++) { + for (uint16_t y = 0; y < dy->h; y++) { + sum_dxx += ((int32_t)dx_buf[y * dx->w + x] * dx_buf[y * dx->w + x]); + sum_dxy += ((int32_t)dx_buf[y * dx->w + x] * dy_buf[y * dy->w + x]); + sum_dyy += ((int32_t)dy_buf[y * dy->w + x] * dy_buf[y * dy->w + x]); } } @@ -359,18 +361,20 @@ uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct i uint8_t *img_b_buf = (uint8_t *)img_b->buf; // If we want the difference image back - if(diff != NULL) + if (diff != NULL) { diff_buf = (int16_t *)diff->buf; + } // Go trough the imagge pixels and calculate the difference - for(uint16_t x = 0; x < img_b->w; x++) { - for(uint16_t y = 0; y < img_b->h; y++) { - int16_t diff_c = img_a_buf[(y+1)*img_a->w +(x+1)] - img_b_buf[y*img_b->w +x]; - sum_diff2 += diff_c*diff_c; + for (uint16_t x = 0; x < img_b->w; x++) { + for (uint16_t y = 0; y < img_b->h; y++) { + int16_t diff_c = img_a_buf[(y + 1) * img_a->w + (x + 1)] - img_b_buf[y * img_b->w + x]; + sum_diff2 += diff_c * diff_c; // Set the difference image - if(diff_buf != NULL) - diff_buf[y*diff->w + x] = diff_c; + if (diff_buf != NULL) { + diff_buf[y * diff->w + x] = diff_c; + } } } @@ -393,18 +397,20 @@ int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct imag int16_t *mult_buf = NULL; // When we want an output - if(mult != NULL) + if (mult != NULL) { mult_buf = (int16_t *)mult->buf; + } // Calculate the multiplication - for(uint16_t x = 0; x < img_a->w; x++) { - for(uint16_t y = 0; y < img_a->h; y++) { - int16_t mult_c = img_a_buf[y*img_a->w +x] * img_b_buf[y*img_b->w +x]; + for (uint16_t x = 0; x < img_a->w; x++) { + for (uint16_t y = 0; y < img_a->h; y++) { + int16_t mult_c = img_a_buf[y * img_a->w + x] * img_b_buf[y * img_b->w + x]; sum += mult_c; 
// Set the difference image - if(mult_buf != NULL) - mult_buf[y*mult->w + x] = mult_c; + if (mult_buf != NULL) { + mult_buf[y * mult->w + x] = mult_c; + } } } @@ -422,15 +428,15 @@ int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct imag void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt) { uint8_t *img_buf = (uint8_t *)img->buf; - uint8_t pixel_width = (img->type == IMAGE_YUV422)? 2 : 1; + uint8_t pixel_width = (img->type == IMAGE_YUV422) ? 2 : 1; // Go trough all points and color them - for(int i = 0; i < points_cnt; i++) { - uint32_t idx = pixel_width*points[i].y*img->w + points[i].x*pixel_width; + for (int i = 0; i < points_cnt; i++) { + uint32_t idx = pixel_width * points[i].y * img->w + points[i].x * pixel_width; img_buf[idx] = 255; // YUV422 consists of 2 pixels - if(img->type == IMAGE_YUV422) { + if (img->type == IMAGE_YUV422) { idx++; img_buf[idx] = 255; } @@ -447,7 +453,7 @@ void image_show_points(struct image_t *img, struct point_t *points, uint16_t poi void image_show_flow(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor) { // Go through all the points - for(uint16_t i = 0; i < points_cnt; i++) { + for (uint16_t i = 0; i < points_cnt; i++) { // Draw a line from the original position with the flow vector struct point_t from = { vectors[i].pos.x / subpixel_factor, @@ -469,9 +475,9 @@ void image_show_flow(struct image_t *img, struct flow_t *vectors, uint16_t point */ void image_draw_line(struct image_t *img, struct point_t *from, struct point_t *to) { - int xerr=0, yerr=0; + int xerr = 0, yerr = 0; uint8_t *img_buf = (uint8_t *)img->buf; - uint8_t pixel_width = (img->type == IMAGE_YUV422)? 2 : 1; + uint8_t pixel_width = (img->type == IMAGE_YUV422) ? 2 : 1; uint16_t startx = from->x; uint16_t starty = from->y; @@ -484,43 +490,43 @@ void image_draw_line(struct image_t *img, struct point_t *from, struct point_t * line. */ int8_t incx, incy; - if(delta_x>0) incx=1; - else if(delta_x==0) incx=0; - else incx=-1; + if (delta_x > 0) { incx = 1; } + else if (delta_x == 0) { incx = 0; } + else { incx = -1; } - if(delta_y>0) incy=1; - else if(delta_y==0) incy=0; - else incy=-1; + if (delta_y > 0) { incy = 1; } + else if (delta_y == 0) { incy = 0; } + else { incy = -1; } /* determine which distance is greater */ uint16_t distance = 0; delta_x = abs(delta_x); delta_y = abs(delta_y); - if(delta_x > delta_y) distance = delta_x*20; - else distance = delta_y*20; + if (delta_x > delta_y) { distance = delta_x * 20; } + else { distance = delta_y * 20; } /* draw the line */ - for(uint16_t t = 0; starty >= 0 && starty < img->h && startx >= 0 && startx < img->w && t <= distance+1; t++) { - img_buf[img->w*pixel_width*starty + startx*pixel_width] = (t <= 3)? 0 : 255; + for (uint16_t t = 0; starty >= 0 && starty < img->h && startx >= 0 && startx < img->w && t <= distance + 1; t++) { + img_buf[img->w * pixel_width * starty + startx * pixel_width] = (t <= 3) ? 0 : 255; - if(img->type == IMAGE_YUV422) { - img_buf[img->w*pixel_width*starty + startx*pixel_width +1] = 255; + if (img->type == IMAGE_YUV422) { + img_buf[img->w * pixel_width * starty + startx * pixel_width + 1] = 255; - if(startx+1 < img->w) { - img_buf[img->w*pixel_width*starty + startx*pixel_width +2] = (t <= 3)? 0 : 255; - img_buf[img->w*pixel_width*starty + startx*pixel_width +3] = 255; - } + if (startx + 1 < img->w) { + img_buf[img->w * pixel_width * starty + startx * pixel_width + 2] = (t <= 3) ? 
0 : 255;
+      img_buf[img->w * pixel_width * starty + startx * pixel_width + 3] = 255;
     }
+  }

-    xerr += delta_x;
-    yerr += delta_y;
-    if(xerr > distance) {
-      xerr -= distance;
-      startx += incx;
-    }
-    if(yerr > distance) {
-      yerr -= distance;
-      starty += incy;
-    }
+    xerr += delta_x;
+    yerr += delta_y;
+    if (xerr > distance) {
+      xerr -= distance;
+      startx += incx;
+    }
+    if (yerr > distance) {
+      yerr -= distance;
+      starty += incy;
+    }
   }
 }
diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c
index 5db2f789943..ac4f9b52c4a 100644
--- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c
+++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.c
@@ -51,8 +51,7 @@
  * @return The vectors from the original *points in subpixels
  */
 struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt,
-                           uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points)
-{
+                           uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points) {
   // A straightforward one-level implementation of Lucas-Kanade.
   // For all points:
   // (1) determine the subpixel neighborhood in the old image
@@ -72,7 +71,7 @@ struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, str
   // determine patch sizes and initialize neighborhoods
   uint16_t patch_size = 2 * half_window_size;
-  uint32_t error_threshold = (25 * 25) * (patch_size * patch_size);
+  uint32_t error_threshold = (25 * 25) *(patch_size *patch_size);
   uint16_t padded_patch_size = patch_size + 2;

   // Create the window images
@@ -84,22 +83,21 @@ struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, str
   image_create(&window_diff, patch_size, patch_size, IMAGE_GRADIENT);

   // Calculate the amount of points to skip
-  float skip_points = (points_orig > max_points)? points_orig/max_points : 1;
+  float skip_points = (points_orig > max_points) ? points_orig / max_points : 1;

   // Go trough all points
   for (uint16_t i = 0; i < max_points && i < points_orig; i++) {
     uint16_t p = i * skip_points;

     // If the pixel is outside ROI, do not track it
-    if(points[p].x < half_window_size || (old_img->w - points[p].x) < half_window_size
-       || points[p].y < half_window_size || (old_img->h - points[p].y) < half_window_size)
-    {
+    if (points[p].x < half_window_size || (old_img->w - points[p].x) < half_window_size
+        || points[p].y < half_window_size || (old_img->h - points[p].y) < half_window_size) {
       continue;
     }

     // Convert the point to a subpixel coordinate
-    vectors[new_p].pos.x = points[p].x*subpixel_factor;
-    vectors[new_p].pos.y = points[p].y*subpixel_factor;
+    vectors[new_p].pos.x = points[p].x * subpixel_factor;
+    vectors[new_p].pos.y = points[p].y * subpixel_factor;
     vectors[new_p].flow_x = 0;
     vectors[new_p].flow_y = 0;
@@ -127,16 +125,14 @@ struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, str
     // (4) iterate over taking steps in the image to minimize the error:
     bool_t tracked = TRUE;
-    for(uint8_t it = 0; it < max_iterations; it++)
-    {
+    for (uint8_t it = 0; it < max_iterations; it++) {
       struct point_t new_point = { vectors[new_p].pos.x + vectors[new_p].flow_x,
                                    vectors[new_p].pos.y + vectors[new_p].flow_y
                                  };

       // If the pixel is outside ROI, do not track it
-      if(new_point.x/subpixel_factor < half_window_size || (old_img->w - new_point.x/subpixel_factor) < half_window_size
-         || new_point.y/subpixel_factor < half_window_size || (old_img->h - new_point.y/subpixel_factor) < half_window_size)
-      {
+      if (new_point.x / subpixel_factor < half_window_size || (old_img->w - new_point.x / subpixel_factor) < half_window_size
+          || new_point.y / subpixel_factor < half_window_size || (old_img->h - new_point.y / subpixel_factor) < half_window_size) {
         tracked = FALSE;
         break;
       }
@@ -161,12 +157,13 @@ struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, str
       vectors[new_p].flow_y += step_y;

       // Check if we exceeded the treshold
-      if((abs(step_x) + abs(step_y)) < step_threshold)
+      if ((abs(step_x) + abs(step_y)) < step_threshold) {
         break;
+      }
     }

     // If we tracked the point we update the index and the count
-    if(tracked) {
+    if (tracked) {
       new_p++;
       (*points_cnt)++;
     }
diff --git a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h
index dada79f7fdf..5a8d0b3da9a 100644
--- a/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h
+++ b/sw/airborne/modules/computer_vision/lib/vision/lucas_kanade.h
@@ -35,6 +35,6 @@
 #include "image.h"

 struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt,
-    uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points);
+                           uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points);

 #endif /* OPTIC_FLOW_INT_H */
diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c
index 75974178aad..bbe556c1241 100644
--- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c
+++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.c
@@ -163,16 +163,17 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_
   // FAST corner detection (TODO: non fixed threashold)
   struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
-               20, 20, &result->corner_cnt);
+                                         20, 20, &result->corner_cnt);

   // Adaptive threshold
-  if(opticflow->fast9_adaptive) {
+  if (opticflow->fast9_adaptive) {
     // Decrease and increase the threshold based on previous values
-    if(result->corner_cnt < 40 && opticflow->fast9_threshold > 5)
+    if (result->corner_cnt < 40 && opticflow->fast9_threshold > 5) {
       opticflow->fast9_threshold--;
-    else if(result->corner_cnt > 50 && opticflow->fast9_threshold < 60)
+    } else if (result->corner_cnt > 50 && opticflow->fast9_threshold < 60) {
       opticflow->fast9_threshold++;
+    }
   }

 #if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS
@@ -180,7 +181,7 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_
 #endif

   // Check if we found some corners to track
-  if(result->corner_cnt < 1) {
+  if (result->corner_cnt < 1) {
     free(corners);
     image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
     return;
@@ -193,8 +194,8 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_
   // Execute a Lucas Kanade optical flow
   result->tracked_cnt = result->corner_cnt;
   struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt,
-                                       opticflow->window_size/2, opticflow->subpixel_factor, opticflow->max_iterations,
-                                       opticflow->threshold_vec, opticflow->max_track_corners);
+                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
+                                       opticflow->threshold_vec, opticflow->max_track_corners);

 #if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
   image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
@@ -202,24 +203,24 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_

   // Get the median flow
   qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
-  if(result->tracked_cnt == 0) {
+  if (result->tracked_cnt == 0) {
     // We got no flow
     result->flow_x = 0;
     result->flow_y = 0;
-  } else if(result->tracked_cnt > 3) {
+  } else if (result->tracked_cnt > 3) {
     // Take the average of the 3 median points
-    result->flow_x = vectors[result->tracked_cnt/2 -1].flow_x;
-    result->flow_y = vectors[result->tracked_cnt/2 -1].flow_y;
-    result->flow_x += vectors[result->tracked_cnt/2].flow_x;
-    result->flow_y += vectors[result->tracked_cnt/2].flow_y;
-    result->flow_x += vectors[result->tracked_cnt/2 +1].flow_x;
-    result->flow_y += vectors[result->tracked_cnt/2 +1].flow_y;
+    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
+    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
+    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
+    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
+    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
+    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
     result->flow_x /= 3;
     result->flow_y /= 3;
   } else {
     // Take the median point
-    result->flow_x = vectors[result->tracked_cnt/2].flow_x;
-    result->flow_y = vectors[result->tracked_cnt/2].flow_y;
+    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
+    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
   }

   // Flow Derotation
@@ -251,8 +252,8 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_
 static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
 {
   uint32_t msec;
-  msec=(finishtime->tv_sec-starttime->tv_sec)*1000;
-  msec+=(finishtime->tv_usec-starttime->tv_usec)/1000;
+  msec = (finishtime->tv_sec - starttime->tv_sec) * 1000;
+  msec += (finishtime->tv_usec - starttime->tv_usec) / 1000;
   return msec;
 }
@@ -267,5 +268,5 @@ static int cmp_flow(const void *a, const void *b)
 {
   const struct flow_t *a_p = (const struct flow_t *)a;
   const struct flow_t *b_p = (const struct flow_t *)b;
-  return (a_p->flow_x*a_p->flow_x + a_p->flow_y*a_p->flow_y) - (b_p->flow_x*b_p->flow_x + b_p->flow_y*b_p->flow_y);
+  return (a_p->flow_x * a_p->flow_x + a_p->flow_y * a_p->flow_y) - (b_p->flow_x * b_p->flow_x + b_p->flow_y * b_p->flow_y);
 }
diff --git a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h
index f30c0dc566a..a1ce65b5708 100644
--- a/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h
+++ b/sw/airborne/modules/computer_vision/opticflow/opticflow_calculator.h
@@ -34,8 +34,7 @@
 #include "lib/vision/image.h"
 #include "lib/v4l/v4l2.h"

-struct opticflow_t
-{
+struct opticflow_t {
   bool_t got_first_img;   //< If we got a image to work with
   float prev_phi;         //< Phi from the previous image frame
   float prev_theta;       //< Theta from the previous image frame
diff --git a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c
index cff2aba7842..16c53e8cd3f 100644
--- a/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c
+++ b/sw/airborne/modules/computer_vision/opticflow/stabilization_opticflow.c
@@ -148,9 +148,9 @@ void stabilization_opticflow_update(struct opticflow_result_t *result)
   /* Calculate the commands */
   opticflow_stab.cmd.phi = opticflow_stab.phi_pgain * err_vx / 100
-    + opticflow_stab.phi_igain * opticflow_stab.err_vx_int;
+                           + opticflow_stab.phi_igain * opticflow_stab.err_vx_int;
   opticflow_stab.cmd.theta = -(opticflow_stab.theta_pgain * err_vy / 100
-    + opticflow_stab.theta_igain * opticflow_stab.err_vy_int);
+                               + opticflow_stab.theta_igain * opticflow_stab.err_vy_int);

   /* Bound the roll and pitch commands */
   BoundAbs(opticflow_stab.cmd.phi, CMD_OF_SAT);
diff --git a/sw/airborne/modules/computer_vision/opticflow_module.c b/sw/airborne/modules/computer_vision/opticflow_module.c
index 654e5416737..1ad3e456400 100644
--- a/sw/airborne/modules/computer_vision/opticflow_module.c
+++ b/sw/airborne/modules/computer_vision/opticflow_module.c
@@ -88,12 +88,12 @@ static void opticflow_telem_send(struct transport_tx *trans, struct link_device
 {
   pthread_mutex_lock(&opticflow_mutex);
   pprz_msg_send_OPTIC_FLOW_EST(trans, dev, AC_ID,
-      &opticflow_result.fps, &opticflow_result.corner_cnt,
-      &opticflow_result.tracked_cnt, &opticflow_result.flow_x,
-      &opticflow_result.flow_y, &opticflow_result.flow_der_x,
-      &opticflow_result.flow_der_y, &opticflow_result.vel_x,
-      &opticflow_result.vel_y,
-      &opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta);
+                               &opticflow_result.fps, &opticflow_result.corner_cnt,
+                               &opticflow_result.tracked_cnt, &opticflow_result.flow_x,
+                               &opticflow_result.flow_y, &opticflow_result.flow_der_x,
+                               &opticflow_result.flow_der_y, &opticflow_result.vel_x,
+                               &opticflow_result.vel_y,
+                               &opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta);
   pthread_mutex_unlock(&opticflow_mutex);
 }
 #endif
@@ -149,7 +149,7 @@ void opticflow_module_run(void)
   opticflow_state.theta = stateGetNedToBodyEulers_f()->theta;

   // Update the stabilization loops on the current calculation
-  if(opticflow_got_result) {
+  if (opticflow_got_result) {
     stabilization_opticflow_update(&opticflow_result);
     opticflow_got_result = FALSE;
   }
@@ -162,7 +162,7 @@ void opticflow_module_run(void)
 void opticflow_module_start(void)
 {
   // Check if we are not already running
-  if(opticflow_calc_thread != 0) {
+  if (opticflow_calc_thread != 0) {
     printf("[opticflow_module] Opticflow already started!\n");
     return;
   }
@@ -191,9 +191,10 @@ void opticflow_module_stop(void)
  * calculator based on Lucas Kanade
  */
 #include "errno.h"
-static void *opticflow_module_calc(void *data __attribute__((unused))) {
+static void *opticflow_module_calc(void *data __attribute__((unused)))
+{
   // Start the streaming on the V4L2 device
-  if(!v4l2_start_capture(opticflow_dev)) {
+  if (!v4l2_start_capture(opticflow_dev)) {
     printf("[opticflow_module] Could not start capture of the camera\n");
     return 0;
   }
@@ -205,7 +206,7 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) {
 #endif

   /* Main loop of the optical flow calculation */
-  while(TRUE) {
+  while (TRUE) {
     // Try to fetch an image
     struct image_t img;
     v4l2_image_get(opticflow_dev, &img);
diff --git a/sw/airborne/modules/computer_vision/viewvideo.c b/sw/airborne/modules/computer_vision/viewvideo.c
index b79f0987119..aed05036fe3 100644
--- a/sw/airborne/modules/computer_vision/viewvideo.c
+++ b/sw/airborne/modules/computer_vision/viewvideo.c
@@ -138,9 +138,9 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
   // Resize image if needed
   struct image_t img_small;
   image_create(&img_small,
-               viewvideo.dev->w/viewvideo.downsize_factor,
-               viewvideo.dev->h/viewvideo.downsize_factor,
-               IMAGE_YUV422);
+               viewvideo.dev->w / viewvideo.downsize_factor,
+               viewvideo.dev->h / viewvideo.downsize_factor,
+               IMAGE_YUV422);

   // Create the JPEG encoded image
   struct image_t img_jpeg;
@@ -214,10 +214,9 @@ static void *viewvideo_thread(void *data __attribute__((unused)))

     // Open process to send using netcat (in a fork because sometimes kills itself???)
     pid_t pid = fork();
-    if(pid < 0) {
+    if (pid < 0) {
       printf("[viewvideo] Could not create netcat fork.\n");
-    }
-    else if(pid ==0) {
+    } else if (pid == 0) {
       // We are the child and want to send the image
       FILE *netcat = popen(nc_cmd, "w");
       if (netcat != NULL) {
@@ -229,8 +228,7 @@ static void *viewvideo_thread(void *data __attribute__((unused)))

       // Exit the program since we don't want to continue after transmitting
       exit(0);
-    }
-    else {
+    } else {
       // We want to wait until the child is finished
       wait(NULL);
     }