Supported dynamic resolution change via DV timings & timeout
input_uvc had two problems:

1) It hung in ioctl(VIDIOC_DQBUF) if you disconnected the camera cable (or the HDMI cable from a capture board) while mjpg-streamer was running. I added a select() call before this syscall, and now mjpg-streamer exits after a timeout (5 seconds by default). The timeout can be changed with the -timeout option.
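
The core of the first fix looks roughly like this (an illustrative sketch, not the exact commit code; dqbuf_with_timeout is a made-up helper name):

/* Illustrative sketch: wait on the device fd with select() before
 * VIDIOC_DQBUF, so a dead cable causes a timeout instead of a hang. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <linux/videodev2.h>

static int dqbuf_with_timeout(int fd, struct v4l2_buffer *buf, unsigned int timeout_sec)
{
    fd_set rd_fds;
    struct timeval tv = { .tv_sec = timeout_sec, .tv_usec = 0 };

    FD_ZERO(&rd_fds);
    FD_SET(fd, &rd_fds);

    int sel = select(fd + 1, &rd_fds, NULL, NULL, &tv);
    if (sel < 0) {
        if (errno == EINTR)
            return 1;                           /* interrupted by a signal, caller may retry */
        fprintf(stderr, "select() error: %s\n", strerror(errno));
        return -1;
    }
    if (sel == 0) {
        fprintf(stderr, "select() timeout\n");  /* no data: cable likely disconnected */
        return -1;
    }
    return ioctl(fd, VIDIOC_DQBUF, buf);        /* fd is readable, dequeue a frame */
}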

2) When using video capture devices, the resolution can change spontaneously when the source changes. I added support for DV timings events, and now mjpg-streamer no longer hangs when the source changes or the cable is disconnected and reconnected - it switches to the new resolution automatically and continues streaming.
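
In V4L2 terms the idea is roughly the following (a simplified sketch, assuming the driver emits V4L2_EVENT_SOURCE_CHANGE; the helper names are illustrative and not taken from the commit):

/* Simplified sketch of the DV timings logic (names and structure are
 * illustrative, not the exact commit code). */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver to report source changes (signal lost/regained, new mode). */
static int subscribe_source_change(int fd)
{
    struct v4l2_event_subscription sub;
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}

/* Called when select() reports an exception on the fd: dequeue one event
 * and, on a source change, re-query and apply the new DV timings. */
static int handle_source_change(int fd, unsigned int *width, unsigned int *height)
{
    struct v4l2_event ev;
    struct v4l2_dv_timings timings;

    if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
        return -1;
    if (ev.type != V4L2_EVENT_SOURCE_CHANGE)
        return 0;

    memset(&timings, 0, sizeof(timings));
    if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0)
        return -1;                               /* no valid signal yet */
    if (ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings) < 0)
        return -1;

    *width = timings.bt.width;                   /* new active resolution */
    *height = timings.bt.height;
    fprintf(stderr, "source changed: %ux%u\n", *width, *height);

    /* The caller then has to stop streaming, renegotiate the format with
     * VIDIOC_S_FMT, re-map the buffers and restart streaming. */
    return 0;
}

In the commit itself, that renegotiation step is what setResolution() in the diff below takes care of.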

Both changes are necessary for the Auvidea B101 board to work:
https://auvidea.com/b101-hdmi-to-csi-2-bridge-15-pin-fpc/
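
For reference, a typical invocation with the new options could look like this (device path, resolution and port are just example values):

mjpg_streamer -i "input_uvc.so -d /dev/video0 -r 1280x720 -dv_timings -timeout 5" \
              -o "output_http.so -p 8080"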

This is a very promising device for video processing. It is officially supported by the Raspberry Pi team:
https://www.raspberrypi.org/forums/viewtopic.php?f=38&t=120702
mdevaev committed Sep 12, 2018
1 parent f387bb4 commit 29fb2b9
Showing 3 changed files with 269 additions and 129 deletions.
235 changes: 163 additions & 72 deletions mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c
@@ -74,6 +74,8 @@ static unsigned int every = 1;
static int wantTimestamp = 0;
static struct timeval timestamp;
static int softfps = -1;
static unsigned int timeout = 5;
static unsigned int dv_timings = 0;

static const struct {
const char * k;
@@ -211,6 +213,8 @@ int input_init(input_parameter *param, int id)
{"cb", required_argument, 0, 0},
{"timestamp", no_argument, 0, 0},
{"softfps", required_argument, 0, 0},
{"timeout", required_argument, 0, 0},
{"dv_timings", no_argument, 0, 0},
{0, 0, 0, 0}
};

@@ -373,6 +377,14 @@ int input_init(input_parameter *param, int id)
case 40:
softfps = atoi(optarg);
break;
case 41:
DBG("case 41\n");
timeout = MAX(atoi(optarg), 1);
break;
case 42:
DBG("case 42\n");
dv_timings = 1;
break;
default:
DBG("default case\n");
help();
@@ -430,6 +442,7 @@ int input_init(input_parameter *param, int id)

DBG("vdIn pn: %d\n", id);
/* open video device and prepare data structure */
pctx->videoIn->dv_timings = dv_timings;
if(init_videoIn(pctx->videoIn, dev, width, height, fps, format, 1, pctx->pglobal, id, tvnorm) < 0) {
IPRINT("init_VideoIn failed\n");
closelog();
@@ -531,6 +544,8 @@ void help(void)
" [-timestamp ]..........: Populate frame timestamp with system time\n" \
" [-softfps] ............: Drop frames to try and achieve this fps\n" \
" set your camera to its maximum fps to avoid stuttering\n" \
" [-timeout] ............: Timeout for device querying\n" \
" [-dv_timings] .........: Enable DV timings queriyng and events processing\n" \
" ---------------------------------------------------------------\n");

fprintf(stderr, "\n"\
@@ -648,100 +663,176 @@ void *cam_thread(void *arg)
pcontext->videoIn->frame_period_time = 1000/softfps;
}

if (video_enable(pcontext->videoIn)) {
IPRINT("Can\'t enable video in first time\n");
exit(EXIT_FAILURE);
}

while(!pglobal->stop) {
while(pcontext->videoIn->streamingState == STREAMING_PAUSED) {
usleep(1); // maybe not the best way so FIXME
}

/* grab a frame */
if(uvcGrab(pcontext->videoIn) < 0) {
IPRINT("Error grabbing frames\n");
exit(EXIT_FAILURE);
}
fd_set fds[3];
fd_set *rd_fds = &fds[0]; /* for capture */
fd_set *ex_fds = &fds[1]; /* for capture */
fd_set *wr_fds = &fds[2]; /* for output */

if ( every_count < every - 1 ) {
DBG("dropping %d frame for every=%d\n", every_count + 1, every);
++every_count;
continue;
} else {
every_count = 0;
if (rd_fds) {
FD_ZERO(rd_fds);
FD_SET(pcontext->videoIn->fd, rd_fds);
}

//DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);

/*
* Workaround for broken, corrupted frames:
* Under low light conditions corrupted frames may get captured.
* The good thing is such frames are quite small compared to the regular pictures.
* For example a VGA (640x480) webcam picture is normally >= 8kByte large,
* corrupted frames are smaller.
*/
if(pcontext->videoIn->tmpbytesused < minimum_size) {
DBG("dropping too small frame, assuming it as broken\n");
continue;
if (ex_fds) {
FD_ZERO(ex_fds);
FD_SET(pcontext->videoIn->fd, ex_fds);
}

// Overwrite timestamp (e.g. where camera is providing 0 values)
// Do it here so that this timestamp can be used in frameskipping
if(wantTimestamp)
{
gettimeofday(&timestamp, NULL);
pcontext->videoIn->tmptimestamp = timestamp;
if (wr_fds) {
// FD_ZERO(wr_fds);
// FD_SET(pcontext->videoIn->vd->fd, wr_fds);
}

// use software frame dropping on low fps
if (pcontext->videoIn->soft_framedrop == 1) {
unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
(pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms
struct timeval tv;
tv.tv_sec = timeout;
tv.tv_usec = 0;

int sel = select(pcontext->videoIn->fd + 1, rd_fds, wr_fds, ex_fds, &tv);
DBG("select() = %d\n", sel);

// if the requested time has not elapsed, skip the frame
if ((current - last) < pcontext->videoIn->frame_period_time) {
DBG("Last frame taken %d ms ago so drop it\n", (current - last));
if (sel < 0) {
if (errno == EINTR) {
goto other_select_handlers;
}
IPRINT("select() error: %s\n", strerror(errno));
exit(EXIT_FAILURE);
} else if (sel == 0) {
IPRINT("select() timeout\n");
if (dv_timings) {
setResolution(pcontext->videoIn, pcontext->videoIn->width, pcontext->videoIn->height);
continue;
} else {
exit(EXIT_FAILURE);
}
DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
}

/* copy JPG picture to global buffer */
pthread_mutex_lock(&pglobal->in[pcontext->id].db);
if (rd_fds && FD_ISSET(pcontext->videoIn->fd, rd_fds)) {
DBG("Grabbing a frame...\n");
/* grab a frame */
if(uvcGrab(pcontext->videoIn) < 0) {
IPRINT("Error grabbing frames\n");
exit(EXIT_FAILURE);
}

/*
* If capturing in YUV mode convert to JPEG now.
* This compression requires many CPU cycles, so try to avoid YUV format.
* Getting JPEGs straight from the webcam, is one of the major advantages of
* Linux-UVC compatible devices.
*/
#ifndef NO_LIBJPEG
if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
DBG("compressing frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
} else {
#endif
DBG("copying frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
#ifndef NO_LIBJPEG
}
#endif
if ( every_count < every - 1 ) {
DBG("dropping %d frame for every=%d\n", every_count + 1, every);
++every_count;
goto other_select_handlers;
} else {
every_count = 0;
}

//DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);

/*
* Workaround for broken, corrupted frames:
* Under low light conditions corrupted frames may get captured.
* The good thing is such frames are quite small compared to the regular pictures.
* For example a VGA (640x480) webcam picture is normally >= 8kByte large,
* corrupted frames are smaller.
*/
if(pcontext->videoIn->tmpbytesused < minimum_size) {
DBG("dropping too small frame, assuming it as broken\n");
goto other_select_handlers;
}

// Overwrite timestamp (e.g. where camera is providing 0 values)
// Do it here so that this timestamp can be used in frameskipping
if(wantTimestamp)
{
gettimeofday(&timestamp, NULL);
pcontext->videoIn->tmptimestamp = timestamp;
}

// use software frame dropping on low fps
if (pcontext->videoIn->soft_framedrop == 1) {
unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
(pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms

// if the requested time has not elapsed, skip the frame
if ((current - last) < pcontext->videoIn->frame_period_time) {
DBG("Last frame taken %d ms ago so drop it\n", (current - last));
goto other_select_handlers;
}
DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
}

/* copy JPG picture to global buffer */
pthread_mutex_lock(&pglobal->in[pcontext->id].db);

/*
* If capturing in YUV mode convert to JPEG now.
* This compression requires many CPU cycles, so try to avoid YUV format.
* Getting JPEGs straight from the webcam, is one of the major advantages of
* Linux-UVC compatible devices.
*/
#ifndef NO_LIBJPEG
if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
DBG("compressing frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
} else {
#endif
DBG("copying frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
#ifndef NO_LIBJPEG
}
#endif

#if 0
/* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
}
prev_size = global->size;
/* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
}
prev_size = global->size;
#endif

/* signal fresh_frame */
pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
/* signal fresh_frame */
pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
}

other_select_handlers:

if (dv_timings) {
if (wr_fds && FD_ISSET(pcontext->videoIn->fd, wr_fds)) {
IPRINT("Writing?!\n");
}

if (ex_fds && FD_ISSET(pcontext->videoIn->fd, ex_fds)) {
IPRINT("FD exception\n");
struct v4l2_event ev;

while (!ioctl(pcontext->videoIn->fd, VIDIOC_DQEVENT, &ev)) {
switch (ev.type) {
case V4L2_EVENT_SOURCE_CHANGE:
IPRINT("V4L2_EVENT_SOURCE_CHANGE: Source changed\n");
setResolution(pcontext->videoIn, pcontext->videoIn->width, pcontext->videoIn->height);
break;
case V4L2_EVENT_EOS:
IPRINT("V4L2_EVENT_EOS\n");
break;
}
}
}
}
}

DBG("leaving input thread, calling cleanup function now\n");
