@@ -269,6 +269,101 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
269269 return ret ;
270270}
271271
272+ /* This function must be called with video->req_lock held. */
273+ static int uvcg_video_usb_req_queue (struct uvc_video * video ,
274+ struct usb_request * req , bool queue_to_ep )
275+ {
276+ bool is_bulk = video -> max_payload_size ;
277+ struct list_head * list = NULL ;
278+
279+ if (!video -> is_enabled ) {
280+ uvc_video_free_request (req -> context , video -> ep );
281+ return - ENODEV ;
282+ }
283+ if (queue_to_ep ) {
284+ struct uvc_request * ureq = req -> context ;
285+ /*
286+ * With USB3 handling more requests at a higher speed, we can't
287+ * afford to generate an interrupt for every request. Decide to
288+ * interrupt:
289+ *
290+ * - When no more requests are available in the free queue, as
291+ * this may be our last chance to refill the endpoint's
292+ * request queue.
293+ *
294+ * - When this request is the last request for the video
295+ * buffer, as we want to start sending the next video buffer
296+ * ASAP in case it doesn't get started already in the next
297+ * iteration of this loop.
298+ *
299+ * - Four times over the length of the requests queue (as
300+ * indicated by video->uvc_num_requests), as a trade-off
301+ * between latency and interrupt load.
302+ */
303+ if (list_empty (& video -> req_free ) || ureq -> last_buf ||
304+ !(video -> req_int_count %
305+ DIV_ROUND_UP (video -> uvc_num_requests , 4 ))) {
306+ video -> req_int_count = 0 ;
307+ req -> no_interrupt = 0 ;
308+ } else {
309+ req -> no_interrupt = 1 ;
310+ }
311+ video -> req_int_count ++ ;
312+ return uvcg_video_ep_queue (video , req );
313+ }
314+ /*
315+ * If we're not queuing to the ep, for isoc we're queuing
316+ * to the req_ready list, otherwise req_free.
317+ */
318+ list = is_bulk ? & video -> req_free : & video -> req_ready ;
319+ list_add_tail (& req -> list , list );
320+ return 0 ;
321+ }
322+
323+ /*
324+ * Must only be called from uvcg_video_enable - since after that we only want to
325+ * queue requests to the endpoint from the uvc_video_complete complete handler.
326+ * This function is needed in order to 'kick start' the flow of requests from
327+ * gadget driver to the USB controller.
328+ */
329+ static void uvc_video_ep_queue_initial_requests (struct uvc_video * video )
330+ {
331+ struct usb_request * req = NULL ;
332+ unsigned long flags = 0 ;
333+ unsigned int count = 0 ;
334+ int ret = 0 ;
335+
336+ /*
337+ * We only queue half of the free list since we still want to have
338+ * some free usb_requests in the free list for the video_pump async_wq
339+ * thread to encode uvc buffers into. Otherwise we could get into a
340+ * situation where the free list does not have any usb requests to
341+ * encode into - we always end up queueing 0 length requests to the
342+ * endpoint.
343+ */
344+ unsigned int half_list_size = video -> uvc_num_requests / 2 ;
345+
346+ spin_lock_irqsave (& video -> req_lock , flags );
347+ /*
348+ * Take these requests off the free list and queue them all to the
349+ * endpoint. Since we queue 0 length requests with the req_lock held,
350+ * there isn't any 'data' race involved here with the complete handler.
351+ */
352+ while (count < half_list_size ) {
353+ req = list_first_entry (& video -> req_free , struct usb_request ,
354+ list );
355+ list_del (& req -> list );
356+ req -> length = 0 ;
357+ ret = uvcg_video_ep_queue (video , req );
358+ if (ret < 0 ) {
359+ uvcg_queue_cancel (& video -> queue , 0 );
360+ break ;
361+ }
362+ count ++ ;
363+ }
364+ spin_unlock_irqrestore (& video -> req_lock , flags );
365+ }
366+
272367static void
273368uvc_video_complete (struct usb_ep * ep , struct usb_request * req )
274369{
@@ -277,6 +372,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
277372 struct uvc_video_queue * queue = & video -> queue ;
278373 struct uvc_buffer * last_buf ;
279374 unsigned long flags ;
375+ bool is_bulk = video -> max_payload_size ;
376+ int ret = 0 ;
280377
281378 spin_lock_irqsave (& video -> req_lock , flags );
282379 if (!video -> is_enabled ) {
@@ -330,8 +427,45 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
330427 * back to req_free
331428 */
332429 if (video -> is_enabled ) {
333- list_add_tail (& req -> list , & video -> req_free );
334- queue_work (video -> async_wq , & video -> pump );
430+ /*
431+ * Here we check whether any request is available in the ready
432+ * list. If it is, queue it to the ep and add the current
433+ * usb_request to the req_free list - for video_pump to fill in.
434+ * Otherwise, just use the current usb_request to queue a 0
435+ * length request to the ep. Since we always add to the req_free
436+ * list if we dequeue from the ready list, there will never
437+ * be a situation where the req_free list is completely out of
438+ * requests and cannot recover.
439+ */
440+ struct usb_request * to_queue = req ;
441+
442+ to_queue -> length = 0 ;
443+ if (!list_empty (& video -> req_ready )) {
444+ to_queue = list_first_entry (& video -> req_ready ,
445+ struct usb_request , list );
446+ list_del (& to_queue -> list );
447+ list_add_tail (& req -> list , & video -> req_free );
448+ /*
449+ * Queue work to the wq as well since it is possible that a
450+ * buffer may not have been completely encoded with the set of
451+ * in-flight usb requests for which the complete callbacks are
452+ * firing.
453+ * In that case, if we do not queue work to the worker thread,
454+ * the buffer will never be marked as complete - and therefore
455+ * not be returned to userspace. As a result,
456+ * dequeue -> queue -> dequeue flow of uvc buffers will not
457+ * happen.
458+ */
459+ queue_work (video -> async_wq , & video -> pump );
460+ }
461+ /*
462+ * Queue to the endpoint. The actual queueing to ep will
463+ * only happen on one thread - the async_wq for bulk endpoints
464+ * and this thread for isoc endpoints.
465+ */
466+ ret = uvcg_video_usb_req_queue (video , to_queue , !is_bulk );
467+ if (ret < 0 )
468+ uvcg_queue_cancel (queue , 0 );
335469 } else {
336470 uvc_video_free_request (ureq , ep );
337471 }
@@ -348,6 +482,7 @@ uvc_video_free_requests(struct uvc_video *video)
348482
349483 INIT_LIST_HEAD (& video -> ureqs );
350484 INIT_LIST_HEAD (& video -> req_free );
485+ INIT_LIST_HEAD (& video -> req_ready );
351486 video -> req_size = 0 ;
352487 return 0 ;
353488}
@@ -425,8 +560,7 @@ static void uvcg_video_pump(struct work_struct *work)
425560 struct usb_request * req = NULL ;
426561 struct uvc_buffer * buf ;
427562 unsigned long flags ;
428- bool buf_done ;
429- int ret ;
563+ int ret = 0 ;
430564
431565 while (true) {
432566 if (!video -> ep -> enabled )
@@ -455,15 +589,6 @@ static void uvcg_video_pump(struct work_struct *work)
455589
456590 if (buf != NULL ) {
457591 video -> encode (req , video , buf );
458- buf_done = buf -> state == UVC_BUF_STATE_DONE ;
459- } else if (!(queue -> flags & UVC_QUEUE_DISCONNECTED ) && !is_bulk ) {
460- /*
461- * No video buffer available; the queue is still connected and
462- * we're transferring over ISOC. Queue a 0 length request to
463- * prevent missed ISOC transfers.
464- */
465- req -> length = 0 ;
466- buf_done = false;
467592 } else {
468593 /*
469594 * Either the queue has been disconnected or no video buffer
@@ -474,45 +599,25 @@ static void uvcg_video_pump(struct work_struct *work)
474599 break ;
475600 }
476601
477- /*
478- * With USB3 handling more requests at a higher speed, we can't
479- * afford to generate an interrupt for every request. Decide to
480- * interrupt:
481- *
482- * - When no more requests are available in the free queue, as
483- * this may be our last chance to refill the endpoint's
484- * request queue.
485- *
486- * - When this is request is the last request for the video
487- * buffer, as we want to start sending the next video buffer
488- * ASAP in case it doesn't get started already in the next
489- * iteration of this loop.
490- *
491- * - Four times over the length of the requests queue (as
492- * indicated by video->uvc_num_requests), as a trade-off
493- * between latency and interrupt load.
494- */
495- if (list_empty (& video -> req_free ) || buf_done ||
496- !(video -> req_int_count %
497- DIV_ROUND_UP (video -> uvc_num_requests , 4 ))) {
498- video -> req_int_count = 0 ;
499- req -> no_interrupt = 0 ;
500- } else {
501- req -> no_interrupt = 1 ;
502- }
503-
504- /* Queue the USB request */
505- ret = uvcg_video_ep_queue (video , req );
506602 spin_unlock_irqrestore (& queue -> irqlock , flags );
507603
604+ spin_lock_irqsave (& video -> req_lock , flags );
605+ /* For bulk endpoints we queue from the worker thread
606+ * since we would preferably not want to wait on requests
607+ * to be ready, in the uvcg_video_complete() handler.
608+ * For isoc endpoints we add the request to the ready list
609+ * and only queue it to the endpoint from the complete handler.
610+ */
611+ ret = uvcg_video_usb_req_queue (video , req , is_bulk );
612+ spin_unlock_irqrestore (& video -> req_lock , flags );
613+
508614 if (ret < 0 ) {
509615 uvcg_queue_cancel (queue , 0 );
510616 break ;
511617 }
512618
513- /* Endpoint now owns the request */
619+ /* The request is owned by the endpoint / ready list. */
514620 req = NULL ;
515- video -> req_int_count ++ ;
516621 }
517622
518623 if (!req )
@@ -581,8 +686,14 @@ uvcg_video_disable(struct uvc_video *video)
581686 uvc_video_free_request (req -> context , video -> ep );
582687 }
583688
689+ list_for_each_entry_safe (req , temp , & video -> req_ready , list ) {
690+ list_del (& req -> list );
691+ uvc_video_free_request (req -> context , video -> ep );
692+ }
693+
584694 INIT_LIST_HEAD (& video -> ureqs );
585695 INIT_LIST_HEAD (& video -> req_free );
696+ INIT_LIST_HEAD (& video -> req_ready );
586697 video -> req_size = 0 ;
587698 spin_unlock_irqrestore (& video -> req_lock , flags );
588699
@@ -636,7 +747,7 @@ int uvcg_video_enable(struct uvc_video *video)
636747
637748 video -> req_int_count = 0 ;
638749
639- queue_work (video -> async_wq , & video -> pump );
750+ uvc_video_ep_queue_initial_requests (video );
640751
641752 return ret ;
642753}
@@ -649,6 +760,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
649760 video -> is_enabled = false;
650761 INIT_LIST_HEAD (& video -> ureqs );
651762 INIT_LIST_HEAD (& video -> req_free );
763+ INIT_LIST_HEAD (& video -> req_ready );
652764 spin_lock_init (& video -> req_lock );
653765 INIT_WORK (& video -> pump , uvcg_video_pump );
654766
0 commit comments