@@ -24,7 +24,6 @@ struct io_rsrc_update {
2424};
2525
2626static void io_rsrc_buf_put (struct io_ring_ctx * ctx , struct io_rsrc_put * prsrc );
27- static void io_rsrc_file_put (struct io_ring_ctx * ctx , struct io_rsrc_put * prsrc );
2827static int io_sqe_buffer_register (struct io_ring_ctx * ctx , struct iovec * iov ,
2928 struct io_mapped_ubuf * * pimu ,
3029 struct page * * last_hpage );
@@ -157,7 +156,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node)
157156
158157 switch (node -> type ) {
159158 case IORING_RSRC_FILE :
160- io_rsrc_file_put ( node -> ctx , prsrc );
159+ fput ( prsrc -> file );
161160 break ;
162161 case IORING_RSRC_BUFFER :
163162 io_rsrc_buf_put (node -> ctx , prsrc );
@@ -402,23 +401,13 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
402401 break ;
403402 }
404403 /*
405- * Don't allow io_uring instances to be registered. If
406- * UNIX isn't enabled, then this causes a reference
407- * cycle and this instance can never get freed. If UNIX
408- * is enabled we'll handle it just fine, but there's
409- * still no point in allowing a ring fd as it doesn't
410- * support regular read/write anyway.
404+ * Don't allow io_uring instances to be registered.
411405 */
412406 if (io_is_uring_fops (file )) {
413407 fput (file );
414408 err = - EBADF ;
415409 break ;
416410 }
417- err = io_scm_file_account (ctx , file );
418- if (err ) {
419- fput (file );
420- break ;
421- }
422411 * io_get_tag_slot (data , i ) = tag ;
423412 io_fixed_file_set (file_slot , file );
424413 io_file_bitmap_set (& ctx -> file_table , i );
@@ -675,22 +664,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
675664 for (i = 0 ; i < ctx -> nr_user_files ; i ++ ) {
676665 struct file * file = io_file_from_index (& ctx -> file_table , i );
677666
678- /* skip scm accounted files, they'll be freed by ->ring_sock */
679- if (!file || io_file_need_scm (file ))
667+ if (!file )
680668 continue ;
681669 io_file_bitmap_clear (& ctx -> file_table , i );
682670 fput (file );
683671 }
684672
685- #if defined(CONFIG_UNIX )
686- if (ctx -> ring_sock ) {
687- struct sock * sock = ctx -> ring_sock -> sk ;
688- struct sk_buff * skb ;
689-
690- while ((skb = skb_dequeue (& sock -> sk_receive_queue )) != NULL )
691- kfree_skb (skb );
692- }
693- #endif
694673 io_free_file_tables (& ctx -> file_table );
695674 io_file_table_set_alloc_range (ctx , 0 , 0 );
696675 io_rsrc_data_free (ctx -> file_data );
@@ -718,137 +697,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
718697 return ret ;
719698}
720699
721- /*
722- * Ensure the UNIX gc is aware of our file set, so we are certain that
723- * the io_uring can be safely unregistered on process exit, even if we have
724- * loops in the file referencing. We account only files that can hold other
725- * files because otherwise they can't form a loop and so are not interesting
726- * for GC.
727- */
728- int __io_scm_file_account (struct io_ring_ctx * ctx , struct file * file )
729- {
730- #if defined(CONFIG_UNIX )
731- struct sock * sk = ctx -> ring_sock -> sk ;
732- struct sk_buff_head * head = & sk -> sk_receive_queue ;
733- struct scm_fp_list * fpl ;
734- struct sk_buff * skb ;
735-
736- if (likely (!io_file_need_scm (file )))
737- return 0 ;
738-
739- /*
740- * See if we can merge this file into an existing skb SCM_RIGHTS
741- * file set. If there's no room, fall back to allocating a new skb
742- * and filling it in.
743- */
744- spin_lock_irq (& head -> lock );
745- skb = skb_peek (head );
746- if (skb && UNIXCB (skb ).fp -> count < SCM_MAX_FD )
747- __skb_unlink (skb , head );
748- else
749- skb = NULL ;
750- spin_unlock_irq (& head -> lock );
751-
752- if (!skb ) {
753- fpl = kzalloc (sizeof (* fpl ), GFP_KERNEL );
754- if (!fpl )
755- return - ENOMEM ;
756-
757- skb = alloc_skb (0 , GFP_KERNEL );
758- if (!skb ) {
759- kfree (fpl );
760- return - ENOMEM ;
761- }
762-
763- fpl -> user = get_uid (current_user ());
764- fpl -> max = SCM_MAX_FD ;
765- fpl -> count = 0 ;
766-
767- UNIXCB (skb ).fp = fpl ;
768- skb -> sk = sk ;
769- skb -> destructor = io_uring_destruct_scm ;
770- refcount_add (skb -> truesize , & sk -> sk_wmem_alloc );
771- }
772-
773- fpl = UNIXCB (skb ).fp ;
774- fpl -> fp [fpl -> count ++ ] = get_file (file );
775- unix_inflight (fpl -> user , file );
776- skb_queue_head (head , skb );
777- fput (file );
778- #endif
779- return 0 ;
780- }
781-
782- static __cold void io_rsrc_file_scm_put (struct io_ring_ctx * ctx , struct file * file )
783- {
784- #if defined(CONFIG_UNIX )
785- struct sock * sock = ctx -> ring_sock -> sk ;
786- struct sk_buff_head list , * head = & sock -> sk_receive_queue ;
787- struct sk_buff * skb ;
788- int i ;
789-
790- __skb_queue_head_init (& list );
791-
792- /*
793- * Find the skb that holds this file in its SCM_RIGHTS. When found,
794- * remove this entry and rearrange the file array.
795- */
796- skb = skb_dequeue (head );
797- while (skb ) {
798- struct scm_fp_list * fp ;
799-
800- fp = UNIXCB (skb ).fp ;
801- for (i = 0 ; i < fp -> count ; i ++ ) {
802- int left ;
803-
804- if (fp -> fp [i ] != file )
805- continue ;
806-
807- unix_notinflight (fp -> user , fp -> fp [i ]);
808- left = fp -> count - 1 - i ;
809- if (left ) {
810- memmove (& fp -> fp [i ], & fp -> fp [i + 1 ],
811- left * sizeof (struct file * ));
812- }
813- fp -> count -- ;
814- if (!fp -> count ) {
815- kfree_skb (skb );
816- skb = NULL ;
817- } else {
818- __skb_queue_tail (& list , skb );
819- }
820- fput (file );
821- file = NULL ;
822- break ;
823- }
824-
825- if (!file )
826- break ;
827-
828- __skb_queue_tail (& list , skb );
829-
830- skb = skb_dequeue (head );
831- }
832-
833- if (skb_peek (& list )) {
834- spin_lock_irq (& head -> lock );
835- while ((skb = __skb_dequeue (& list )) != NULL )
836- __skb_queue_tail (head , skb );
837- spin_unlock_irq (& head -> lock );
838- }
839- #endif
840- }
841-
842- static void io_rsrc_file_put (struct io_ring_ctx * ctx , struct io_rsrc_put * prsrc )
843- {
844- struct file * file = prsrc -> file ;
845-
846- if (likely (!io_file_need_scm (file )))
847- fput (file );
848- else
849- io_rsrc_file_scm_put (ctx , file );
850- }
851-
852700int io_sqe_files_register (struct io_ring_ctx * ctx , void __user * arg ,
853701 unsigned nr_args , u64 __user * tags )
854702{
@@ -897,21 +745,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
897745 goto fail ;
898746
899747 /*
900- * Don't allow io_uring instances to be registered. If UNIX
901- * isn't enabled, then this causes a reference cycle and this
902- * instance can never get freed. If UNIX is enabled we'll
903- * handle it just fine, but there's still no point in allowing
904- * a ring fd as it doesn't support regular read/write anyway.
748+ * Don't allow io_uring instances to be registered.
905749 */
906750 if (io_is_uring_fops (file )) {
907751 fput (file );
908752 goto fail ;
909753 }
910- ret = io_scm_file_account (ctx , file );
911- if (ret ) {
912- fput (file );
913- goto fail ;
914- }
915754 file_slot = io_fixed_file_slot (& ctx -> file_table , i );
916755 io_fixed_file_set (file_slot , file );
917756 io_file_bitmap_set (& ctx -> file_table , i );