@@ -338,7 +338,6 @@ struct io_ring_ctx {
 	unsigned int		drain_next: 1;
 	unsigned int		eventfd_async: 1;
 	unsigned int		restricted: 1;
-	unsigned int		sqo_dead: 1;
 	unsigned int		sqo_exec: 1;
 
 	/*
@@ -1967,7 +1966,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 
 	/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
 	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && !current->in_execve)
+	if (!(current->flags & PF_EXITING) && !current->in_execve)
 		__io_queue_sqe(req);
 	else
 		__io_req_task_cancel(req, -EFAULT);
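For reference, here is how the submit gate in __io_req_task_submit() reads once this hunk is applied. The ctx declaration and the closing mutex_unlock() are assumptions reconstructed from the surrounding kernel source; the hunk itself does not show them.

static void __io_req_task_submit(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;	/* assumed: not shown in the hunk */

	/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
	mutex_lock(&ctx->uring_lock);
	/* task exit/exec state is now the only reason to cancel instead of submit */
	if (!(current->flags & PF_EXITING) && !current->in_execve)
		__io_queue_sqe(req);
	else
		__io_req_task_cancel(req, -EFAULT);
	mutex_unlock(&ctx->uring_lock);	/* assumed: not shown in the hunk */
}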
@@ -6578,8 +6577,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 		if (!list_empty(&ctx->iopoll_list))
 			io_do_iopoll(ctx, &nr_events, 0);
 
-		if (to_submit && !ctx->sqo_dead &&
-		    likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
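With sqo_dead gone, the dying percpu ref is the sole liveness gate left in the SQPOLL submit path. A sketch of the resulting fragment of __io_sq_thread(), with the enclosing uring_lock critical section assumed from the surrounding function:

		if (!list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, &nr_events, 0);

		/* a dying percpu ref alone now tells the SQPOLL thread to stop submitting */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);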
@@ -7818,7 +7816,7 @@ static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
 
 	clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
 	reinit_completion(&sqd->completion);
-	ctx->sqo_dead = ctx->sqo_exec = 0;
+	ctx->sqo_exec = 0;
 	sqd->task_pid = current->pid;
 	current->flags |= PF_IO_WORKER;
 	ret = io_wq_fork_thread(io_sq_thread, sqd);
@@ -8529,10 +8527,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
-
-	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
-		ctx->sqo_dead = 1;
-
 	/* if force is set, the ring is going away. always drop after that */
 	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
@@ -8692,19 +8686,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 	}
 }
 
-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
-{
-	mutex_lock(&ctx->uring_lock);
-	ctx->sqo_dead = 1;
-	if (ctx->flags & IORING_SETUP_R_DISABLED)
-		io_sq_offload_start(ctx);
-	mutex_unlock(&ctx->uring_lock);
-
-	/* make sure callers enter the ring to get error */
-	if (ctx->rings)
-		io_ring_set_wakeup_flag(ctx);
-}
-
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8717,7 +8698,11 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	bool did_park = false;
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-		io_disable_sqo_submit(ctx);
+		/* never started, nothing to cancel */
+		if (ctx->flags & IORING_SETUP_R_DISABLED) {
+			io_sq_offload_start(ctx);
+			return;
+		}
 		did_park = io_sq_thread_park(ctx->sq_data);
 		if (did_park) {
 			task = ctx->sq_data->thread;
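The IORING_SETUP_R_DISABLED handling that used to live in io_disable_sqo_submit() moves inline here: a ring that was never enabled has no SQPOLL requests to cancel, so starting the offload and returning is enough. A sketch of the resulting branch; the atomic_inc() on the parked thread's in_idle counter is an assumption reconstructed from the surrounding kernel source:

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		/* never started, nothing to cancel */
		if (ctx->flags & IORING_SETUP_R_DISABLED) {
			io_sq_offload_start(ctx);
			return;
		}
		did_park = io_sq_thread_park(ctx->sq_data);
		if (did_park) {
			task = ctx->sq_data->thread;
			atomic_inc(&task->io_uring->in_idle);	/* assumed: not shown in the hunk */
		}
	}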
@@ -8838,7 +8823,6 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
 
 	if (!sqd)
 		return;
-	io_disable_sqo_submit(ctx);
 	if (!io_sq_thread_park(sqd))
 		return;
 	tctx = ctx->sq_data->thread->io_uring;
@@ -8883,7 +8867,6 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
 
-	/* trigger io_disable_sqo_submit() */
 	if (tctx->sqpoll) {
 		struct file *file;
 		unsigned long index;
@@ -8996,22 +8979,14 @@ static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 	do {
 		if (!io_sqring_full(ctx))
 			break;
-
 		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
-		if (unlikely(ctx->sqo_dead)) {
-			ret = -EOWNERDEAD;
-			goto out;
-		}
-
 		if (!io_sqring_full(ctx))
 			break;
-
 		schedule();
 	} while (!signal_pending(current));
 
 	finish_wait(&ctx->sqo_sq_wait, &wait);
-out:
 	return ret;
 }
 
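Without the sqo_dead check there is no error path left in this function, so the out: label goes away and the loop reduces to a plain wait-until-not-full. A sketch of the whole function after the hunk; the two declarations at the top are assumptions reconstructed from the surrounding kernel source:

static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);	/* assumed: not shown in the hunk */
	int ret = 0;		/* assumed: not shown in the hunk */

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		/* re-check after arming the wait entry to avoid a lost wakeup */
		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return ret;
}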
90178992
@@ -9093,8 +9068,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			ctx->sqo_exec = 0;
 		}
 		ret = -EOWNERDEAD;
-		if (unlikely(ctx->sqo_dead))
-			goto out;
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
 		if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9466,7 +9439,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
-		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;
@@ -9475,7 +9447,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 	return ret;
 err:
-	io_disable_sqo_submit(ctx);
 	io_ring_ctx_wait_and_kill(ctx);
 	return ret;
 }