
Commit 07471df

axboe authored and gregkh committed
io_uring: include dying ring in task_work "should cancel" state
Commit 3539b14 upstream.

When running task_work for an exiting task, rather than perform the issue retry attempt, the task_work is canceled. However, this isn't done for a ring that has been closed. This can lead to requests being successfully completed post the ring being closed, which is somewhat confusing and surprising to an application.

Rather than just check the task exit state, also include the ring ref state in deciding whether or not to terminate a given request when run from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: axboe/liburing#1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 9d3489b commit 07471df

File tree

4 files changed: +12 −8 lines

io_uring/io_uring.c

Lines changed: 8 additions & 4 deletions

@@ -1248,8 +1248,10 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
 {
-	io_tw_lock(req->ctx, locked);
-	if (likely(!io_should_terminate_tw()))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, locked);
+	if (likely(!io_should_terminate_tw(ctx)))
 		io_queue_sqe(req);
 	else
 		io_req_complete_failed(req, -EFAULT);
@@ -1771,8 +1773,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_poll_issue(struct io_kiocb *req, bool *locked)
 {
-	io_tw_lock(req->ctx, locked);
-	if (unlikely(io_should_terminate_tw()))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, locked);
+	if (unlikely(io_should_terminate_tw(ctx)))
 		return -EFAULT;
 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }

io_uring/io_uring.h

Lines changed: 2 additions & 2 deletions

@@ -403,9 +403,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  * our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-	return current->flags & (PF_KTHREAD | PF_EXITING);
+	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
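For readability, here is the helper as it stands after this hunk, reassembled from the diff above into a standalone sketch; the explanatory comment is a paraphrase, not the exact header text:

static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	/*
	 * Terminate task_work if the task is exiting (PF_EXITING), if it is
	 * being run from the fallback kthread (PF_KTHREAD), or, new with this
	 * commit, if the ring itself is going away, i.e. its percpu refs have
	 * been killed and percpu_ref_is_dying(&ctx->refs) returns true.
	 */
	return (current->flags & (PF_KTHREAD | PF_EXITING)) ||
	       percpu_ref_is_dying(&ctx->refs);
}

With ctx now passed in, the callers in io_uring.c above and in poll.c and timeout.c below fail or cancel a request whose ring is already being torn down instead of retrying it.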

io_uring/poll.c

Lines changed: 1 addition & 1 deletion

@@ -241,7 +241,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int v;
 
-	if (unlikely(io_should_terminate_tw()))
+	if (unlikely(io_should_terminate_tw(ctx)))
 		return -ECANCELED;
 
 	do {

io_uring/timeout.c

Lines changed: 1 addition & 1 deletion

@@ -275,7 +275,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!io_should_terminate_tw()) {
+		if (!io_should_terminate_tw(req->ctx)) {
 			struct io_cancel_data cd = {
 				.ctx = req->ctx,
 				.data = prev->cqe.user_data,
