From 49250f33bb436a29387f80cc64d1f40eba1ae19e Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Tue, 9 Feb 2021 04:47:37 +0000
Subject: [PATCH] io_uring: don't iterate io_uring_cancel_files()

[ Upstream commit b52fda00dd9df8b4a6de5784df94f9617f6133a1 ]

io_uring_cancel_files() guarantees to cancel all matching requests, so
there is no need to call it in a loop. Move the looping up the call
chain into io_uring_cancel_task_requests().

Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
Signed-off-by: Greg Kroah-Hartman
---
 fs/io_uring.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8e9a345ab8a35..9faa1cb961dbe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8654,16 +8654,10 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 	}
 }
 
-/*
- * Returns true if we found and killed one or more files pinning requests
- */
-static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct task_struct *task,
 				  struct files_struct *files)
 {
-	if (list_empty_careful(&ctx->inflight_list))
-		return false;
-
 	while (!list_empty_careful(&ctx->inflight_list)) {
 		struct io_kiocb *cancel_req = NULL, *req;
 		DEFINE_WAIT(wait);
@@ -8698,8 +8692,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 		schedule();
 		finish_wait(&ctx->inflight_wait, &wait);
 	}
-
-	return true;
 }
 
 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
@@ -8710,15 +8702,12 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	return io_task_match(req, task);
 }
 
-static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					    struct task_struct *task,
-					    struct files_struct *files)
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+					    struct task_struct *task)
 {
-	bool ret;
-
-	ret = io_uring_cancel_files(ctx, task, files);
-	if (!files) {
+	while (1) {
 		enum io_wq_cancel cret;
+		bool ret = false;
 
 		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
 		if (cret != IO_WQ_CANCEL_NOTFOUND)
@@ -8734,9 +8723,11 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 		ret |= io_poll_remove_all(ctx, task);
 		ret |= io_kill_timeouts(ctx, task);
+		if (!ret)
+			break;
+		io_run_task_work();
+		cond_resched();
 	}
-
-	return ret;
 }
 
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
@@ -8771,11 +8762,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 	io_cancel_defer_files(ctx, task, files);
 	io_cqring_overflow_flush(ctx, true, task, files);
+	io_uring_cancel_files(ctx, task, files);
 
-	while (__io_uring_cancel_task_requests(ctx, task, files)) {
-		io_run_task_work();
-		cond_resched();
-	}
+	if (!files)
+		__io_uring_cancel_task_requests(ctx, task);
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		atomic_dec(&task->io_uring->in_idle);
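
Note: for readers tracing the control flow of this backport, the stand-alone
sketch below mirrors the resulting shape of the cancellation path: the
files-based cancellation is called exactly once (it already guarantees
completeness), and only the task-wide pass keeps retrying until a full pass
makes no progress. It is a contrived user-space illustration with made-up
helpers (cancel_files_like() and friends), not kernel code; the real
functions are io_uring_cancel_files(), __io_uring_cancel_task_requests()
and io_uring_cancel_task_requests() in fs/io_uring.c.

/* cancel_sketch.c -- build with: cc -Wall -o cancel_sketch cancel_sketch.c */
#include <stdbool.h>
#include <stdio.h>

/* Fake pending work, standing in for poll requests, timeouts and
 * file-pinning requests tracked by the real io_uring code. */
static int pending_poll = 2;
static int pending_timeouts = 1;
static int pending_files = 3;

/* Stand-in for io_uring_cancel_files(): a single call is assumed to
 * cancel every matching request, so the caller never loops over it. */
static void cancel_files_like(void)
{
	pending_files = 0;
}

/* These cancel at most one item per pass (a deliberate simplification)
 * and report whether they made progress; the real io_poll_remove_all()
 * and io_kill_timeouts() return a similar "found something" flag. */
static bool cancel_poll_like(void)
{
	if (!pending_poll)
		return false;
	pending_poll--;
	return true;
}

static bool cancel_timeouts_like(void)
{
	if (!pending_timeouts)
		return false;
	pending_timeouts--;
	return true;
}

/* Stand-in for __io_uring_cancel_task_requests(): the retry loop now
 * lives here, repeating whole passes until one pass cancels nothing. */
static void cancel_task_requests_like(void)
{
	while (1) {
		bool ret = false;

		ret |= cancel_poll_like();
		ret |= cancel_timeouts_like();
		if (!ret)
			break;
		/* the kernel runs io_run_task_work() + cond_resched() here */
	}
}

int main(void)
{
	bool have_files = false;	/* the "files == NULL" case */

	/* Shape of io_uring_cancel_task_requests() after this patch. */
	cancel_files_like();
	if (!have_files)
		cancel_task_requests_like();

	printf("left over: poll=%d timeouts=%d files=%d\n",
	       pending_poll, pending_timeouts, pending_files);
	return 0;
}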