io_uring: deduplicate failing task_work_add
commit eab30c4 upstream

When io_req_task_work_add() fails, the request is cancelled by
enqueueing it via the task_works of io-wq. Extract a function for that.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
isilence authored and gregkh committed Mar 11, 2021
1 parent 24fcea3 commit 9673ff4
1 changed file: fs/io_uring.c (17 additions, 29 deletions)
@@ -2172,6 +2172,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	return ret;
 }
 
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+					  void (*cb)(struct callback_head *))
+{
+	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
+
+	init_task_work(&req->task_work, cb);
+	task_work_add(tsk, &req->task_work, TWA_NONE);
+	wake_up_process(tsk);
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2229,14 +2239,8 @@ static void io_req_task_queue(struct io_kiocb *req)
 	percpu_ref_get(&req->ctx->refs);
 
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 }
 
 static inline void io_queue_next(struct io_kiocb *req)
@@ -2354,13 +2358,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
 
 	init_task_work(&req->task_work, io_put_req_deferred_cb);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -3439,15 +3438,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	/* submit ref gets dropped, acquire a new one */
 	refcount_inc(&req->refs);
 	ret = io_req_task_work_add(req);
-	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
-		/* queue just for cancelation */
-		init_task_work(&req->task_work, io_req_task_cancel);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
-	}
+	if (unlikely(ret))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
 	return 1;
 }
 
@@ -5159,12 +5151,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 */
 	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
-		struct task_struct *tsk;
-
 		WRITE_ONCE(poll->canceled, true);
-		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, TWA_NONE);
-		wake_up_process(tsk);
+		io_req_task_work_add_fallback(req, func);
 	}
 	return 1;
 }
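
For readers outside the kernel tree, here is a minimal, self-contained userspace sketch of the pattern the patch extracts. It is illustrative only: struct request, task_work_add_fastpath(), task_work_add_fallback() and cancel_cb() are hypothetical stand-ins, not the io_uring/io-wq APIs in the diff above. The point is the shape of the change: the repeated "queue the cancel callback on a fallback path" error branch collapses into one helper that takes the callback as a parameter.

/*
 * Userspace analogue of the refactor above (illustrative only; every
 * identifier here is a stand-in, not a kernel API).
 */
#include <stdio.h>

struct request {
	int id;
};

typedef void (*req_cb)(struct request *req);

/* Stand-in for io_req_task_cancel: runs when the fast path failed. */
static void cancel_cb(struct request *req)
{
	printf("request %d: cancelled via fallback path\n", req->id);
}

/* Stand-in for io_req_task_work_add_fallback(): the single extracted helper. */
static void task_work_add_fallback(struct request *req, req_cb cb)
{
	/* The kernel version queues cb on the io-wq task; here we just invoke it. */
	cb(req);
}

/* Stand-in for io_req_task_work_add(): always fail to exercise the fallback. */
static int task_work_add_fastpath(struct request *req)
{
	(void)req;
	return -1;
}

int main(void)
{
	struct request req = { .id = 42 };

	/* Each former call site now collapses to this two-line pattern. */
	if (task_work_add_fastpath(&req))
		task_work_add_fallback(&req, cancel_cb);

	return 0;
}

Unlike this sketch, the kernel helper does not invoke the callback directly: as the diff shows, it queues the callback on the io-wq task with init_task_work()/task_work_add() and then wakes that task with wake_up_process().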
