Skip to content

Commit

Permalink
io_uring: allow timeout/poll/files killing to take task into account
Browse files Browse the repository at this point in the history
commit 07d3ca52b0056f25eef61b1c896d089f8d365468 upstream.

We currently cancel these when the ring exits, and we cancel all of
them. This is in preparation for killing only the ones associated
with a given task.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
axboe authored and gregkh committed Nov 1, 2020
1 parent 10cf2d8 commit 2c54017
Showing 1 changed file with 24 additions and 9 deletions.
33 changes: 24 additions & 9 deletions fs/io_uring.c
Expand Up @@ -1226,13 +1226,26 @@ static void io_kill_timeout(struct io_kiocb *req)
}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
/*
 * Decide whether @req should be cancelled on behalf of @tsk.
 *
 * A NULL @tsk means "match everything" (ring teardown). Otherwise a
 * request matches when it was issued by @tsk directly, or — on
 * IORING_SETUP_SQPOLL rings — when it was issued by the ring's SQ
 * poll thread, which submits on behalf of the task.
 */
static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (tsk && req->task != tsk) {
		/* SQPOLL rings: requests carry the sqo_thread identity */
		return (ctx->flags & IORING_SETUP_SQPOLL) &&
		       req->task == ctx->sqo_thread;
	}
	return true;
}

/*
 * Cancel every pending timeout on @ctx that matches @tsk
 * (NULL @tsk cancels them all). Walks ctx->timeout_list under
 * completion_lock; the _safe iterator allows io_kill_timeout()
 * to unlink entries mid-walk.
 */
static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
	struct io_kiocb *cur, *next;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(cur, next, &ctx->timeout_list, timeout.list) {
		if (!io_task_match(cur, tsk))
			continue;
		io_kill_timeout(cur);
	}
	spin_unlock_irq(&ctx->completion_lock);
}

Expand Down Expand Up @@ -5017,7 +5030,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
return do_complete;
}

static void io_poll_remove_all(struct io_ring_ctx *ctx)
static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
struct hlist_node *tmp;
struct io_kiocb *req;
Expand All @@ -5028,8 +5041,10 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
struct hlist_head *list;

list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node)
posted += io_poll_remove_one(req);
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
if (io_task_match(req, tsk))
posted += io_poll_remove_one(req);
}
}
spin_unlock_irq(&ctx->completion_lock);

Expand Down Expand Up @@ -7989,8 +8004,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);

io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
io_kill_timeouts(ctx, NULL);
io_poll_remove_all(ctx, NULL);

if (ctx->io_wq)
io_wq_cancel_all(ctx->io_wq);
Expand Down Expand Up @@ -8221,7 +8236,7 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct task_struct *task = data;

return req->task == task;
return io_task_match(req, task);
}

static int io_uring_flush(struct file *file, void *data)
Expand Down

0 comments on commit 2c54017

Please sign in to comment.