From 16eadf8979564c53a5a9aaa0203ee87a034cc59d Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Sun, 16 May 2021 22:58:12 +0100
Subject: [PATCH] io_uring: don't bounce submit_state cachelines

[ Upstream commit d0acdee296d42e700c16271d9f95085a9c897a53 ]

struct io_submit_state contains struct io_comp_state and with it
locked_free_*, so the cachelines around ->locked_free* get invalidated
on most non-inline completions, which may terrorise caches if
submissions and completions are done by different tasks.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/290cb5412b76892e8631978ee8ab9db0c6290dd5.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/io_uring.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5d685c92b8fd5..bf3566ff95165 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -299,11 +299,8 @@ struct io_sq_data {
 struct io_comp_state {
 	struct io_kiocb		*reqs[IO_COMPL_BATCH];
 	unsigned int		nr;
-	unsigned int		locked_free_nr;
 	/* inline/task_work completion list, under ->uring_lock */
 	struct list_head	free_list;
-	/* IRQ completion list, under ->completion_lock */
-	struct list_head	locked_free_list;
 };
 
 struct io_submit_link {
@@ -382,6 +379,9 @@ struct io_ring_ctx {
 	} ____cacheline_aligned_in_smp;
 
 	struct io_submit_state		submit_state;
+	/* IRQ completion list, under ->completion_lock */
+	struct list_head	locked_free_list;
+	unsigned int		locked_free_nr;
 
 	struct io_rings	*rings;
 
@@ -1193,7 +1193,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_llist_head(&ctx->rsrc_put_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
-	INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
+	INIT_LIST_HEAD(&ctx->locked_free_list);
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
@@ -1590,8 +1590,6 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 	 * free_list cache.
 	 */
 	if (req_ref_put_and_test(req)) {
-		struct io_comp_state *cs = &ctx->submit_state.comp;
-
 		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
 			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
 				io_disarm_next(req);
@@ -1602,8 +1600,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 		}
 		io_dismantle_req(req);
 		io_put_task(req->task, 1);
-		list_add(&req->compl.list, &cs->locked_free_list);
-		cs->locked_free_nr++;
+		list_add(&req->compl.list, &ctx->locked_free_list);
+		ctx->locked_free_nr++;
 	} else {
 		if (!percpu_ref_tryget(&ctx->refs))
 			req = NULL;
@@ -1658,8 +1656,8 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 					struct io_comp_state *cs)
 {
 	spin_lock_irq(&ctx->completion_lock);
-	list_splice_init(&cs->locked_free_list, &cs->free_list);
-	cs->locked_free_nr = 0;
+	list_splice_init(&ctx->locked_free_list, &cs->free_list);
+	ctx->locked_free_nr = 0;
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -1675,7 +1673,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 * locked cache, grab the lock and move them over to our submission
 	 * side cache.
 	 */
-	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
+	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
		io_flush_cached_locked_reqs(ctx, cs);
 
 	nr = state->free_reqs;
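
The effect of the patch is to hoist the completion-side fields (locked_free_list,
locked_free_nr), which other tasks and IRQ context write under ->completion_lock,
out of struct io_comp_state / struct io_submit_state and directly into struct
io_ring_ctx, so those writes no longer dirty the submitter's cachelines. Below is
a minimal userspace C sketch of that cacheline-separation idea; it is not part of
the patch, and the names demo_ctx, submit_hot and complete_hot are invented for
illustration.

/*
 * Illustration only, not the kernel code: two groups of fields kept in
 * separate cachelines so that writes by the completion side do not keep
 * invalidating the submission side's cached data (false sharing).
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE	64

struct demo_ctx {
	/* Submission side: touched only by the submitting task. */
	struct {
		unsigned int	free_reqs;
		unsigned int	nr;
	} submit_hot;

	/*
	 * Completion side: bumped from IRQ/other tasks under a lock.
	 * Aligning it to its own cacheline keeps those writes from
	 * invalidating the submitter's cacheline above.
	 */
	alignas(CACHELINE) struct {
		unsigned int	locked_free_nr;
	} complete_hot;
};

int main(void)
{
	/* The two groups land at offsets at least a cacheline apart. */
	printf("submit_hot at %zu, complete_hot at %zu\n",
	       offsetof(struct demo_ctx, submit_hot),
	       offsetof(struct demo_ctx, complete_hot));
	return 0;
}

The kernel patch gets the same separation by struct placement rather than explicit
alignment: once the two fields live outside submit_state, non-inline completions
stop invalidating the submission-side cachelines, which is the behaviour the
alignas() split above demonstrates.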