
Commit 4e32635

isilence authored and axboe committed
io_uring: optimise SQPOLL mm/files grabbing
There are two reasons for this. The first is to optimise io_sq_thread_acquire_mm_files() for the non-SQPOLL case, which currently does too many checks and function calls in the hot path, e.g. in io_init_req(). The second is to not grab mm/files when they are not needed. As __io_queue_sqe() issues only one request now, we can reuse io_sq_thread_acquire_mm_files() there instead of unconditionally acquiring mm/files.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent d3d7298 commit 4e32635
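
For illustration, here is a minimal user-space sketch of the fast-path shape this change gives the submission hot path: the wrapper does only two cheap flag tests and defers the real work to an out-of-line slow path. This is not the kernel code itself; the structs, flag values and the stubbed slow path are stand-ins for io_ring_ctx, current->flags, PF_EXITING, IORING_SETUP_SQPOLL and __io_sq_thread_acquire_mm_files() as they appear in the diff below.

/* Standalone sketch only -- types and flag values are illustrative. */
#include <stdio.h>

#define PF_EXITING    0x1   /* stand-in for the task "exiting" flag */
#define SETUP_SQPOLL  0x2   /* stand-in for IORING_SETUP_SQPOLL */

struct ring_ctx  { unsigned int flags; };
struct task_info { unsigned int flags; };

/*
 * Out-of-line slow path: only reached for SQPOLL rings.  The real
 * __io_sq_thread_acquire_mm_files() grabs ->mm and ->files from the
 * ring owner here; stubbed out for the sketch.
 */
static int slow_acquire_mm_files(struct ring_ctx *ctx, struct task_info *cur)
{
        (void)ctx;
        (void)cur;
        return 0;
}

/*
 * Hot-path wrapper, mirroring the new inline helper in the diff:
 * two cheap checks, and an early return 0 for non-SQPOLL rings.
 */
static inline int acquire_mm_files(struct ring_ctx *ctx, struct task_info *cur)
{
        if (cur->flags & PF_EXITING)
                return -1;                      /* -EFAULT in the kernel */
        if (!(ctx->flags & SETUP_SQPOLL))
                return 0;                       /* nothing to grab */
        return slow_acquire_mm_files(ctx, cur);
}

int main(void)
{
        struct ring_ctx plain  = { .flags = 0 };
        struct ring_ctx sqpoll = { .flags = SETUP_SQPOLL };
        struct task_info cur   = { .flags = 0 };

        /* Non-SQPOLL ring: fast path, never calls into the slow path. */
        printf("plain ring:  %d\n", acquire_mm_files(&plain, &cur));
        /* SQPOLL ring: falls through to the (stubbed) slow path. */
        printf("sqpoll ring: %d\n", acquire_mm_files(&sqpoll, &cur));
        return 0;
}

The point of the split is that the hot path, e.g. io_init_req() and __io_req_task_submit(), now pays only for the two flag tests in the common non-SQPOLL case instead of the per-opcode checks and mm/files lookups.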

File tree

1 file changed (+13, -14 lines)


fs/io_uring.c

Lines changed: 13 additions & 14 deletions
@@ -1145,9 +1145,6 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
-        if (current->flags & PF_EXITING)
-                return -EFAULT;
-
         if (!current->files) {
                 struct files_struct *files;
                 struct nsproxy *nsproxy;
@@ -1175,15 +1172,9 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
         struct mm_struct *mm;
 
-        if (current->flags & PF_EXITING)
-                return -EFAULT;
         if (current->mm)
                 return 0;
 
-        /* Should never happen */
-        if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-                return -EFAULT;
-
         task_lock(ctx->sqo_task);
         mm = ctx->sqo_task->mm;
         if (unlikely(!mm || !mmget_not_zero(mm)))
@@ -1198,8 +1189,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
         return -EFAULT;
 }
 
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
-                                         struct io_kiocb *req)
+static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+                                           struct io_kiocb *req)
 {
         const struct io_op_def *def = &io_op_defs[req->opcode];
         int ret;
@@ -1219,6 +1210,16 @@ static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
         return 0;
 }
 
+static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+                                                struct io_kiocb *req)
+{
+        if (unlikely(current->flags & PF_EXITING))
+                return -EFAULT;
+        if (!(ctx->flags & IORING_SETUP_SQPOLL))
+                return 0;
+        return __io_sq_thread_acquire_mm_files(ctx, req);
+}
+
 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
                                          struct cgroup_subsys_state **cur_css)
 
@@ -2336,9 +2337,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
         struct io_ring_ctx *ctx = req->ctx;
 
         mutex_lock(&ctx->uring_lock);
-        if (!ctx->sqo_dead &&
-            !__io_sq_thread_acquire_mm(ctx) &&
-            !__io_sq_thread_acquire_files(ctx))
+        if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
                 __io_queue_sqe(req);
         else
                 __io_req_task_cancel(req, -EFAULT);
