
Commit dbc2564

Hao Xu authored and Jens Axboe committed
io_uring: let fast poll support multishot
For operations like accept, multishot is a useful feature, since it reduces the number of accept SQEs that have to be submitted. Let's integrate it into fast poll; it may be useful for other operations in the future.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220514142046.58072-4-haoxu.linux@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 227685e commit dbc2564
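
To make the change concrete, here is a minimal userspace sketch of the accept case the commit message mentions. It is an illustration, not part of this commit: it assumes a liburing build that provides io_uring_prep_multishot_accept() and a kernel carrying the rest of this series, with ring setup and the listening socket handled elsewhere. One SQE is armed once; each incoming connection then posts a CQE with IORING_CQE_F_MORE set for as long as the request stays armed.

/* Illustrative sketch only (not from this commit): accept many connections
 * with a single multishot SQE. Assumes liburing with
 * io_uring_prep_multishot_accept() and a kernel carrying this series. */
#include <liburing.h>
#include <stdio.h>

static int serve(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* Arm one accept SQE; fast poll keeps it armed across connections. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		unsigned int flags;

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		flags = cqe->flags;
		ret = cqe->res;		/* accepted fd, or -errno */
		io_uring_cqe_seen(ring, cqe);

		if (ret >= 0)
			printf("accepted fd %d\n", ret);
		/* No IORING_CQE_F_MORE: the multishot request terminated
		 * and must be re-armed if more connections are wanted. */
		if (!(flags & IORING_CQE_F_MORE))
			return ret < 0 ? ret : 0;
	}
}

When IORING_CQE_F_MORE is absent, the request has terminated and res carries the final status, so userspace decides whether to re-arm; this is the userspace counterpart of io_poll_check_events() in the diff below returning 0 versus continuing its loop.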

File tree

1 file changed: +32 -15 lines


fs/io_uring.c

Lines changed: 32 additions & 15 deletions
@@ -6011,6 +6011,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 	rcu_read_unlock();
 }
 
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -6019,10 +6020,10 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->cqe.res.
  */
-static int io_poll_check_events(struct io_kiocb *req, bool locked)
+static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	int v;
+	int v, ret;
 
 	/* req->task == current here, checking PF_EXITING is safe */
 	if (unlikely(req->task->flags & PF_EXITING))
@@ -6046,23 +6047,37 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
 		}
 
-		/* multishot, just fill an CQE and proceed */
-		if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
-			__poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
+		if ((unlikely(!req->cqe.res)))
+			continue;
+		if (req->apoll_events & EPOLLONESHOT)
+			return 0;
+
+		/* multishot, just fill a CQE and proceed */
+		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+			__poll_t mask = mangle_poll(req->cqe.res &
+						    req->apoll_events);
 			bool filled;
 
 			spin_lock(&ctx->completion_lock);
-			filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
-						 IORING_CQE_F_MORE);
+			filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
+						 mask, IORING_CQE_F_MORE);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
-			if (unlikely(!filled))
-				return -ECANCELED;
-			io_cqring_ev_posted(ctx);
-		} else if (req->cqe.res) {
-			return 0;
+			if (filled) {
+				io_cqring_ev_posted(ctx);
+				continue;
+			}
+			return -ECANCELED;
 		}
 
+		io_tw_lock(req->ctx, locked);
+		if (unlikely(req->task->flags & PF_EXITING))
+			return -EFAULT;
+		ret = io_issue_sqe(req,
+				   IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
+		if (ret)
+			return ret;
+
 		/*
 		 * Release all references, retry if someone tried to restart
 		 * task_work while we were executing it.
@@ -6077,7 +6092,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6102,7 +6117,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6343,7 +6358,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
-	__poll_t mask = IO_ASYNC_POLL_COMMON | POLLERR;
+	__poll_t mask = POLLPRI | POLLERR;
 	int ret;
 
 	if (!def->pollin && !def->pollout)
@@ -6352,6 +6367,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		return IO_APOLL_ABORTED;
 	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
 		return IO_APOLL_ABORTED;
+	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+		mask |= EPOLLONESHOT;
 
 	if (def->pollin) {
 		mask |= POLLIN | POLLRDNORM;
