Commit 3e6a0d3

io_uring: fix -EAGAIN retry with IOPOLL
We no longer revert the iovec on -EIOCBQUEUED, see commit ab2125d, and this started causing issues for IOPOLL on devices that run out of request slots. Turns out that outside of needing a revert for those, we also had a bug where we didn't properly set up the retry inside the submission path. That could cause a re-import of the iovec, if any, and that could lead to spurious results if the application had those allocated on the stack.

Catch the -EAGAIN retry and make the iovec stable for IOPOLL, just like we do for !IOPOLL retries.

Cc: <stable@vger.kernel.org> # 5.9+
Reported-by: Abaci Robot <abaci@linux.alibaba.com>
Reported-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent dc7bbc9 commit 3e6a0d3
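
Not part of the commit itself, but a minimal liburing-based userspace sketch of the pattern the message refers to: an iovec allocated on the stack of the submitting function, used with an IORING_SETUP_IOPOLL ring on an O_DIRECT file. The kernel imports the iovec at submission time, so the application may legitimately let it go out of scope; a spurious re-import on an -EAGAIN retry would then read from dead stack memory. File name, sizes, and error handling below are illustrative assumptions.

/* Illustrative only -- not from the commit. Build with: gcc demo.c -luring */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

static int submit_read(struct io_uring *ring, int fd, void *buf, size_t len)
{
	/* The iovec only lives in this stack frame. */
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	/* After submit returns, only the kernel's copy of the iovec is valid. */
	return io_uring_submit(ring);
}

int main(int argc, char **argv)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	if (argc < 2)
		return 1;
	/* IOPOLL needs O_DIRECT and an aligned buffer. */
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;
	if (submit_read(&ring, fd, buf, 4096) < 1)
		return 1;
	/* wait_cqe polls for completion on an IOPOLL ring. */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}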

1 file changed: 31 additions, 5 deletions

fs/io_uring.c

@@ -2423,23 +2423,32 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 		return false;
 	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
 }
-#endif
 
-static bool io_rw_reissue(struct io_kiocb *req)
+static bool io_rw_should_reissue(struct io_kiocb *req)
 {
-#ifdef CONFIG_BLOCK
 	umode_t mode = file_inode(req->file)->i_mode;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!S_ISBLK(mode) && !S_ISREG(mode))
 		return false;
-	if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
+	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
+	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 		return false;
 	/*
 	 * If ref is dying, we might be running poll reap from the exit work.
 	 * Don't attempt to reissue from that path, just let it fail with
 	 * -EAGAIN.
 	 */
-	if (percpu_ref_is_dying(&req->ctx->refs))
+	if (percpu_ref_is_dying(&ctx->refs))
+		return false;
+	return true;
+}
+#endif
+
+static bool io_rw_reissue(struct io_kiocb *req)
+{
+#ifdef CONFIG_BLOCK
+	if (!io_rw_should_reissue(req))
 		return false;
 
 	lockdep_assert_held(&req->ctx->uring_lock);
@@ -2482,6 +2491,19 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
+#ifdef CONFIG_BLOCK
+	/* Rewind iter, if we have one. iopoll path resubmits as usual */
+	if (res == -EAGAIN && io_rw_should_reissue(req)) {
+		struct io_async_rw *rw = req->async_data;
+
+		if (rw)
+			iov_iter_revert(&rw->iter,
+					req->result - iov_iter_count(&rw->iter));
+		else if (!io_resubmit_prep(req))
+			res = -EIO;
+	}
+#endif
+
 	if (kiocb->ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 
@@ -3230,6 +3252,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
+		if (req->async_data)
+			iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		goto out_free;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
@@ -3361,6 +3385,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	/* no retry on NONBLOCK nor RWF_NOWAIT */
 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 		goto done;
+	if (ret2 == -EIOCBQUEUED && req->async_data)
+		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
