io_uring: clear opcode specific data for an early failure
[ Upstream commit e21e1c4 ]

If failure happens before the opcode prep handler is called, ensure that
we clear the opcode specific area of the request, which holds data
specific to that request type. This prevents errors where opcode
handlers don't get to clear per-request private data, since prep
isn't even called.

Reported-and-tested-by: syzbot+f8e9a371388aa62ecab4@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
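For readers outside the kernel tree, here is a minimal userspace sketch of
the pattern the patch applies. The toy_req struct, toy_init_fail_req() and
the hard-coded -22 (EINVAL) are invented for illustration; only the memset
of the opcode-specific union mirrors the real io_init_fail_req().

#include <stdio.h>
#include <string.h>

/* Toy request modeled loosely on struct io_kiocb: a union holds
 * per-opcode data that the opcode's prep handler normally initializes. */
struct toy_req {
        int opcode;
        union {
                struct { int fd; long off; } rw;
                struct { void *buf; int len; } send;
        } data;
};

/* Same idea as io_init_fail_req(): if we bail out before prep runs,
 * wipe the opcode-specific area so stale bytes from a recycled
 * request cannot leak into later handling of the failure. */
static int toy_init_fail_req(struct toy_req *req, int err)
{
        memset(&req->data, 0, sizeof(req->data));
        return err;
}

static int toy_init_req(struct toy_req *req, int opcode)
{
        req->opcode = opcode;
        if (opcode < 0)                 /* early failure: prep never runs */
                return toy_init_fail_req(req, -22 /* EINVAL */);
        /* ... the opcode prep handler would fill in req->data here ... */
        return 0;
}

int main(void)
{
        struct toy_req req;

        /* Simulate a recycled request still carrying stale data. */
        memset(&req, 0xaa, sizeof(req));
        if (toy_init_req(&req, -1) < 0)
                printf("failed early, data cleared: fd=%d\n", req.data.rw.fd);
        return 0;
}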
axboe authored and gregkh committed Apr 13, 2024
1 parent c818cb6 commit cb1cd17
Showing 1 changed file with 16 additions and 9 deletions: io_uring/io_uring.c
@@ -2143,6 +2143,13 @@ static void io_init_req_drain(struct io_kiocb *req)
         }
 }
 
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+        /* ensure per-opcode data is cleared if we fail before prep */
+        memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+        return err;
+}
+
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                        const struct io_uring_sqe *sqe)
         __must_hold(&ctx->uring_lock)
@@ -2163,29 +2170,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
         if (unlikely(opcode >= IORING_OP_LAST)) {
                 req->opcode = 0;
-                return -EINVAL;
+                return io_init_fail_req(req, -EINVAL);
         }
         def = &io_issue_defs[opcode];
         if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
                 /* enforce forwards compatibility on users */
                 if (sqe_flags & ~SQE_VALID_FLAGS)
-                        return -EINVAL;
+                        return io_init_fail_req(req, -EINVAL);
                 if (sqe_flags & IOSQE_BUFFER_SELECT) {
                         if (!def->buffer_select)
-                                return -EOPNOTSUPP;
+                                return io_init_fail_req(req, -EOPNOTSUPP);
                         req->buf_index = READ_ONCE(sqe->buf_group);
                 }
                 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
                         ctx->drain_disabled = true;
                 if (sqe_flags & IOSQE_IO_DRAIN) {
                         if (ctx->drain_disabled)
-                                return -EOPNOTSUPP;
+                                return io_init_fail_req(req, -EOPNOTSUPP);
                         io_init_req_drain(req);
                 }
         }
         if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
                 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
-                        return -EACCES;
+                        return io_init_fail_req(req, -EACCES);
                 /* knock it to the slow queue path, will be drained there */
                 if (ctx->drain_active)
                         req->flags |= REQ_F_FORCE_ASYNC;
@@ -2198,9 +2205,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
         }
 
         if (!def->ioprio && sqe->ioprio)
-                return -EINVAL;
+                return io_init_fail_req(req, -EINVAL);
         if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
-                return -EINVAL;
+                return io_init_fail_req(req, -EINVAL);
 
         if (def->needs_file) {
                 struct io_submit_state *state = &ctx->submit_state;
@@ -2224,12 +2231,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
                 req->creds = xa_load(&ctx->personalities, personality);
                 if (!req->creds)
-                        return -EINVAL;
+                        return io_init_fail_req(req, -EINVAL);
                 get_cred(req->creds);
                 ret = security_uring_override_creds(req->creds);
                 if (ret) {
                         put_cred(req->creds);
-                        return ret;
+                        return io_init_fail_req(req, ret);
                 }
                 req->flags |= REQ_F_CREDS;
         }
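One way to poke the early-failure path this patch hardens, sketched with
liburing. This assumes opcode 255 is >= IORING_OP_LAST on the running kernel,
and that the kernel posts a CQE carrying the error in cqe->res for a request
that fails init, as current kernels do, rather than failing the submit call.

/* build: gcc bad_opcode.c -o bad_opcode -luring */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        sqe->opcode = 255;      /* assumed >= IORING_OP_LAST: fails before prep */

        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("cqe->res = %d (expect -EINVAL)\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return 0;
}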
