Skip to content

Commit 390a61d

Browse files
axboe authored and gregkh committed
io_uring/kbuf: always use READ_ONCE() to read ring provided buffer lengths
[ Upstream commit 98b6fa6 ]

Since the buffers are mapped from userspace, it is prudent to use
READ_ONCE() to read the value into a local variable, and use that for
any other actions taken. Having a stable read of the buffer length
avoids worrying about it changing after checking, or being read multiple
times.

Similarly, the buffer may well change in between it being picked and
being committed. Ensure the looping for incremental ring buffer commit
stops if it hits a zero sized buffer, as no further progress can be made
at that point.

Fixes: ae98dbf ("io_uring/kbuf: add support for incremental buffer consumption")
Link: https://lore.kernel.org/io-uring/tencent_000C02641F6250C856D0C26228DE29A3D30A@qq.com/
Reported-by: Qingyue Zhang <chunzhennn@qq.com>
Reported-by: Suoxing Zhang <aftern00n@qq.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 3fdc52c commit 390a61d

File tree

1 file changed

+13
-7
lines changed

1 file changed

+13
-7
lines changed

io_uring/kbuf.c

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,15 +36,19 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
 {
 	while (len) {
 		struct io_uring_buf *buf;
-		u32 this_len;
+		u32 buf_len, this_len;

 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-		this_len = min_t(u32, len, buf->len);
-		buf->len -= this_len;
-		if (buf->len) {
+		buf_len = READ_ONCE(buf->len);
+		this_len = min_t(u32, len, buf_len);
+		buf_len -= this_len;
+		/* Stop looping for invalid buffer length of 0 */
+		if (buf_len || !this_len) {
 			buf->addr += this_len;
+			buf->len = buf_len;
 			return false;
 		}
+		buf->len = 0;
 		bl->head++;
 		len -= this_len;
 	}
@@ -159,6 +163,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
 	void __user *ret;
+	u32 buf_len;

 	tail = smp_load_acquire(&br->tail);
 	if (unlikely(tail == head))
@@ -168,8 +173,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->flags |= REQ_F_BL_EMPTY;

 	buf = io_ring_head_to_buf(br, head, bl->mask);
-	if (*len == 0 || *len > buf->len)
-		*len = buf->len;
+	buf_len = READ_ONCE(buf->len);
+	if (*len == 0 || *len > buf_len)
+		*len = buf_len;
 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
@@ -265,7 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,

 	req->buf_index = buf->bid;
 	do {
-		u32 len = buf->len;
+		u32 len = READ_ONCE(buf->len);

 		/* truncate end piece, if needed, for non partial buffers */
 		if (len > arg->max_len) {

0 commit comments

Comments
 (0)