Skip to content

Commit

Permalink
Default to mmap'ed provided buffers for hppa
Browse files Browse the repository at this point in the history
Architectures like parisc cannot guarantee that kernel and userspace
virtual addresses map to the same cache color when the application
allocates the ring and the kernel then maps it.

Add a helper that uses the buffer ring registration variant where the
kernel allocates the memory and the application then maps it with
mmap(2). With that, mmap can pick an appropriate virtual address so
that we avoid any coloring mismatches.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
axboe committed Mar 17, 2023
1 parent 31e9506 commit 9c66898
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 7 deletions.
19 changes: 18 additions & 1 deletion src/include/liburing/io_uring.h
Expand Up @@ -389,6 +389,9 @@ enum {
#define IORING_OFF_SQ_RING 0ULL
#define IORING_OFF_CQ_RING 0x8000000ULL
#define IORING_OFF_SQES 0x10000000ULL
/* mmap(2) offset base for kernel-allocated provided buffer rings */
#define IORING_OFF_PBUF_RING 0x80000000ULL
/* the buffer group ID is encoded into the mmap offset at this shift */
#define IORING_OFF_PBUF_SHIFT 16
#define IORING_OFF_MMAP_MASK 0xf8000000ULL

/*
* Filled with the offset for mmap(2)
Expand Down Expand Up @@ -635,12 +638,26 @@ struct io_uring_buf_ring {
};
};

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP: If set, kernel will allocate the memory for the ring.
 * The application must not set a ring_addr in struct
 * io_uring_buf_reg, instead it must subsequently call
 * mmap(2) with the offset set as:
 * IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 * to get a virtual mapping for the ring.
 */
enum {
IOU_PBUF_RING_MMAP = 1,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	/* userspace address of the ring; must be 0 with IOU_PBUF_RING_MMAP */
	__u64	ring_addr;
	/* number of entries in the ring */
	__u32	ring_entries;
	/* buffer group ID this ring serves */
	__u16	bgid;
	/* IOU_PBUF_RING_* flags (replaces the former 'pad' member) */
	__u16	flags;
	/* reserved, must be zero */
	__u64	resv[3];
};

Expand Down
60 changes: 54 additions & 6 deletions src/setup.c
Expand Up @@ -369,25 +369,57 @@ __cold ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
return io_uring_mlock_size_params(entries, &p);
}

#if defined(__hppa__)
/*
 * On parisc (hppa) the kernel and userspace mappings of the same memory
 * may end up with different cache colors when the application allocates
 * the ring itself.  Avoid that by asking the kernel to allocate the
 * provided buffer ring (IOU_PBUF_RING_MMAP) and then mapping it into
 * userspace with mmap(2), letting mmap pick a suitable virtual address.
 *
 * Returns the mapped ring on success.  On failure, returns NULL and
 * stores a negative error code in *ret.
 */
static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
					  unsigned int nentries, int bgid,
					  unsigned int flags, int *ret)
{
	struct io_uring_buf_reg reg = { };
	struct io_uring_buf_ring *br;
	size_t ring_size;
	off_t off;
	int lret;

	/* have the kernel allocate the ring memory */
	reg.ring_entries = nentries;
	reg.bgid = bgid;
	reg.flags = IOU_PBUF_RING_MMAP;

	*ret = 0;
	lret = io_uring_register_buf_ring(ring, &reg, flags);
	if (lret) {
		*ret = lret;
		return NULL;
	}

	/* the mmap offset encodes the buffer group ID for PBUF rings */
	off = IORING_OFF_PBUF_RING |
		(unsigned long long) bgid << IORING_OFF_PBUF_SHIFT;
	ring_size = nentries * sizeof(struct io_uring_buf);
	br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ring->ring_fd, off);
	if (IS_ERR(br)) {
		/* don't leak the kernel-side registration on mmap failure */
		io_uring_unregister_buf_ring(ring, bgid);
		*ret = PTR_ERR(br);
		return NULL;
	}

	return br;
}
#else
static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
unsigned int nentries, int bgid,
unsigned int flags, int *ret)
{
struct io_uring_buf_reg reg = { };
struct io_uring_buf_ring *br;
size_t ring_size;
int lret;

ring_size = nentries * sizeof(struct io_uring_buf);
br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (IS_ERR(br)) {
*ret = PTR_ERR(br);
return NULL;
}

reg.ring_addr = (unsigned long) (uintptr_t) br;
reg.ring_entries = nentries;
Expand All @@ -402,6 +434,22 @@ struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
}

return br;

}
#endif

/*
 * Set up and register a provided buffer ring for buffer group 'bgid'.
 * On success, returns the ring with its tail initialized, ready for
 * io_uring_buf_ring_add().  On failure, returns NULL and stores a
 * negative error code in *ret.
 */
struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
						  unsigned int nentries,
						  int bgid, unsigned int flags,
						  int *ret)
{
	struct io_uring_buf_ring *br = br_setup(ring, nentries, bgid, flags, ret);

	if (!br)
		return NULL;

	io_uring_buf_ring_init(br);
	return br;
}

int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
Expand Down

0 comments on commit 9c66898

Please sign in to comment.