Skip to content

Commit

Permalink
iopoll: test that we always enter the ring
Browse files Browse the repository at this point in the history
If the ring is configured to operate in poll mode we need to always
enter the kernel to fetch new events. This behavior was introduced
in bf3aeb3 but was recently broken.

Break me once, shame on you. Break me twice, shame on me:
  - Add a unit test.

The unit test submits sqes manually through the system call, which
guarantees that IORING_ENTER_GETEVENTS will not be present in the flag
set during the initial submission.

The only way we can ever fetch new I/O events is if io_uring_submit
indeed will enter the ring despite not having new sqes to submit.

This test passes with 1bafb3c but fails before it due to the
aforementioned regression.

Signed-off-by: Glauber Costa <glauber@datadoghq.com>
  • Loading branch information
Glauber Costa committed Aug 21, 2020
1 parent 1bafb3c commit 5964134
Show file tree
Hide file tree
Showing 2 changed files with 80 additions and 1 deletion.
2 changes: 1 addition & 1 deletion src/queue.c
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
* Sync internal state with kernel ring state on the SQ side. Returns the
* number of pending items in the SQ ring, for the shared ring.
*/
static int __io_uring_flush_sq(struct io_uring *ring)
int __io_uring_flush_sq(struct io_uring *ring)
{
struct io_uring_sq *sq = &ring->sq;
const unsigned mask = *sq->kring_mask;
Expand Down
79 changes: 79 additions & 0 deletions test/iopoll.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <sys/eventfd.h>
#include <sys/resource.h>
#include "liburing.h"
#include "../src/syscall.h"

#define FILE_SIZE (128 * 1024)
#define BS 4096
Expand Down Expand Up @@ -231,6 +232,78 @@ static int __test_io(const char *file, struct io_uring *ring, int write, int sqt
return 1;
}

extern int __io_uring_flush_sq(struct io_uring *ring);

/*
 * If the ring is set up with IORING_SETUP_IOPOLL, io_uring_submit() must
 * always enter the kernel so completions can be reaped, even when it has
 * no new sqes to submit. This test submits sqes manually via the raw
 * system call (so IORING_ENTER_GETEVENTS is never set on the initial
 * submission) and then relies on io_uring_submit() alone to surface the
 * completions. Returns 0 on success, 1 on failure.
 */
static int test_io_uring_submit_enters(const char *file)
{
	struct io_uring ring;
	int fd = -1, i, ret, ring_flags, open_flags;

	ring_flags = IORING_SETUP_IOPOLL;
	ret = io_uring_queue_init(64, &ring, ring_flags);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	open_flags = O_WRONLY | O_DIRECT | O_TRUNC;
	fd = open(file, open_flags);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	/* queue up BUFFERS writes at random (block-aligned) offsets */
	for (i = 0; i < BUFFERS; i++) {
		struct io_uring_sqe *sqe;
		off_t offset = BS * (rand() % BUFFERS);

		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "failed to get sqe\n");
			goto err;
		}
		io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
		sqe->user_data = 1;
	}

	/* submit manually to avoid adding IORING_ENTER_GETEVENTS */
	ret = __sys_io_uring_enter(ring.ring_fd, __io_uring_flush_sq(&ring), 0,
				   0, NULL);
	if (ret < 0)
		goto err;

	/*
	 * The SQ ring is now empty; the only way completions can appear is
	 * if io_uring_submit() still enters the kernel to poll for them.
	 */
	for (i = 0; i < 500; i++) {
		ret = io_uring_submit(&ring);
		if (ret != 0) {
			fprintf(stderr, "still had %d sqes to submit, this is unexpected\n", ret);
			goto err;
		}

		unsigned head;
		struct io_uring_cqe *cqe;
		io_uring_for_each_cqe(&ring, head, cqe) {
			/* runs after test_io so should not have happened */
			if (cqe->res == -EOPNOTSUPP) {
				fprintf(stdout, "File/device/fs doesn't support polled IO\n");
				goto err;
			}
			/* got at least one completion: the submit entered the ring */
			goto ok;
		}
		usleep(10000);
	}
err:
	ret = 1;
#ifdef VERBOSE
	fprintf(stderr, "FAILED\n");
#endif
ok:
	/* close on both paths; the original leaked fd on success */
	if (fd != -1)
		close(fd);
	io_uring_queue_exit(&ring);
	return ret;
}

static int test_io(const char *file, int write, int sqthread, int fixed,
int buf_select)
{
Expand Down Expand Up @@ -322,6 +395,12 @@ int main(int argc, char *argv[])
break;
}

ret = test_io_uring_submit_enters(fname);
if (ret) {
fprintf(stderr, "test_io_uring_submit_enters failed\n");
goto err;
}

if (fname != argv[1])
unlink(fname);
return 0;
Expand Down

0 comments on commit 5964134

Please sign in to comment.