Skip to content

Commit 50220d6

Browse files
committed
io_uring/net: get rid of ->prep_async() for send side
Move the io_async_msghdr out of the issue path and into prep handling, since it's now done unconditionally and hence does not need to be part of the issue path. This means any usage of io_sendrecv_prep_async() and io_sendmsg_prep_async() can be removed, and hence the forced async setup path is now unified with the normal prep setup. Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent c6f32c7 commit 50220d6

File tree

3 files changed

+46
-114
lines changed

3 files changed

+46
-114
lines changed

io_uring/net.c

Lines changed: 46 additions & 108 deletions
Original file line numberDiff line numberDiff line change
@@ -290,50 +290,56 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
290290
return ret;
291291
}
292292

293-
int io_sendrecv_prep_async(struct io_kiocb *req)
293+
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
294+
{
295+
struct io_async_msghdr *io = req->async_data;
296+
297+
kfree(io->free_iov);
298+
}
299+
300+
static int io_send_setup(struct io_kiocb *req)
294301
{
295302
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
296-
struct io_async_msghdr *io;
303+
struct io_async_msghdr *kmsg = req->async_data;
297304
int ret;
298305

299-
if (req_has_async_data(req))
300-
return 0;
301-
sr->done_io = 0;
302-
if (!sr->addr)
303-
return 0;
304-
io = io_msg_alloc_async_prep(req);
305-
if (!io)
306-
return -ENOMEM;
307-
memset(&io->msg, 0, sizeof(io->msg));
308-
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &io->msg.msg_iter);
309-
if (unlikely(ret))
306+
kmsg->msg.msg_name = NULL;
307+
kmsg->msg.msg_namelen = 0;
308+
kmsg->msg.msg_control = NULL;
309+
kmsg->msg.msg_controllen = 0;
310+
kmsg->msg.msg_ubuf = NULL;
311+
312+
if (sr->addr) {
313+
ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
314+
if (unlikely(ret < 0))
315+
return ret;
316+
kmsg->msg.msg_name = &kmsg->addr;
317+
kmsg->msg.msg_namelen = sr->addr_len;
318+
}
319+
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
320+
if (unlikely(ret < 0))
310321
return ret;
311-
io->msg.msg_name = &io->addr;
312-
io->msg.msg_namelen = sr->addr_len;
313-
return move_addr_to_kernel(sr->addr, sr->addr_len, &io->addr);
322+
323+
return 0;
314324
}
315325

316-
int io_sendmsg_prep_async(struct io_kiocb *req)
326+
static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
317327
{
318-
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
328+
struct io_async_msghdr *kmsg;
319329
int ret;
320330

321-
sr->done_io = 0;
322-
if (!io_msg_alloc_async_prep(req))
331+
/* always locked for prep */
332+
kmsg = io_msg_alloc_async(req, 0);
333+
if (unlikely(!kmsg))
323334
return -ENOMEM;
324-
ret = io_sendmsg_copy_hdr(req, req->async_data);
335+
if (!is_msg)
336+
return io_send_setup(req);
337+
ret = io_sendmsg_copy_hdr(req, kmsg);
325338
if (!ret)
326339
req->flags |= REQ_F_NEED_CLEANUP;
327340
return ret;
328341
}
329342

330-
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
331-
{
332-
struct io_async_msghdr *io = req->async_data;
333-
334-
kfree(io->free_iov);
335-
}
336-
337343
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
338344
{
339345
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -362,7 +368,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
362368
if (req->ctx->compat)
363369
sr->msg_flags |= MSG_CMSG_COMPAT;
364370
#endif
365-
return 0;
371+
return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
366372
}
367373

368374
static void io_req_msg_cleanup(struct io_kiocb *req,
@@ -379,7 +385,7 @@ static void io_req_msg_cleanup(struct io_kiocb *req,
379385
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
380386
{
381387
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
382-
struct io_async_msghdr *kmsg;
388+
struct io_async_msghdr *kmsg = req->async_data;
383389
struct socket *sock;
384390
unsigned flags;
385391
int min_ret = 0;
@@ -389,17 +395,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
389395
if (unlikely(!sock))
390396
return -ENOTSOCK;
391397

392-
if (req_has_async_data(req)) {
393-
kmsg = req->async_data;
394-
} else {
395-
kmsg = io_msg_alloc_async(req, issue_flags);
396-
if (unlikely(!kmsg))
397-
return -ENOMEM;
398-
ret = io_sendmsg_copy_hdr(req, kmsg);
399-
if (ret)
400-
return ret;
401-
}
402-
403398
if (!(req->flags & REQ_F_POLLED) &&
404399
(sr->flags & IORING_RECVSEND_POLL_FIRST))
405400
return -EAGAIN;
@@ -437,52 +432,10 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
437432
return IOU_OK;
438433
}
439434

440-
static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
441-
unsigned int issue_flags)
442-
{
443-
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
444-
struct io_async_msghdr *kmsg;
445-
int ret;
446-
447-
if (req_has_async_data(req)) {
448-
kmsg = req->async_data;
449-
} else {
450-
kmsg = io_msg_alloc_async(req, issue_flags);
451-
if (unlikely(!kmsg))
452-
return ERR_PTR(-ENOMEM);
453-
kmsg->msg.msg_name = NULL;
454-
kmsg->msg.msg_namelen = 0;
455-
kmsg->msg.msg_control = NULL;
456-
kmsg->msg.msg_controllen = 0;
457-
kmsg->msg.msg_ubuf = NULL;
458-
459-
if (sr->addr) {
460-
ret = move_addr_to_kernel(sr->addr, sr->addr_len,
461-
&kmsg->addr);
462-
if (unlikely(ret < 0))
463-
return ERR_PTR(ret);
464-
kmsg->msg.msg_name = &kmsg->addr;
465-
kmsg->msg.msg_namelen = sr->addr_len;
466-
}
467-
468-
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
469-
&kmsg->msg.msg_iter);
470-
if (unlikely(ret))
471-
return ERR_PTR(ret);
472-
}
473-
474-
if (!(req->flags & REQ_F_POLLED) &&
475-
(sr->flags & IORING_RECVSEND_POLL_FIRST))
476-
return ERR_PTR(-EAGAIN);
477-
478-
return kmsg;
479-
}
480-
481435
int io_send(struct io_kiocb *req, unsigned int issue_flags)
482436
{
483437
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
484-
struct io_async_msghdr *kmsg;
485-
size_t len = sr->len;
438+
struct io_async_msghdr *kmsg = req->async_data;
486439
struct socket *sock;
487440
unsigned flags;
488441
int min_ret = 0;
@@ -492,13 +445,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
492445
if (unlikely(!sock))
493446
return -ENOTSOCK;
494447

495-
kmsg = io_send_setup(req, issue_flags);
496-
if (IS_ERR(kmsg))
497-
return PTR_ERR(kmsg);
498-
499-
ret = import_ubuf(ITER_SOURCE, sr->buf, len, &kmsg->msg.msg_iter);
500-
if (unlikely(ret))
501-
return ret;
448+
if (!(req->flags & REQ_F_POLLED) &&
449+
(sr->flags & IORING_RECVSEND_POLL_FIRST))
450+
return -EAGAIN;
502451

503452
flags = sr->msg_flags;
504453
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -1084,7 +1033,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
10841033
if (req->ctx->compat)
10851034
zc->msg_flags |= MSG_CMSG_COMPAT;
10861035
#endif
1087-
return 0;
1036+
return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
10881037
}
10891038

10901039
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
@@ -1173,7 +1122,7 @@ static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
11731122
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
11741123
{
11751124
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1176-
struct io_async_msghdr *kmsg;
1125+
struct io_async_msghdr *kmsg = req->async_data;
11771126
struct socket *sock;
11781127
unsigned msg_flags;
11791128
int ret, min_ret = 0;
@@ -1184,9 +1133,9 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
11841133
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
11851134
return -EOPNOTSUPP;
11861135

1187-
kmsg = io_send_setup(req, issue_flags);
1188-
if (IS_ERR(kmsg))
1189-
return PTR_ERR(kmsg);
1136+
if (!(req->flags & REQ_F_POLLED) &&
1137+
(zc->flags & IORING_RECVSEND_POLL_FIRST))
1138+
return -EAGAIN;
11901139

11911140
if (!zc->done_io) {
11921141
ret = io_send_zc_import(req, kmsg);
@@ -1242,7 +1191,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
12421191
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
12431192
{
12441193
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1245-
struct io_async_msghdr *kmsg;
1194+
struct io_async_msghdr *kmsg = req->async_data;
12461195
struct socket *sock;
12471196
unsigned flags;
12481197
int ret, min_ret = 0;
@@ -1255,17 +1204,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
12551204
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
12561205
return -EOPNOTSUPP;
12571206

1258-
if (req_has_async_data(req)) {
1259-
kmsg = req->async_data;
1260-
} else {
1261-
kmsg = io_msg_alloc_async(req, issue_flags);
1262-
if (unlikely(!kmsg))
1263-
return -ENOMEM;
1264-
ret = io_sendmsg_copy_hdr(req, kmsg);
1265-
if (ret)
1266-
return ret;
1267-
}
1268-
12691207
if (!(req->flags & REQ_F_POLLED) &&
12701208
(sr->flags & IORING_RECVSEND_POLL_FIRST))
12711209
return -EAGAIN;

io_uring/net.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,13 +34,11 @@ struct io_async_connect {
3434
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
3535
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
3636

37-
int io_sendmsg_prep_async(struct io_kiocb *req);
3837
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
3938
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4039
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
4140

4241
int io_send(struct io_kiocb *req, unsigned int issue_flags);
43-
int io_sendrecv_prep_async(struct io_kiocb *req);
4442

4543
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4644
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);

io_uring/opdef.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -527,7 +527,6 @@ const struct io_cold_def io_cold_defs[] = {
527527
.name = "SENDMSG",
528528
#if defined(CONFIG_NET)
529529
.async_size = sizeof(struct io_async_msghdr),
530-
.prep_async = io_sendmsg_prep_async,
531530
.cleanup = io_sendmsg_recvmsg_cleanup,
532531
.fail = io_sendrecv_fail,
533532
#endif
@@ -603,7 +602,6 @@ const struct io_cold_def io_cold_defs[] = {
603602
.async_size = sizeof(struct io_async_msghdr),
604603
.cleanup = io_sendmsg_recvmsg_cleanup,
605604
.fail = io_sendrecv_fail,
606-
.prep_async = io_sendrecv_prep_async,
607605
#endif
608606
},
609607
[IORING_OP_RECV] = {
@@ -688,7 +686,6 @@ const struct io_cold_def io_cold_defs[] = {
688686
.name = "SEND_ZC",
689687
#if defined(CONFIG_NET)
690688
.async_size = sizeof(struct io_async_msghdr),
691-
.prep_async = io_sendrecv_prep_async,
692689
.cleanup = io_send_zc_cleanup,
693690
.fail = io_sendrecv_fail,
694691
#endif
@@ -697,7 +694,6 @@ const struct io_cold_def io_cold_defs[] = {
697694
.name = "SENDMSG_ZC",
698695
#if defined(CONFIG_NET)
699696
.async_size = sizeof(struct io_async_msghdr),
700-
.prep_async = io_sendmsg_prep_async,
701697
.cleanup = io_send_zc_cleanup,
702698
.fail = io_sendrecv_fail,
703699
#endif

0 commit comments

Comments
 (0)