 #include "rsrc.h"
 #include "uring_cmd.h"
 
+static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_cache_entry *entry;
+	struct uring_cache *cache;
+
+	entry = io_alloc_cache_get(&ctx->uring_cache);
+	if (entry) {
+		cache = container_of(entry, struct uring_cache, cache);
+		req->flags |= REQ_F_ASYNC_DATA;
+		req->async_data = cache;
+		return cache;
+	}
+	if (!io_alloc_async_data(req))
+		return req->async_data;
+	return NULL;
+}
+
+static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache = req->async_data;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		return;
+	if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) {
+		ioucmd->sqe = NULL;
+		req->async_data = NULL;
+		req->flags &= ~REQ_F_ASYNC_DATA;
+	}
+}
+
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct task_struct *task, bool cancel_all)
 {
@@ -128,6 +160,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 	io_req_set_res(req, ret, 0);
 	if (req->ctx->flags & IORING_SETUP_CQE32)
 		io_req_set_cqe32_extra(req, res2, 0);
+	io_req_uring_cleanup(req, issue_flags);
 	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
@@ -142,13 +175,19 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
-int io_uring_cmd_prep_async(struct io_kiocb *req)
+static int io_uring_cmd_prep_setup(struct io_kiocb *req,
+				   const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache;
 
-	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = req->async_data;
-	return 0;
+	cache = io_uring_async_get(req);
+	if (cache) {
+		memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+		ioucmd->sqe = req->async_data;
+		return 0;
+	}
+	return -ENOMEM;
 }
 
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -173,9 +212,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		req->imu = ctx->user_bufs[index];
 		io_req_set_rsrc_node(req, ctx, 0);
 	}
-	ioucmd->sqe = sqe;
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
-	return 0;
+
+	return io_uring_cmd_prep_setup(req, sqe);
 }
 
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
@@ -206,23 +245,14 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		if (!req_has_async_data(req)) {
-			if (io_alloc_async_data(req))
-				return -ENOMEM;
-			io_uring_cmd_prep_async(req);
-		}
-		return -EAGAIN;
-	}
-
-	if (ret != -EIOCBQUEUED) {
-		if (ret < 0)
-			req_set_fail(req);
-		io_req_set_res(req, ret, 0);
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
 		return ret;
-	}
 
-	return IOU_ISSUE_SKIP_COMPLETE;
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_uring_cleanup(req, issue_flags);
+	io_req_set_res(req, ret, 0);
+	return ret;
 }
 
 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
@@ -311,3 +341,8 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
 #endif
+
+void io_uring_cache_free(struct io_cache_entry *entry)
+{
+	kfree(container_of(entry, struct uring_cache, cache));
+}
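
Not shown in this hunk are the struct uring_cache type and the per-ring uring_cache alloc cache that the new helpers rely on. A minimal sketch of how they are assumed to fit together follows; the exact layout and the io_alloc_cache_init/io_alloc_cache_free calls live in uring_cmd.h and io_uring.c and may differ in detail.

/*
 * Sketch only: assumed shape of the cached async data. The union lets the
 * free-list hook reuse the same memory that otherwise holds a copy of the
 * command SQE (two slots to cover IORING_SETUP_SQE128 rings).
 */
struct uring_cache {
	union {
		struct io_cache_entry	cache;	/* hook for ctx->uring_cache */
		struct io_uring_sqe	sqes[2];	/* room for a 128-byte SQE */
	};
};

/*
 * Assumed ring setup/teardown, mirroring the other per-ctx alloc caches:
 *
 *	io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
 *			    sizeof(struct uring_cache));
 *	...
 *	io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);
 *
 * io_uring_cache_free() above is the destructor passed to the cache when the
 * ring is torn down.
 */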