 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
-#include <linux/freezer.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -1095,8 +1094,6 @@ static bool io_match_task(struct io_kiocb *head,
 	io_for_each_link(req, head) {
 		if (req->flags & REQ_F_INFLIGHT)
 			return true;
-		if (req->task->files == files)
-			return true;
 	}
 	return false;
 }
@@ -1239,16 +1236,16 @@ static void io_queue_async_work(struct io_kiocb *req)
 	BUG_ON(!tctx);
 	BUG_ON(!tctx->io_wq);
 
-	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-					&req->work, req->flags);
 	/* init ->work of the whole link before punting */
 	io_prep_async_link(req);
+	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+					&req->work, req->flags);
 	io_wq_enqueue(tctx->io_wq, &req->work);
 	if (link)
 		io_queue_linked_timeout(link);
 }
 
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
 	struct io_timeout_data *io = req->async_data;
 	int ret;
@@ -1258,31 +1255,11 @@ static void io_kill_timeout(struct io_kiocb *req)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, 0);
+		io_cqring_fill_event(req, status);
 		io_put_req_deferred(req, 1);
 	}
 }
 
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-			     struct files_struct *files)
-{
-	struct io_kiocb *req, *tmp;
-	int canceled = 0;
-
-	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-		if (io_match_task(req, tsk, files)) {
-			io_kill_timeout(req);
-			canceled++;
-		}
-	}
-	spin_unlock_irq(&ctx->completion_lock);
-	return canceled != 0;
-}
-
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	do {
@@ -1327,7 +1304,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 			break;
 
 		list_del_init(&req->timeout.list);
-		io_kill_timeout(req);
+		io_kill_timeout(req, 0);
 	} while (!list_empty(&ctx->timeout_list));
 
 	ctx->cq_last_tm_flush = seq;
@@ -2524,13 +2501,12 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
 	int cflags = 0;
 
+	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+		kiocb_end_write(req);
 	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
 		return;
 	if (res != req->result)
 		req_set_fail_links(req);
-
-	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
-		kiocb_end_write(req);
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_rw_kbuf(req);
 	__io_req_complete(req, issue_flags, res, cflags);
@@ -3978,6 +3954,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 static int io_provide_buffers_prep(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
+	unsigned long size;
 	struct io_provide_buf *p = &req->pbuf;
 	u64 tmp;
 
@@ -3991,7 +3968,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
 	p->addr = READ_ONCE(sqe->addr);
 	p->len = READ_ONCE(sqe->len);
 
-	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
+	size = (unsigned long)p->len * p->nbufs;
+	if (!access_ok(u64_to_user_ptr(p->addr), size))
 		return -EFAULT;
 
 	p->bgid = READ_ONCE(sqe->buf_group);
@@ -4820,7 +4798,6 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 			ret = -ENOMEM;
 			goto out;
 		}
-		io = req->async_data;
 		memcpy(req->async_data, &__io, sizeof(__io));
 		return -EAGAIN;
 	}
@@ -5583,7 +5560,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-	io_req_track_inflight(req);
+	if (is_timeout_link)
+		io_req_track_inflight(req);
 	return 0;
 }
 
@@ -6479,15 +6457,15 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	ret = io_init_req(ctx, req, sqe);
 	if (unlikely(ret)) {
 fail_req:
-		io_put_req(req);
-		io_req_complete(req, ret);
 		if (link->head) {
 			/* fail even hard links since we don't submit */
 			link->head->flags |= REQ_F_FAIL_LINK;
 			io_put_req(link->head);
 			io_req_complete(link->head, -ECANCELED);
 			link->head = NULL;
 		}
+		io_put_req(req);
+		io_req_complete(req, ret);
 		return ret;
 	}
 	ret = io_req_prep(req, sqe);
@@ -6764,8 +6742,13 @@ static int io_sq_thread(void *data)
 			timeout = jiffies + sqd->sq_thread_idle;
 			continue;
 		}
-		if (fatal_signal_pending(current))
+		if (signal_pending(current)) {
+			struct ksignal ksig;
+
+			if (!get_signal(&ksig))
+				continue;
 			break;
+		}
 		sqt_spin = false;
 		cap_entries = !list_is_singular(&sqd->ctx_list);
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6808,7 +6791,6 @@ static int io_sq_thread(void *data)
 
 			mutex_unlock(&sqd->lock);
 			schedule();
-			try_to_freeze();
 			mutex_lock(&sqd->lock);
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
 				io_ring_clear_wakeup_flag(ctx);
@@ -6873,7 +6855,7 @@ static int io_run_task_work_sig(void)
 		return 1;
 	if (!signal_pending(current))
 		return 0;
-	if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
+	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
 		return -ERESTARTSYS;
 	return -EINTR;
 }
@@ -8563,6 +8545,14 @@ static void io_ring_exit_work(struct work_struct *work)
 	struct io_tctx_node *node;
 	int ret;
 
+	/* prevent SQPOLL from submitting new requests */
+	if (ctx->sq_data) {
+		io_sq_thread_park(ctx->sq_data);
+		list_del_init(&ctx->sqd_list);
+		io_sqd_update_thread_idle(ctx->sq_data);
+		io_sq_thread_unpark(ctx->sq_data);
+	}
+
 	/*
 	 * If we're doing polled IO and end up having requests being
 	 * submitted async (out-of-line), then completions can come in while
@@ -8599,6 +8589,28 @@ static void io_ring_exit_work(struct work_struct *work)
 	io_ring_ctx_free(ctx);
 }
 
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+			     struct files_struct *files)
+{
+	struct io_kiocb *req, *tmp;
+	int canceled = 0;
+
+	spin_lock_irq(&ctx->completion_lock);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_match_task(req, tsk, files)) {
+			io_kill_timeout(req, -ECANCELED);
+			canceled++;
+		}
+	}
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	if (canceled != 0)
+		io_cqring_ev_posted(ctx);
+	return canceled != 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	unsigned long index;
@@ -8614,14 +8626,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_unregister_personality(ctx, index);
 	mutex_unlock(&ctx->uring_lock);
 
-	/* prevent SQPOLL from submitting new requests */
-	if (ctx->sq_data) {
-		io_sq_thread_park(ctx->sq_data);
-		list_del_init(&ctx->sqd_list);
-		io_sqd_update_thread_idle(ctx->sq_data);
-		io_sq_thread_unpark(ctx->sq_data);
-	}
-
 	io_kill_timeouts(ctx, NULL, NULL);
 	io_poll_remove_all(ctx, NULL, NULL);
 