Skip to content

Commit 2efdaf7

Browse files
soheilhy authored and torvalds committed
epoll: simplify signal handling
Check signals before locking ep->lock, and immediately return -EINTR if
there is any signal pending. This saves a few loads, stores, and branches
from the hot path and simplifies the loop structure for follow up patches.

Link: https://lkml.kernel.org/r/20201106231635.3528496-3-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 289caf5 commit 2efdaf7

File tree

1 file changed

+10
-10
lines changed

1 file changed

+10
-10
lines changed

fs/eventpoll.c

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -1733,7 +1733,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms)
17331733
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
17341734
int maxevents, long timeout)
17351735
{
1736-
int res = 0, eavail, timed_out = 0;
1736+
int res, eavail, timed_out = 0;
17371737
u64 slack = 0;
17381738
wait_queue_entry_t wait;
17391739
ktime_t expires, *to = NULL;
@@ -1780,6 +1780,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
17801780
ep_reset_busy_poll_napi_id(ep);
17811781

17821782
do {
1783+
if (signal_pending(current))
1784+
return -EINTR;
1785+
17831786
/*
17841787
* Internally init_wait() uses autoremove_wake_function(),
17851788
* thus wait entry is removed from the wait queue on each
@@ -1809,15 +1812,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
18091812
* important.
18101813
*/
18111814
eavail = ep_events_available(ep);
1812-
if (!eavail) {
1813-
if (signal_pending(current))
1814-
res = -EINTR;
1815-
else
1816-
__add_wait_queue_exclusive(&ep->wq, &wait);
1817-
}
1815+
if (!eavail)
1816+
__add_wait_queue_exclusive(&ep->wq, &wait);
1817+
18181818
write_unlock_irq(&ep->lock);
18191819

1820-
if (!eavail && !res)
1820+
if (!eavail)
18211821
timed_out = !schedule_hrtimeout_range(to, slack,
18221822
HRTIMER_MODE_ABS);
18231823

@@ -1853,14 +1853,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
18531853
* finding more events available and fetching
18541854
* repeatedly.
18551855
*/
1856-
res = -EINTR;
1856+
return -EINTR;
18571857
}
18581858
/*
18591859
* Try to transfer events to user space. In case we get 0 events and
18601860
* there's still timeout left over, we go trying again in search of
18611861
* more luck.
18621862
*/
1863-
if (!res && eavail &&
1863+
if (eavail &&
18641864
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
18651865
goto fetch_events;
18661866

0 commit comments

Comments (0)