Skip to content

Commit 2302539

Browse files
committed
xen/netback: use lateeoi irq binding
In order to reduce the chance for the system becoming unresponsive due to
event storms triggered by a misbehaving netfront, use the lateeoi irq
binding for netback and unmask the event channel only just before going
to sleep waiting for new events.

Make sure not to issue an EOI when none is pending by introducing an
eoi_pending element to struct xenvif_queue.

When no request has been consumed, set the spurious flag when sending
the EOI for an interrupt.

This is part of XSA-332.

Cc: stable@vger.kernel.org
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
1 parent 01263a1 commit 2302539

File tree

4 files changed: 86 additions (+), 14 deletions (−)

drivers/net/xen-netback/common.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
140140
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
141141
struct xenvif *vif; /* Parent VIF */
142142

143+
/*
144+
* TX/RX common EOI handling.
145+
* When feature-split-event-channels = 0, interrupt handler sets
146+
* NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
147+
* by the RX and TX interrupt handlers.
148+
* RX and TX handler threads will issue an EOI when either
149+
* NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
150+
* NETBK_TX_EOI) are set and they will reset those bits.
151+
*/
152+
atomic_t eoi_pending;
153+
#define NETBK_RX_EOI 0x01
154+
#define NETBK_TX_EOI 0x02
155+
#define NETBK_COMMON_EOI 0x04
156+
143157
/* Use NAPI for guest TX */
144158
struct napi_struct napi;
145159
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -378,6 +392,7 @@ int xenvif_dealloc_kthread(void *data);
378392

379393
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
380394

395+
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
381396
void xenvif_rx_action(struct xenvif_queue *queue);
382397
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
383398

drivers/net/xen-netback/interface.c

Lines changed: 52 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif)
7777
!vif->disabled;
7878
}
7979

80+
static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
81+
{
82+
bool rc;
83+
84+
rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
85+
if (rc)
86+
napi_schedule(&queue->napi);
87+
return rc;
88+
}
89+
8090
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
8191
{
8292
struct xenvif_queue *queue = dev_id;
93+
int old;
8394

84-
if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
85-
napi_schedule(&queue->napi);
95+
old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
96+
WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
97+
98+
if (!xenvif_handle_tx_interrupt(queue)) {
99+
atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
100+
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
101+
}
86102

87103
return IRQ_HANDLED;
88104
}
@@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
116132
return work_done;
117133
}
118134

135+
static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
136+
{
137+
bool rc;
138+
139+
rc = xenvif_have_rx_work(queue, false);
140+
if (rc)
141+
xenvif_kick_thread(queue);
142+
return rc;
143+
}
144+
119145
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
120146
{
121147
struct xenvif_queue *queue = dev_id;
148+
int old;
122149

123-
xenvif_kick_thread(queue);
150+
old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
151+
WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
152+
153+
if (!xenvif_handle_rx_interrupt(queue)) {
154+
atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
155+
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
156+
}
124157

125158
return IRQ_HANDLED;
126159
}
127160

128161
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
129162
{
130-
xenvif_tx_interrupt(irq, dev_id);
131-
xenvif_rx_interrupt(irq, dev_id);
163+
struct xenvif_queue *queue = dev_id;
164+
int old;
165+
166+
old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
167+
WARN(old, "Interrupt while EOI pending\n");
168+
169+
/* Use bitwise or as we need to call both functions. */
170+
if ((!xenvif_handle_tx_interrupt(queue) |
171+
!xenvif_handle_rx_interrupt(queue))) {
172+
atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
173+
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
174+
}
132175

133176
return IRQ_HANDLED;
134177
}
@@ -605,7 +648,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
605648
if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
606649
goto err_unmap;
607650

608-
err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
651+
err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
609652
if (err < 0)
610653
goto err_unmap;
611654

@@ -709,7 +752,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
709752

710753
if (tx_evtchn == rx_evtchn) {
711754
/* feature-split-event-channels == 0 */
712-
err = bind_interdomain_evtchn_to_irqhandler(
755+
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
713756
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
714757
queue->name, queue);
715758
if (err < 0)
@@ -720,7 +763,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
720763
/* feature-split-event-channels == 1 */
721764
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
722765
"%s-tx", queue->name);
723-
err = bind_interdomain_evtchn_to_irqhandler(
766+
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
724767
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
725768
queue->tx_irq_name, queue);
726769
if (err < 0)
@@ -730,7 +773,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
730773

731774
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
732775
"%s-rx", queue->name);
733-
err = bind_interdomain_evtchn_to_irqhandler(
776+
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
734777
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
735778
queue->rx_irq_name, queue);
736779
if (err < 0)

drivers/net/xen-netback/netback.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
169169

170170
if (more_to_do)
171171
napi_schedule(&queue->napi);
172+
else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
173+
&queue->eoi_pending) &
174+
(NETBK_TX_EOI | NETBK_COMMON_EOI))
175+
xen_irq_lateeoi(queue->tx_irq, 0);
172176
}
173177

174178
static void tx_add_credit(struct xenvif_queue *queue)
@@ -1643,9 +1647,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
16431647
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
16441648
{
16451649
struct xenvif *vif = data;
1650+
unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
16461651

1647-
while (xenvif_ctrl_work_todo(vif))
1652+
while (xenvif_ctrl_work_todo(vif)) {
16481653
xenvif_ctrl_action(vif);
1654+
eoi_flag = 0;
1655+
}
1656+
1657+
xen_irq_lateeoi(irq, eoi_flag);
16491658

16501659
return IRQ_HANDLED;
16511660
}

drivers/net/xen-netback/rx.c

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -503,13 +503,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
503503
return queue->stalled && prod - cons >= 1;
504504
}
505505

506-
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
506+
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
507507
{
508508
return xenvif_rx_ring_slots_available(queue) ||
509509
(queue->vif->stall_timeout &&
510510
(xenvif_rx_queue_stalled(queue) ||
511511
xenvif_rx_queue_ready(queue))) ||
512-
kthread_should_stop() ||
512+
(test_kthread && kthread_should_stop()) ||
513513
queue->vif->disabled;
514514
}
515515

@@ -540,15 +540,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
540540
{
541541
DEFINE_WAIT(wait);
542542

543-
if (xenvif_have_rx_work(queue))
543+
if (xenvif_have_rx_work(queue, true))
544544
return;
545545

546546
for (;;) {
547547
long ret;
548548

549549
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
550-
if (xenvif_have_rx_work(queue))
550+
if (xenvif_have_rx_work(queue, true))
551551
break;
552+
if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
553+
&queue->eoi_pending) &
554+
(NETBK_RX_EOI | NETBK_COMMON_EOI))
555+
xen_irq_lateeoi(queue->rx_irq, 0);
556+
552557
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
553558
if (!ret)
554559
break;

Comments (0)