xen/netfront: harden netfront against event channel storms
commit b27d479 upstream.

The Xen netfront driver is still vulnerable to an attack via an excessive
number of events sent by the backend. Fix that by using lateeoi event
channels.
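
As an illustration of that approach (condensed from the tx path of the diff
below, not new code): instead of bind_evtchn_to_irqhandler(), the handler is
bound with bind_evtchn_to_irqhandler_lateeoi() and acks the event itself via
xen_irq_lateeoi(), keeping the XEN_EOI_FLAG_SPURIOUS hint whenever no real
work was found, which lets Xen throttle a backend that floods the frontend
with events.

    static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
    {
            unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

            /* xennet_handle_tx() performs the tx completion work and clears
             * the spurious hint (sets *eoi to 0) when it found work; it
             * returns false, and no EOI is sent, once the device is marked
             * broken.
             */
            if (likely(xennet_handle_tx(dev_id, &eoiflag)))
                    xen_irq_lateeoi(irq, eoiflag);

            return IRQ_HANDLED;
    }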

To be able to detect the case of no rx responses being added while the
carrier is down, a new lock is needed in order to update and test rsp_cons
and the number of unconsumed responses seen so far atomically.
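
For reference, the update side of that lock as added by the diff below
(condensed here): rsp_cons and the count of already-seen unconsumed responses
are only ever changed together under rx_cons_lock, so the rx interrupt handler
can compare RING_HAS_UNCONSUMED_RESPONSES() against rx_rsp_unconsumed and tell
genuinely new responses apart from an event storm carrying no new work.

    static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
    {
            unsigned long flags;

            /* Update the consumer index and the count of responses seen but
             * not yet consumed atomically with respect to the rx interrupt
             * handler.
             */
            spin_lock_irqsave(&queue->rx_cons_lock, flags);
            queue->rx.rsp_cons = val;
            queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
            spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
    }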

This is part of XSA-391

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
jgross1 authored and gregkh committed Dec 22, 2021
1 parent 8ac3b6e commit d31b337
Showing 1 changed file with 94 additions and 31 deletions.
drivers/net/xen-netfront.c: 125 changes (94 additions & 31 deletions)
@@ -148,6 +148,9 @@ struct netfront_queue {
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

unsigned int rx_rsp_unconsumed;
spinlock_t rx_cons_lock;

struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
};
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
return 0;
}

static void xennet_tx_buf_gc(struct netfront_queue *queue)
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
bool work_done = false;
const struct device *dev = &queue->info->netdev->dev;

BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response txrsp;

work_done = true;

RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)

xennet_maybe_wake_tx(queue);

return;
return work_done;

err:
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");

return work_done;
}

struct xennet_gnttab_make_txreq {
@@ -836,6 +844,16 @@ static int xennet_close(struct net_device *dev)
return 0;
}

static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;

spin_lock_irqsave(&queue->rx_cons_lock, flags);
queue->rx.rsp_cons = val;
queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
@@ -887,7 +905,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
xennet_move_rx_slot(queue, skb, ref);
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

queue->rx.rsp_cons = cons;
xennet_set_rx_rsp_cons(queue, cons);
return err;
}

@@ -1041,7 +1059,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
}

if (unlikely(err))
queue->rx.rsp_cons = cons + slots;
xennet_set_rx_rsp_cons(queue, cons + slots);

return err;
}
@@ -1095,7 +1113,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
queue->rx.rsp_cons = ++cons + skb_queue_len(list);
xennet_set_rx_rsp_cons(queue,
++cons + skb_queue_len(list));
kfree_skb(nskb);
return -ENOENT;
}
@@ -1108,7 +1127,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}

queue->rx.rsp_cons = cons;
xennet_set_rx_rsp_cons(queue, cons);

return 0;
}
@@ -1231,7 +1250,9 @@ static int xennet_poll(struct napi_struct *napi, int budget)

if (unlikely(xennet_set_skb_gso(skb, gso))) {
__skb_queue_head(&tmpq, skb);
queue->rx.rsp_cons += skb_queue_len(&tmpq);
xennet_set_rx_rsp_cons(queue,
queue->rx.rsp_cons +
skb_queue_len(&tmpq));
goto err;
}
}
@@ -1255,7 +1276,8 @@ static int xennet_poll(struct napi_struct *napi, int budget)

__skb_queue_tail(&rxq, skb);

i = ++queue->rx.rsp_cons;
i = queue->rx.rsp_cons + 1;
xennet_set_rx_rsp_cons(queue, i);
work_done++;
}
if (need_xdp_flush)
@@ -1419,40 +1441,79 @@ static int xennet_set_features(struct net_device *dev,
return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
struct netfront_queue *queue = dev_id;
unsigned long flags;

if (queue->info->broken)
return IRQ_HANDLED;
if (unlikely(queue->info->broken))
return false;

spin_lock_irqsave(&queue->tx_lock, flags);
xennet_tx_buf_gc(queue);
if (xennet_tx_buf_gc(queue))
*eoi = 0;
spin_unlock_irqrestore(&queue->tx_lock, flags);

return true;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

if (likely(xennet_handle_tx(dev_id, &eoiflag)))
xen_irq_lateeoi(irq, eoiflag);

return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
struct netfront_queue *queue = dev_id;
struct net_device *dev = queue->info->netdev;
unsigned int work_queued;
unsigned long flags;

if (queue->info->broken)
return IRQ_HANDLED;
if (unlikely(queue->info->broken))
return false;

spin_lock_irqsave(&queue->rx_cons_lock, flags);
work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
if (work_queued > queue->rx_rsp_unconsumed) {
queue->rx_rsp_unconsumed = work_queued;
*eoi = 0;
} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
const struct device *dev = &queue->info->netdev->dev;

spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
dev_alert(dev, "RX producer index going backwards\n");
dev_alert(dev, "Disabled for further use\n");
queue->info->broken = true;
return false;
}
spin_unlock_irqrestore(&queue->rx_cons_lock, flags);

if (likely(netif_carrier_ok(dev) &&
RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
napi_schedule(&queue->napi);

return true;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

if (likely(xennet_handle_rx(dev_id, &eoiflag)))
xen_irq_lateeoi(irq, eoiflag);

return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
xennet_tx_interrupt(irq, dev_id);
xennet_rx_interrupt(irq, dev_id);
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

if (xennet_handle_tx(dev_id, &eoiflag) &&
xennet_handle_rx(dev_id, &eoiflag))
xen_irq_lateeoi(irq, eoiflag);

return IRQ_HANDLED;
}

@@ -1770,9 +1831,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
if (err < 0)
goto fail;

err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_interrupt,
0, queue->info->netdev->name, queue);
err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
xennet_interrupt, 0,
queue->info->netdev->name,
queue);
if (err < 0)
goto bind_fail;
queue->rx_evtchn = queue->tx_evtchn;
@@ -1800,18 +1862,18 @@ static int setup_netfront_split(struct netfront_queue *queue)

snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_tx_interrupt,
0, queue->tx_irq_name, queue);
err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
xennet_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
queue->tx_irq = err;

snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
xennet_rx_interrupt,
0, queue->rx_irq_name, queue);
err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
xennet_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
queue->rx_irq = err;
@@ -1913,6 +1975,7 @@ static int xennet_init_queue(struct netfront_queue *queue)

spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
spin_lock_init(&queue->rx_cons_lock);

timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
