netdev-dpdk: Detailed packet drop statistics.
OVS may be unable to transmit packets for multiple reasons on
the userspace datapath and today there is a single counter to
track packets dropped due to any of those reasons. This patch
adds custom software stats for the different reasons packets
may be dropped during tx/rx on the userspace datapath in OVS.

- MTU drops: drops that occur due to a too-large packet size
- QoS drops: drops that occur due to egress/ingress QoS
- Tx failures: drops as returned by the DPDK PMD send function

Note that the reason for tx failures is not specified in OVS.
In practice for vhost ports it is most common that tx failures
are because there are not enough available descriptors,
which is usually caused by misconfiguration of the guest queues
and/or because the guest is not consuming packets fast enough
from the queues.
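
As a rough illustration of where such failures come from, below is a
minimal, self-contained sketch (not the actual OVS helper; the function
name is hypothetical) of sending a burst through a DPDK PMD and counting
the packets it refused, i.e. the quantity this patch accounts as
tx_failure_drops:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Illustrative only: send a burst and report how many packets the
     * PMD did not accept, e.g. because the Tx descriptor ring is full. */
    static uint16_t
    example_tx_burst_failures(uint16_t port_id, uint16_t queue_id,
                              struct rte_mbuf **pkts, uint16_t cnt)
    {
        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, cnt);

        /* Unsent mbufs stay owned by the caller and must be freed. */
        for (uint16_t i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
        return cnt - nb_tx;
    }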

These counters are displayed along with other stats by the
"ovs-vsctl get interface <iface> statistics" command and are
available for dpdk and vhostuser/vhostuserclient ports.
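
For example, the new counters appear alongside the standard ones
(illustrative output; the values are hypothetical and the exact set of
stats depends on the port type and driver):

    $ ovs-vsctl get interface dpdk0 statistics
    {..., ovs_rx_qos_drops=0, ovs_tx_failure_drops=24,
     ovs_tx_mtu_exceeded_drops=0, ovs_tx_qos_drops=0, ovs_tx_retries=0, ...}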

Also the existing "tx_retries" counter for vhost ports has been
renamed to "ovs_tx_retries", so that all the custom statistics
that OVS accumulates itself carry the "ovs_" prefix. This
prevents custom stats names from overlapping with
driver/HW stats.

Acked-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: Sriram Vatala <sriram.v@altencalsoftlabs.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Sriram Vatala authored and igsilya committed Nov 11, 2019
1 parent b99ab8a commit 2f862c7
Showing 3 changed files with 100 additions and 36 deletions.
6 changes: 6 additions & 0 deletions Documentation/topics/dpdk/bridge.rst
@@ -75,6 +75,12 @@ OpenFlow14`` option::

$ ovs-ofctl -O OpenFlow14 dump-ports br0

There are custom statistics that OVS accumulates itself and these stats have
the ``ovs_`` prefix. These custom stats are shown along with other stats
using the following command::

$ ovs-vsctl get Interface <iface> statistics

EMC Insertion Probability
-------------------------

2 changes: 1 addition & 1 deletion Documentation/topics/dpdk/vhost-user.rst
@@ -551,7 +551,7 @@ processing packets at the required rate.
The amount of Tx retries on a vhost-user or vhost-user-client interface can be
shown with::

$ ovs-vsctl get Interface dpdkvhostclient0 statistics:tx_retries
$ ovs-vsctl get Interface dpdkvhostclient0 statistics:ovs_tx_retries

vhost-user Dequeue Zero Copy (experimental)
-------------------------------------------
128 changes: 93 additions & 35 deletions lib/netdev-dpdk.c
@@ -177,6 +177,20 @@ static const struct vhost_device_ops virtio_net_device_ops =
.destroy_connection = destroy_connection,
};

/* Custom software stats for dpdk ports */
struct netdev_dpdk_sw_stats {
/* No. of retries when unable to transmit. */
uint64_t tx_retries;
/* Packet drops when unable to transmit; Probably Tx queue is full. */
uint64_t tx_failure_drops;
/* Packet length greater than device MTU. */
uint64_t tx_mtu_exceeded_drops;
/* Packet drops in egress policer processing. */
uint64_t tx_qos_drops;
/* Packet drops in ingress policer processing. */
uint64_t rx_qos_drops;
};

enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
@@ -419,11 +433,10 @@ struct netdev_dpdk {

PADDED_MEMBERS(CACHE_LINE_SIZE,
struct netdev_stats stats;
/* Custom stat for retries when unable to transmit. */
uint64_t tx_retries;
struct netdev_dpdk_sw_stats *sw_stats;
/* Protects stats */
rte_spinlock_t stats_lock;
/* 4 pad bytes here. */
/* 36 pad bytes here. */
);

PADDED_MEMBERS(CACHE_LINE_SIZE,
@@ -1179,7 +1192,8 @@ common_construct(struct netdev *netdev, dpdk_port_t port_no,
dev->rte_xstats_ids = NULL;
dev->rte_xstats_ids_size = 0;

dev->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;
dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;

return 0;
}
@@ -1365,6 +1379,7 @@ common_destruct(struct netdev_dpdk *dev)
ovs_list_remove(&dev->list_node);
free(ovsrcu_get_protected(struct ingress_policer *,
&dev->ingress_policer));
free(dev->sw_stats);
ovs_mutex_destroy(&dev->mutex);
}

@@ -2154,16 +2169,18 @@ netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
}

static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
struct dp_packet **packets, int count,
int dropped)
int qos_drops)
{
int i;
unsigned int packet_size;
struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
struct netdev_stats *stats = &dev->stats;
struct dp_packet *packet;
unsigned int packet_size;
int i;

stats->rx_packets += count;
stats->rx_dropped += dropped;
stats->rx_dropped += qos_drops;
for (i = 0; i < count; i++) {
packet = packets[i];
packet_size = dp_packet_size(packet);
@@ -2186,6 +2203,8 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,

stats->rx_bytes += packet_size;
}

sw_stats->rx_qos_drops += qos_drops;
}

/*
@@ -2198,7 +2217,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
uint16_t nb_rx = 0;
uint16_t dropped = 0;
uint16_t qos_drops = 0;
int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
int vid = netdev_dpdk_get_vid(dev);

@@ -2225,16 +2244,16 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
}

if (policer) {
dropped = nb_rx;
qos_drops = nb_rx;
nb_rx = ingress_policer_run(policer,
(struct rte_mbuf **) batch->packets,
nb_rx, true);
dropped -= nb_rx;
qos_drops -= nb_rx;
}

rte_spinlock_lock(&dev->stats_lock);
netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
nb_rx, dropped);
netdev_dpdk_vhost_update_rx_counters(dev, batch->packets,
nb_rx, qos_drops);
rte_spinlock_unlock(&dev->stats_lock);

batch->count = nb_rx;
@@ -2284,6 +2303,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
if (OVS_UNLIKELY(dropped)) {
rte_spinlock_lock(&dev->stats_lock);
dev->stats.rx_dropped += dropped;
dev->sw_stats->rx_qos_drops += dropped;
rte_spinlock_unlock(&dev->stats_lock);
}

@@ -2343,20 +2363,30 @@ netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
}

static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
struct dp_packet **packets,
int attempted,
int dropped)
struct netdev_dpdk_sw_stats *sw_stats_add)
{
int i;
struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
int dropped = sw_stats_add->tx_mtu_exceeded_drops +
sw_stats_add->tx_qos_drops +
sw_stats_add->tx_failure_drops;
struct netdev_stats *stats = &dev->stats;
int sent = attempted - dropped;
int i;

stats->tx_packets += sent;
stats->tx_dropped += dropped;

for (i = 0; i < sent; i++) {
stats->tx_bytes += dp_packet_size(packets[i]);
}

sw_stats->tx_retries += sw_stats_add->tx_retries;
sw_stats->tx_failure_drops += sw_stats_add->tx_failure_drops;
sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops;
sw_stats->tx_qos_drops += sw_stats_add->tx_qos_drops;
}

static void
@@ -2365,8 +2395,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
unsigned int total_pkts = cnt;
unsigned int dropped = 0;
struct netdev_dpdk_sw_stats sw_stats_add;
unsigned int n_packets_to_free = cnt;
unsigned int total_packets = cnt;
int i, retries = 0;
int max_retries = VHOST_ENQ_RETRY_MIN;
int vid = netdev_dpdk_get_vid(dev);
@@ -2387,9 +2418,14 @@
}

cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
sw_stats_add.tx_mtu_exceeded_drops = total_packets - cnt;

/* Check if QoS has been configured for the netdev */
sw_stats_add.tx_qos_drops = cnt;
cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
dropped = total_pkts - cnt;
sw_stats_add.tx_qos_drops -= cnt;

n_packets_to_free = cnt;

do {
int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
@@ -2416,14 +2452,16 @@

rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

sw_stats_add.tx_failure_drops = cnt;
sw_stats_add.tx_retries = MIN(retries, max_retries);

rte_spinlock_lock(&dev->stats_lock);
netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
cnt + dropped);
dev->tx_retries += MIN(retries, max_retries);
netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets,
&sw_stats_add);
rte_spinlock_unlock(&dev->stats_lock);

out:
for (i = 0; i < total_pkts - dropped; i++) {
for (i = 0; i < n_packets_to_free; i++) {
dp_packet_delete(pkts[i]);
}
}
@@ -2442,14 +2480,18 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
#endif
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
uint32_t cnt = batch_cnt;
uint32_t dropped = 0;
uint32_t tx_failure = 0;
uint32_t mtu_drops = 0;
uint32_t qos_drops = 0;

if (dev->type != DPDK_DEV_VHOST) {
/* Check if QoS has been configured for this netdev. */
cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
batch_cnt, false);
dropped += batch_cnt - cnt;
qos_drops = batch_cnt - cnt;
}

uint32_t txcnt = 0;
@@ -2462,13 +2504,13 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d",
size, dev->max_packet_len);

dropped++;
mtu_drops++;
continue;
}

pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
if (OVS_UNLIKELY(!pkts[txcnt])) {
dropped += cnt - i;
dropped = cnt - i;
break;
}

@@ -2485,13 +2527,17 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
__netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
txcnt);
} else {
dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
}
}

dropped += qos_drops + mtu_drops + tx_failure;
if (OVS_UNLIKELY(dropped)) {
rte_spinlock_lock(&dev->stats_lock);
dev->stats.tx_dropped += dropped;
sw_stats->tx_failure_drops += tx_failure;
sw_stats->tx_mtu_exceeded_drops += mtu_drops;
sw_stats->tx_qos_drops += qos_drops;
rte_spinlock_unlock(&dev->stats_lock);
}
}
@@ -2533,19 +2579,27 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
dpdk_do_tx_copy(netdev, qid, batch);
dp_packet_delete_batch(batch, true);
} else {
struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
int tx_cnt, dropped;
int tx_failure, mtu_drops, qos_drops;
int batch_cnt = dp_packet_batch_size(batch);
struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
mtu_drops = batch_cnt - tx_cnt;
qos_drops = tx_cnt;
tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
dropped = batch_cnt - tx_cnt;
qos_drops -= tx_cnt;

dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);

dropped = tx_failure + mtu_drops + qos_drops;
if (OVS_UNLIKELY(dropped)) {
rte_spinlock_lock(&dev->stats_lock);
dev->stats.tx_dropped += dropped;
sw_stats->tx_failure_drops += tx_failure;
sw_stats->tx_mtu_exceeded_drops += mtu_drops;
sw_stats->tx_qos_drops += qos_drops;
rte_spinlock_unlock(&dev->stats_lock);
}
}
@@ -2856,8 +2910,12 @@ netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
int i, n;

#define SW_CSTATS \
SW_CSTAT(tx_retries)
#define SW_CSTATS \
SW_CSTAT(tx_retries) \
SW_CSTAT(tx_failure_drops) \
SW_CSTAT(tx_mtu_exceeded_drops) \
SW_CSTAT(tx_qos_drops) \
SW_CSTAT(rx_qos_drops)

#define SW_CSTAT(NAME) + 1
custom_stats->size = SW_CSTATS;
@@ -2870,7 +2928,7 @@ netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
rte_spinlock_lock(&dev->stats_lock);
i = 0;
#define SW_CSTAT(NAME) \
custom_stats->counters[i++].value = dev->NAME;
custom_stats->counters[i++].value = dev->sw_stats->NAME;
SW_CSTATS;
#undef SW_CSTAT
rte_spinlock_unlock(&dev->stats_lock);
@@ -2881,8 +2939,8 @@ netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
n = 0;
#define SW_CSTAT(NAME) \
if (custom_stats->counters[i].value != UINT64_MAX) { \
ovs_strlcpy(custom_stats->counters[n].name, #NAME, \
NETDEV_CUSTOM_STATS_NAME_SIZE); \
ovs_strlcpy(custom_stats->counters[n].name, \
"ovs_"#NAME, NETDEV_CUSTOM_STATS_NAME_SIZE); \
custom_stats->counters[n].value = custom_stats->counters[i].value; \
n++; \
} \
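
For readers unfamiliar with the X-macro trick used in
netdev_dpdk_get_sw_custom_stats() above, here is a self-contained sketch
(standalone illustration, not OVS code) of the same pattern: one
SW_CSTATS list is expanded once to count the entries and once to emit
the "ovs_"-prefixed names:

    #include <stdio.h>

    /* One list of stat names; each use site redefines SW_CSTAT to
     * expand the list differently. */
    #define SW_CSTATS \
        SW_CSTAT(tx_retries) \
        SW_CSTAT(tx_failure_drops) \
        SW_CSTAT(tx_mtu_exceeded_drops) \
        SW_CSTAT(tx_qos_drops) \
        SW_CSTAT(rx_qos_drops)

    int
    main(void)
    {
        /* Expansion 1: count entries.  SW_CSTATS becomes
         * "+ 1 + 1 + 1 + 1 + 1", so n_stats is 5. */
    #define SW_CSTAT(NAME) + 1
        int n_stats = 0 SW_CSTATS;
    #undef SW_CSTAT

        /* Expansion 2: print one "ovs_"-prefixed name per entry, the
         * same prefixing the patch applies to the exported stats. */
    #define SW_CSTAT(NAME) puts("ovs_" #NAME);
        SW_CSTATS;
    #undef SW_CSTAT

        printf("%d custom stats\n", n_stats);
        return 0;
    }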
