netdev-dpdk: Free mempool only when no in-use mbufs.
DPDK mempools are freed when they are no longer needed.
This can happen when a port is removed or a port's MTU
is reconfigured so that a new mempool is used.

An mbuf still held in a NIC Tx queue may later be
returned to a mempool that has already been freed, and
this can lead to a segfault.
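
To make the failure window concrete, here is a minimal sketch of the
pre-patch sequence, assuming a port and Tx queue that are already
configured; everything except the DPDK calls themselves
(rte_pktmbuf_alloc(), rte_eth_tx_burst(), rte_mempool_free()) is
illustrative, not code from this patch.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Illustrative sketch of the race this commit fixes; not code from
     * the patch.  Assumes port_id/queue 0 were set up with the usual
     * rte_eth_dev_configure()/rte_eth_tx_queue_setup() calls. */
    static void
    mtu_reconfigure_race(uint16_t port_id, struct rte_mempool *old_mp,
                         struct rte_mempool *new_mp)
    {
        /* 1. An mbuf from the old pool is handed to the NIC.  The driver
         *    may keep it in its Tx ring until a later descriptor
         *    cleanup. */
        struct rte_mbuf *pkt = rte_pktmbuf_alloc(old_mp);
        if (pkt) {
            rte_eth_tx_burst(port_id, 0, &pkt, 1);
        }

        /* 2. An MTU change makes old_mp unused, so pre-patch OVS freed
         *    it immediately, even though the Tx ring still holds its
         *    mbufs. */
        rte_mempool_free(old_mp);

        /* 3. A later burst makes the driver complete old descriptors and
         *    return those mbufs to a mempool that no longer exists: a
         *    use-after-free that can segfault. */
        struct rte_mbuf *next = rte_pktmbuf_alloc(new_mp);
        if (next) {
            rte_eth_tx_burst(port_id, 0, &next, 1);
        }
    }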

To prevent this, free a mempool only when it is no
longer needed and has no in-use mbufs. As this might
not be possible immediately, sweep the mempools any
time a port tries to get a mempool.
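
The caller side of this contract is not part of the diff below. Here is
a minimal sketch of how an MTU reconfiguration is expected to drive
these helpers; the struct and function names are hypothetical, and only
dpdk_mp_get(), dpdk_mp_put() and struct dpdk_mp come from the code being
patched.

    #include <errno.h>

    /* Hypothetical caller; names ending in _sketch are invented for
     * illustration.  The point is the lifecycle: take a reference on the
     * pool for the new MTU, drop the reference on the old one, and let a
     * later dpdk_mp_sweep() reclaim the old pool once its mbufs drain. */
    struct netdev_dpdk_sketch {
        int socket_id;
        int mtu;
        struct dpdk_mp *dmp;
    };

    static int
    reconfigure_mtu_sketch(struct netdev_dpdk_sketch *dev, int new_mtu)
    {
        struct dpdk_mp *new_dmp = dpdk_mp_get(dev->socket_id, new_mtu);

        if (!new_dmp) {
            return ENOMEM;          /* Keep the old pool on failure. */
        }
        dpdk_mp_put(dev->dmp);      /* Now only drops the refcount; the
                                     * free is deferred to
                                     * dpdk_mp_sweep(). */
        dev->dmp = new_dmp;
        dev->mtu = new_mtu;
        return 0;
    }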

Fixes: 8d38823 ("netdev-dpdk: fix memory leak")
Cc: mark.b.kavanagh81@gmail.com
Cc: Ilya Maximets <i.maximets@samsung.com>
Reported-by: Venkatesan Pradeep <venkatesan.pradeep@ericsson.com>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: Ian Stokes <ian.stokes@intel.com>
kevintraynor authored and istokes committed Apr 21, 2018
1 parent 1a8ba9f commit 68fda29
1 changed file: lib/netdev-dpdk.c (45 additions, 11 deletions)
@@ -516,30 +516,69 @@ dpdk_mp_create(int socket_id, int mtu)
     return NULL;
 }
 
+static int
+dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    unsigned ring_count;
+    /* This logic is needed because rte_mempool_full() is not guaranteed to
+     * be atomic and mbufs could be moved from mempool cache --> mempool ring
+     * during the call. However, as no mbufs will be taken from the mempool
+     * at this time, we can work around it by also checking the ring entries
+     * separately and ensuring that they have not changed.
+     */
+    ring_count = rte_mempool_ops_get_count(mp);
+    if (rte_mempool_full(mp) && rte_mempool_ops_get_count(mp) == ring_count) {
+        return 1;
+    }
+
+    return 0;
+}
+
+/* Free unused mempools. */
+static void
+dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex)
+{
+    struct dpdk_mp *dmp, *next;
+
+    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_list) {
+        if (!dmp->refcount && dpdk_mp_full(dmp->mp)) {
+            ovs_list_remove(&dmp->list_node);
+            rte_mempool_free(dmp->mp);
+            rte_free(dmp);
+        }
+    }
+}
+
 static struct dpdk_mp *
 dpdk_mp_get(int socket_id, int mtu)
 {
     struct dpdk_mp *dmp;
+    bool reuse = false;
 
     ovs_mutex_lock(&dpdk_mp_mutex);
     LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
         if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
             dmp->refcount++;
-            goto out;
+            reuse = true;
+            break;
         }
     }
+    /* Sweep mempools after reuse or before create. */
+    dpdk_mp_sweep();
 
-    dmp = dpdk_mp_create(socket_id, mtu);
-    if (dmp) {
-        ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
+    if (!reuse) {
+        dmp = dpdk_mp_create(socket_id, mtu);
+        if (dmp) {
+            ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
+        }
     }
 
-out:
     ovs_mutex_unlock(&dpdk_mp_mutex);
 
     return dmp;
 }
 
+/* Decrement reference to a mempool. */
 static void
 dpdk_mp_put(struct dpdk_mp *dmp)
 {
@@ -549,12 +588,7 @@ dpdk_mp_put(struct dpdk_mp *dmp)
 
     ovs_mutex_lock(&dpdk_mp_mutex);
     ovs_assert(dmp->refcount);
-
-    if (!--dmp->refcount) {
-        ovs_list_remove(&dmp->list_node);
-        rte_mempool_free(dmp->mp);
-        rte_free(dmp);
-    }
+    dmp->refcount--;
     ovs_mutex_unlock(&dpdk_mp_mutex);
 }

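A note on the trickiest piece above, dpdk_mp_full(): rte_mempool_full()
sums the pool's ring and per-lcore caches without any lock, so mbufs
migrating from a cache into the ring mid-call can be counted twice. The
sketch below is modeled loosely on rte_mempool_avail_count() (an
assumption about its internals, not a copy of DPDK source) and marks
where the race window sits.

    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Conceptual sketch of the unlocked two-phase count behind
     * rte_mempool_full(); not the actual DPDK source. */
    static unsigned
    avail_count_sketch(const struct rte_mempool *mp)
    {
        unsigned count = rte_mempool_ops_get_count(mp); /* ring first */
        unsigned lcore_id;

        if (mp->cache_size == 0) {
            return count;
        }
        /* A flush on another core enqueues cached mbufs into the ring
         * before it decrements cache->len.  Mid-flush, the same mbufs
         * are visible both in the ring count above and in a cache
         * length below, so the pool can briefly look full while mbufs
         * are still in flight. */
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
            count += mp->local_cache[lcore_id].len;
        }
        return count > mp->size ? mp->size : count;
    }

dpdk_mp_full() guards against this by snapshotting the ring count before
calling rte_mempool_full() and discarding a "full" verdict unless the
ring count is unchanged afterwards. Since nothing allocates from a
refcount-zero pool, a stable ring count means no cache-to-ring move
raced with the check, and the rte_mempool_free() in dpdk_mp_sweep() is
safe.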
