examples/vhost: fix launch with physical port
[ upstream commit 917229c ]

dpdk-vhost will fail to launch with a 40G i40e port because
there are not enough mbufs. This patch adds a new option
--total-num-mbufs, through which the user can set a larger
mbuf pool to avoid this problem.

Fixes: 4796ad6 ("examples/vhost: import userspace vhost application")

Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
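With this change, a user who hits mbuf-pool exhaustion on a high-speed port can simply request a bigger pool at launch. For illustration only (the core list, port mask, and socket path below are made up, not from the patch), an invocation might look like:

    ./dpdk-vhost -l 0-3 -n 4 -- -p 0x1 --socket-file /tmp/vhost-net.sock --total-num-mbufs 262144

Values larger than the 147456 default grow the pool; smaller values are ignored, as the parsing hunk further down shows.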
wenwumax authored and bluca committed Mar 9, 2022
1 parent 0b46c9c commit 80b4931
Showing 1 changed file with 28 additions and 55 deletions.
83 changes: 28 additions & 55 deletions examples/vhost/main.c
@@ -33,6 +33,8 @@
 #define MAX_QUEUES 128
 #endif
 
+#define NUM_MBUFS_DEFAULT 0x24000
+
 /* the maximum number of external ports supported */
 #define MAX_SUP_PORTS 1
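Note that 0x24000 is 147456 in decimal, matching the default advertised in the usage text added below.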

@@ -60,6 +62,9 @@
 /* Maximum long option length for option parsing. */
 #define MAX_LONG_OPT_SZ 64
 
+/* number of mbufs in all pools - if specified on command-line. */
+static int total_num_mbufs = NUM_MBUFS_DEFAULT;
+
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;

@@ -463,7 +468,8 @@ us_vhost_usage(const char *prgname)
 	"		--tso [0|1] disable/enable TCP segment offload.\n"
 	"		--client register a vhost-user socket as client mode.\n"
 	"		--dma-type register dma type for your vhost async driver. For example \"ioat\" for now.\n"
-	"		--dmas register dma channel for specific vhost device.\n",
+	"		--dmas register dma channel for specific vhost device.\n"
+	"		--total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
 	prgname);
}

@@ -491,7 +497,7 @@ us_vhost_parse_args(int argc, char **argv)
 		{"builtin-net-driver", no_argument, &builtin_net_driver, 1},
 		{"dma-type", required_argument, NULL, 0},
 		{"dmas", required_argument, NULL, 0},
-		{NULL, 0, 0, 0},
+		{"total-num-mbufs", required_argument, NULL, 0},
 	};
 
 	/* Parse command line */
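The new table entry uses a NULL "flag" pointer and a "val" of 0, so getopt_long() returns 0 when the option matches and sets option_index to the matching entry; the handler in the next hunk dispatches on the entry's name. A minimal standalone sketch of that mechanism (not code from the patch):

    #include <getopt.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(int argc, char **argv)
    {
        static struct option long_option[] = {
            {"total-num-mbufs", required_argument, NULL, 0},
            {NULL, 0, 0, 0},
        };
        int opt, option_index = 0;

        /* flag == NULL and val == 0 make getopt_long() return 0 on a
         * match; option_index identifies the matched table entry. */
        while ((opt = getopt_long(argc, argv, "", long_option,
                &option_index)) != -1) {
            if (opt == 0 && !strcmp(long_option[option_index].name,
                    "total-num-mbufs"))
                printf("requested %s mbufs\n", optarg);
        }
        return 0;
    }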
@@ -655,6 +661,21 @@
 				async_vhost_driver = 1;
 			}
 
+
+			if (!strncmp(long_option[option_index].name,
+				"total-num-mbufs", MAX_LONG_OPT_SZ)) {
+				ret = parse_num_opt(optarg, INT32_MAX);
+				if (ret == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG,
+						"Invalid argument for total-num-mbufs [0..N]\n");
+					us_vhost_usage(prgname);
+					return -1;
+				}
+
+				if (total_num_mbufs < ret)
+					total_num_mbufs = ret;
+			}
+
 			break;
 
 		/* Invalid option - print options. */
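The handler reuses the example's existing parse_num_opt() helper. As a rough sketch of what such a helper does (a reconstruction under assumptions, not the patch's code): parse a non-negative decimal string and reject malformed or out-of-range input:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Parse an unsigned decimal argument; return -1 on malformed
     * input or a value above max_valid_value. */
    static int
    parse_num_opt(const char *q_arg, uint32_t max_valid_value)
    {
        char *end = NULL;
        unsigned long num;

        errno = 0;
        num = strtoul(q_arg, &end, 10);
        if (q_arg[0] == '\0' || end == NULL || *end != '\0' || errno != 0)
            return -1;
        if (num > max_valid_value)
            return -1;

        return num;
    }

Note the `if (total_num_mbufs < ret)` guard: the option can only grow the pool beyond NUM_MBUFS_DEFAULT, never shrink it, so a too-small user value cannot reintroduce the launch failure the patch fixes.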
@@ -1443,57 +1464,6 @@ sigint_handler(__rte_unused int signum)
 	exit(0);
 }
 
-/*
- * While creating an mbuf pool, one key thing is to figure out how
- * many mbuf entries is enough for our use. FYI, here are some
- * guidelines:
- *
- * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
- *
- * - For each switch core (A CPU core does the packet switch), we need
- *   also make some reservation for receiving the packets from virtio
- *   Tx queue. How many is enough depends on the usage. It's normally
- *   a simple calculation like following:
- *
- *       MAX_PKT_BURST * max packet size / mbuf size
- *
- *   So, we definitely need allocate more mbufs when TSO is enabled.
- *
- * - Similarly, for each switching core, we should serve @nr_rx_desc
- *   mbufs for receiving the packets from physical NIC device.
- *
- * - We also need make sure, for each switch core, we have allocated
- *   enough mbufs to fill up the mbuf cache.
- */
-static void
-create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
-	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
-{
-	uint32_t nr_mbufs;
-	uint32_t nr_mbufs_per_core;
-	uint32_t mtu = 1500;
-
-	if (mergeable)
-		mtu = 9000;
-	if (enable_tso)
-		mtu = 64 * 1024;
-
-	nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
-		(mbuf_size - RTE_PKTMBUF_HEADROOM);
-	nr_mbufs_per_core += nr_rx_desc;
-	nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
-
-	nr_mbufs = nr_queues * nr_rx_desc;
-	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
-	nr_mbufs *= nr_port;
-
-	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
-		nr_mbuf_cache, 0, mbuf_size,
-		rte_socket_id());
-	if (mbuf_pool == NULL)
-		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-}
-
 /*
  * Main function, does initialisation and calls the per-lcore functions.
  */
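The deleted create_mbuf_pool() tried to size the pool from first principles: descriptors per rx queue, a per-switch-core burst reservation, and the mbuf cache fill. The fix drops that heuristic in favor of one flat, user-overridable figure. The cost is possible over-allocation on small setups; the benefit is that configurations needing more mbufs than the formula anticipated (such as the 40G i40e case cited in the commit message) no longer fail at pool setup.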
@@ -1552,8 +1522,11 @@ main(int argc, char *argv[])
 	 * many queues here. We probably should only do allocation for
 	 * those queues we are going to use.
 	 */
-	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
-		MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
+	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
+		MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+		rte_socket_id());
+	if (mbuf_pool == NULL)
+		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 
 	if (vm2vm_mode == VM2VM_HARDWARE) {
 		/* Enable VT loop back to let L2 switch to do it. */
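For reference, the rte_pktmbuf_pool_create() arguments here are: pool name, number of mbufs (now total_num_mbufs instead of a computed count), per-lcore cache size, per-mbuf private area size (0), data room size per mbuf, and the NUMA socket to allocate on.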
