net/i40e: fix Tx when TSO is enabled
[ upstream commit 29b2ba8 ]

Hardware limits the maximum buffer size per Tx descriptor to (16K-1)B.
So when TSO is enabled, an mbuf's data size may exceed that limit and
cause malicious behavior on the NIC. This patch fixes the issue by
using additional Tx descriptors for such oversized buffers.
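
For illustration only (not part of the patch): a minimal standalone
sketch of the descriptor arithmetic, assuming the (16K-1)B cap works
out to 16383, i.e. I40E_TXD_QW1_TX_BUF_SZ_MASK >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT.

/* Illustration only: how many Tx descriptors one oversized TSO mbuf
 * segment needs under an assumed (16K-1)B = 16383B per-descriptor cap. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16383U
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint16_t data_len = 40000;	/* oversized TSO segment */

	/* ceil(40000 / 16383) = 3 descriptors instead of 1 */
	printf("descs needed: %u\n",
	       (unsigned int)DIV_ROUND_UP(data_len, MAX_DATA_PER_TXD));
	return 0;
}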

Fixes: 4861cde ("i40e: new poll mode driver")

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Tested-by: Ciara Loftus <ciara.loftus@intel.com>
Li-Xiaoyun authored and kevintraynor committed Feb 7, 2020
1 parent 74937a8 commit 950dfee
Showing 1 changed file with 44 additions and 1 deletion.
drivers/net/i40e/i40e_rxtx.c | 45 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 44 insertions(+), 1 deletion(-)
@@ -1002,6 +1002,24 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 	return ctx_desc;
 }
 
+/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
+#define I40E_MAX_DATA_PER_TXD \
+	(I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -1059,8 +1077,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus 1 context descriptor if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		if (ol_flags & PKT_TX_TCP_SEG)
+			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
+					     nb_ctx);
+		else
+			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
@@ -1173,6 +1198,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			slen = m_seg->data_len;
 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
+			while ((ol_flags & PKT_TX_TCP_SEG) &&
+				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
+				txd->buffer_addr =
+					rte_cpu_to_le_64(buf_dma_addr);
+				txd->cmd_type_offset_bsz =
+					i40e_build_ctob(td_cmd,
+					td_offset, I40E_MAX_DATA_PER_TXD,
+					td_tag);
+
+				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
+				slen -= I40E_MAX_DATA_PER_TXD;
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+				txd = &txr[tx_id];
+				txn = &sw_ring[txe->next_id];
+			}
 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
				"buf_dma_addr: %#"PRIx64";\n"
				"td_cmd: %#x;\n"
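
For readers who want the splitting idea from the last hunk in
isolation, here is a minimal standalone sketch: it consumes full
(16K-1)B chunks until the remainder fits in a single descriptor. All
types and helpers here (struct fake_desc, fill_desc) are hypothetical
stand-ins, not i40e driver API, and the 16383B cap is assumed from the
(16K-1)B limit above.

/* Sketch: split one oversized segment across several descriptors. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16383U		/* assumed (16K-1)B cap */

struct fake_desc {			/* hypothetical stand-in */
	uint64_t buffer_addr;
	uint32_t buf_sz;
};

static void fill_desc(struct fake_desc *d, uint64_t addr, uint32_t sz)
{
	d->buffer_addr = addr;		/* driver would byte-swap here */
	d->buf_sz = sz;
}

int main(void)
{
	struct fake_desc ring[8];
	unsigned int tx_id = 0;
	uint64_t buf_dma_addr = 0x100000;	/* pretend DMA address */
	uint32_t slen = 40000;			/* oversized TSO segment */

	/* Mirrors the driver's while loop above: peel off full-size
	 * chunks, advancing the DMA address and ring position. */
	while (slen > MAX_DATA_PER_TXD) {
		fill_desc(&ring[tx_id++], buf_dma_addr, MAX_DATA_PER_TXD);
		buf_dma_addr += MAX_DATA_PER_TXD;
		slen -= MAX_DATA_PER_TXD;
	}
	fill_desc(&ring[tx_id++], buf_dma_addr, slen);	/* last chunk */

	printf("used %u descriptors, last chunk %u bytes\n",
	       tx_id, (unsigned int)slen);
	return 0;
}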
