app/testpmd: fix Tx scheduling interval
[ upstream commit 9fac5ca ]

The first "set txtimes" command parameter specifies the time
interval between scheduled send bursts for single queue. This
interval should be the same for all the forwarding ports.
It requires to maintain the timing related variables on per
queue basis instead of per core, as currently implemented.
This resulted in wrong burst intervals if two or more cores
were generating the scheduled traffic for two or more ports
in txonly mode.

This patch moves the timing variable to the fstream structure.
Only txonly forwarding mode with enabled send scheduling is
affected.

Fixes: 4940344 ("app/testpmd: add Tx scheduling command")

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
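
The reasoning above can be illustrated with a small, standalone sketch. The names (fwd_stream_sketch, next_burst_time) and the numbers are invented for illustration and are not testpmd or DPDK API: when the next scheduled send time lives in the per-stream structure, two queues forwarded by the same core each keep their own interval, instead of advancing one shared per-core variable in turn.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative sketch only; not the testpmd code itself. */
struct fwd_stream_sketch {
	uint16_t tx_queue;	/* Tx queue index of this stream */
	uint64_t ts_skew;	/* next scheduled send time, 0 = not initialized */
};

/* Return the scheduled time of the next burst for one stream, lazily
 * initializing it with a per-queue phase, mirroring the shape of the patch. */
static uint64_t
next_burst_time(struct fwd_stream_sketch *fs, uint64_t t0,
		uint64_t inter, unsigned int txqs_n)
{
	if (fs->ts_skew == 0)
		fs->ts_skew = t0 + inter + inter * fs->tx_queue / txqs_n;
	else
		fs->ts_skew += inter;
	return fs->ts_skew;
}

int
main(void)
{
	/* Two queues driven by one core: each keeps its own burst spacing. */
	struct fwd_stream_sketch q0 = { .tx_queue = 0, .ts_skew = 0 };
	struct fwd_stream_sketch q1 = { .tx_queue = 1, .ts_skew = 0 };
	uint64_t t0 = 1000000;	/* invented initial clock value */
	uint64_t inter = 1000;	/* invented burst interval */
	int i;

	for (i = 0; i < 3; i++) {
		printf("queue 0 burst %d at %" PRIu64 "\n", i,
		       next_burst_time(&q0, t0, inter, 2));
		printf("queue 1 burst %d at %" PRIu64 "\n", i,
		       next_burst_time(&q1, t0, inter, 2));
	}
	return 0;
}

Each queue prints bursts spaced exactly inter apart. With a single shared per-core counter, the interleaved updates from the two streams would distort the spacing seen by each queue, which is the wrong-interval symptom described in the commit message.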
viacheslavo authored and bluca committed Feb 14, 2022
1 parent 8d0afb3 commit a26506b
Showing 2 changed files with 7 additions and 14 deletions.
1 change: 1 addition & 0 deletions app/test-pmd/testpmd.h
@@ -137,6 +137,7 @@ struct fwd_stream {
 	uint64_t rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
 	uint64_t rx_bad_outer_l4_csum;
 	/**< received packets has bad outer l4 checksum */
+	uint64_t ts_skew; /**< TX scheduling timestamp */
 	unsigned int gro_times; /**< GRO operation times */
 	uint64_t core_cycles; /**< used for RX and TX processing */
 	struct pkt_burst_stats rx_burst_stats;
20 changes: 6 additions & 14 deletions app/test-pmd/txonly.c
@@ -60,14 +60,10 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
-RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
-					/**< Timestamp offset per queue */
-RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */

 static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
 static int32_t timestamp_off; /**< Timestamp dynamic field offset */
 static bool timestamp_enable; /**< Timestamp enable */
-static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
 static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];

 static void
@@ -195,7 +191,7 @@ static inline bool
 pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
 		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
-		const uint16_t idx, const struct fwd_stream *fs)
+		const uint16_t idx, struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
 	struct rte_mbuf *pkt_seg;
@@ -263,11 +259,10 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 	update_pkt_header(pkt, pkt_len);

 	if (unlikely(timestamp_enable)) {
-		uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
+		uint64_t skew = fs->ts_skew;
 		struct tx_timestamp timestamp_mark;

-		if (unlikely(timestamp_init_req !=
-				RTE_PER_LCORE(timestamp_idone))) {
+		if (!skew) {
 			struct rte_eth_dev *dev = &rte_eth_devices[fs->tx_port];
 			unsigned int txqs_n = dev->data->nb_tx_queues;
 			uint64_t phase = tx_pkt_times_inter * fs->tx_queue /
@@ -278,8 +273,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 			 */
 			skew = timestamp_initial[fs->tx_port] +
 				tx_pkt_times_inter + phase;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
-			RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+			fs->ts_skew = skew;
 		}
 		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
 		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
@@ -289,14 +283,14 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 			pkt->ol_flags |= timestamp_mask;
 			*RTE_MBUF_DYNFIELD
 				(pkt, timestamp_off, uint64_t *) = skew;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
+			fs->ts_skew = skew;
 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
 		} else if (tx_pkt_times_intra) {
 			skew += tx_pkt_times_intra;
 			pkt->ol_flags |= timestamp_mask;
 			*RTE_MBUF_DYNFIELD
 				(pkt, timestamp_off, uint64_t *) = skew;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
+			fs->ts_skew = skew;
 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
 		} else {
 			timestamp_mark.ts = RTE_BE64(0);
@@ -450,7 +444,6 @@ tx_only_begin(portid_t pi)
 	timestamp_enable = false;
 	timestamp_mask = 0;
 	timestamp_off = -1;
-	RTE_PER_LCORE(timestamp_qskew) = 0;
 	dynf = rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 	if (dynf >= 0)
@@ -493,7 +486,6 @@ tx_only_begin(portid_t pi)
 				return -EINVAL;
 			}
 		}
-		timestamp_init_req++;
 	}

 	/* Make sure all settings are visible on forwarding cores.*/
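
For reference, the phase computed in the hunks above spreads the queues of a port evenly across one send interval. As an illustrative worked example (numbers invented): with tx_pkt_times_inter = 1000 and a port with txqs_n = 4 Tx queues, queue 0 starts at timestamp_initial[port] + 1000 + 0, queue 1 at + 1000 + 250, queue 2 at + 1000 + 500, and queue 3 at + 1000 + 750; each stream then advances its own fs->ts_skew independently, which is what the new per-stream field preserves when several queues share one forwarding core.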
