
Commit 8447357

halfboy93 authored and anguy11 committed
iavf: Implement checking DD desc field
Rx timestamping introduced in the PF driver caused the need to refactor the VF
driver's mechanism for checking packet fields. The function that checked errors
in the descriptor has been removed; from now on only the previously set struct
fields are checked. The DD (descriptor done) field needs to be checked at the
very beginning, before extracting any other fields.

Reviewed-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
Signed-off-by: Mateusz Polchlopek <mateusz.polchlopek@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
1 parent 2dc8e7c commit 8447357
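
To make the ordering the commit message describes concrete, here is a minimal,
self-contained C sketch of the check-DD-first pattern. The mask values, the
is_descriptor_done() helper, and the main() harness are illustrative stand-ins
only; the driver itself uses IAVF_RXD_LEGACY_DD_M / IAVF_RXD_FLEX_DD_M with
FIELD_GET(), as shown in the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's DD masks; both formats are assumed
 * here to carry DD in bit 0 of quad word 1 purely for the example.
 */
#define RXD_LEGACY_DD_BIT	((uint64_t)1 << 0)
#define RXD_FLEX_DD_BIT		((uint64_t)1 << 0)

/* Sketch of the new helper: test the descriptor done bit in qw1 of either a
 * legacy or a flex Rx descriptor.
 */
static bool is_descriptor_done(uint64_t qw1, bool flex)
{
	return qw1 & (flex ? RXD_FLEX_DD_BIT : RXD_LEGACY_DD_BIT);
}

int main(void)
{
	bool flex = true;
	/* Hypothetical qw1 values as hardware would write them back. */
	uint64_t not_ready = 0x0;	/* DD clear: nothing else is valid */
	uint64_t ready = 0x1 | 0x40;	/* DD set plus other status bits */

	/* Check DD first; only when it is set may the rest of the
	 * descriptor fields be extracted.
	 */
	if (!is_descriptor_done(not_ready, flex))
		printf("DD unset: stop, other fields are not valid yet\n");

	if (is_descriptor_done(ready, flex))
		printf("DD set: safe to extract the remaining fields\n");

	return 0;
}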

3 files changed: +43 −29 lines changed

drivers/net/ethernet/intel/iavf/iavf_txrx.c

Lines changed: 41 additions & 13 deletions
@@ -9,6 +9,25 @@
 #include "iavf_trace.h"
 #include "iavf_prototype.h"
 
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in specified descriptor. Because
+ * there are two types of descriptors (legacy and flex) the parameter rx_ring
+ * is used to distinguish.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+	if (flex)
+		return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+	else
+		return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
+
 static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
 			 u32 td_tag)
 {
@@ -1063,14 +1082,16 @@ static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
  *
  * This function checks the ring, descriptor, and packet information in
  * order to populate the hash, checksum, VLAN, protocol, and
  * other fields within the skb.
  **/
 static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
 				    const struct iavf_rx_desc *rx_desc,
-				    struct sk_buff *skb, u32 ptype)
+				    struct sk_buff *skb, u32 ptype,
+				    bool flex)
 {
 	struct libeth_rx_csum csum_bits;
 	struct libeth_rx_pt decoded_pt;
@@ -1079,14 +1100,14 @@ static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
 
 	decoded_pt = libie_rx_pt_parse(ptype);
 
-	if (rx_ring->rxdid == VIRTCHNL_RXDID_1_32B_BASE) {
-		iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
-		csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
-						decoded_pt);
-	} else {
+	if (flex) {
 		iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
 		csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
 						decoded_pt);
+	} else {
+		iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+		csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+						decoded_pt);
 	}
 	iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
 
@@ -1296,12 +1317,13 @@ iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
 
 static struct libeth_rqe_info
 iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
-		       const struct iavf_rx_desc *rx_desc)
+		       const struct iavf_rx_desc *rx_desc,
+		       bool flex)
 {
-	if (rx_ring->rxdid == VIRTCHNL_RXDID_1_32B_BASE)
-		return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
-	else
+	if (flex)
 		return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+	else
+		return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
 }
 
 /**
@@ -1318,6 +1340,7 @@ iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
 **/
 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 {
+	bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
@@ -1327,6 +1350,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		struct libeth_rqe_info fields;
 		struct libeth_fqe *rx_buffer;
 		struct iavf_rx_desc *rx_desc;
+		u64 qw1;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1343,10 +1367,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		 */
 		dma_rmb();
 
-		if (!iavf_test_staterr(rx_desc, IAVF_RXD_FLEX_DD_M))
+		qw1 = le64_to_cpu(rx_desc->qw1);
+		/* If DD field (descriptor done) is unset then other fields are
+		 * not valid
+		 */
+		if (!iavf_is_descriptor_done(qw1, flex))
 			break;
 
-		fields = iavf_extract_rx_fields(rx_ring, rx_desc);
+		fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
 
 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
 
@@ -1391,7 +1419,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		total_rx_bytes += skb->len;
 
 		/* populate checksum, VLAN, and protocol */
-		iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype);
+		iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
 
 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
 		iavf_receive_skb(rx_ring, skb, fields.vlan);

drivers/net/ethernet/intel/iavf/iavf_txrx.h

Lines changed: 0 additions & 16 deletions
@@ -80,22 +80,6 @@ enum iavf_dyn_idx_t {
 	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
 	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(struct iavf_rx_desc *rx_desc,
-				     const u64 stat_err_bits)
-{
-	return !!(rx_desc->qw1 & cpu_to_le64(stat_err_bits));
-}
-
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IAVF_RX_INCREMENT(r, i) \
 	do { \

drivers/net/ethernet/intel/iavf/iavf_type.h

Lines changed: 2 additions & 0 deletions
@@ -212,6 +212,8 @@ struct iavf_rx_desc {
 #define IAVF_RXD_FLEX_PKT_LEN_M	GENMASK_ULL(45, 32)
 
 	aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M	BIT(0)
 /* End of packet. Set to 1 if this descriptor is the last one of the packet */
 #define IAVF_RXD_LEGACY_EOP_M	BIT(1)
 /* L2 TAG 1 presence indication */
