Skip to content

Commit

Permalink
i40e: Refactor rx_bi accesses
Browse files Browse the repository at this point in the history
[ Upstream commit e1675f9 ]

As a first step to migrate i40e to the new MEM_TYPE_XSK_BUFF_POOL
APIs, code that accesses the rx_bi (SW/shadow ring) is refactored to
use an accessor function.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: intel-wired-lan@lists.osuosl.org
Link: https://lore.kernel.org/bpf/20200520192103.355233-7-bjorn.topel@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
Björn Töpel authored and gregkh committed Dec 30, 2020
1 parent 6935f53 commit 405bfd3
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 12 deletions.
17 changes: 11 additions & 6 deletions drivers/net/ethernet/intel/i40e/i40e_txrx.c
Expand Up @@ -1195,6 +1195,11 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
rc->total_packets = 0;
}

/**
 * i40e_rx_bi - return a pointer to the SW (shadow) ring entry
 * @rx_ring: Rx ring owning the rx_bi shadow array
 * @idx: index of the entry to fetch
 *
 * Single access point for the rx_bi array so the backing storage
 * can later be swapped (e.g. for MEM_TYPE_XSK_BUFF_POOL) without
 * touching every caller.
 **/
static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return rx_ring->rx_bi + idx;
}

/**
* i40e_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
Expand All @@ -1208,7 +1213,7 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;

new_buff = &rx_ring->rx_bi[nta];
new_buff = i40e_rx_bi(rx_ring, nta);

/* update, and store next to alloc */
nta++;
Expand Down Expand Up @@ -1272,7 +1277,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
ntc = rx_ring->next_to_clean;

/* fetch, update, and store next to clean */
rx_buffer = &rx_ring->rx_bi[ntc++];
rx_buffer = i40e_rx_bi(rx_ring, ntc++);
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;

Expand Down Expand Up @@ -1361,7 +1366,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)

/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

if (!rx_bi->page)
continue;
Expand Down Expand Up @@ -1576,7 +1581,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
return false;

rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_bi[ntu];
bi = i40e_rx_bi(rx_ring, ntu);

do {
if (!i40e_alloc_mapped_page(rx_ring, bi))
Expand All @@ -1598,7 +1603,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_bi;
bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}

Expand Down Expand Up @@ -1965,7 +1970,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
{
struct i40e_rx_buffer *rx_buffer;

rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
prefetchw(rx_buffer->page);

/* we are reusing so sync this buffer for CPU use */
Expand Down
18 changes: 12 additions & 6 deletions drivers/net/ethernet/intel/i40e/i40e_xsk.c
Expand Up @@ -9,6 +9,11 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

/**
 * i40e_rx_bi - return a pointer to the SW (shadow) ring entry
 * @rx_ring: Rx ring owning the rx_bi shadow array
 * @idx: index of the entry to fetch
 *
 * Accessor for rx_bi so zero-copy (XSK) code paths go through one
 * place when reading the shadow ring; eases later storage changes.
 **/
static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
return &rx_ring->rx_bi[idx];
}

/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
Expand Down Expand Up @@ -321,7 +326,7 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
bool ok = true;

rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_bi[ntu];
bi = i40e_rx_bi(rx_ring, ntu);
do {
if (!alloc(rx_ring, bi)) {
ok = false;
Expand All @@ -340,7 +345,7 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,

if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_bi;
bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}

Expand Down Expand Up @@ -402,7 +407,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
{
struct i40e_rx_buffer *bi;

bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);

/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
Expand All @@ -424,7 +429,8 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *old_bi)
{
struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
struct i40e_rx_buffer *new_bi = i40e_rx_bi(rx_ring,
rx_ring->next_to_alloc);
u16 nta = rx_ring->next_to_alloc;

/* update, and store next to alloc */
Expand Down Expand Up @@ -456,7 +462,7 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
mask = rx_ring->xsk_umem->chunk_mask;

nta = rx_ring->next_to_alloc;
bi = &rx_ring->rx_bi[nta];
bi = i40e_rx_bi(rx_ring, nta);

nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
Expand Down Expand Up @@ -824,7 +830,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
u16 i;

for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

if (!rx_bi->addr)
continue;
Expand Down

0 comments on commit 405bfd3

Please sign in to comment.