Skip to content

Commit

Permalink
net: ena: Use dev_alloc() in RX buffer allocation
Browse files Browse the repository at this point in the history
Use dev_alloc_page() when allocating RX buffers instead of specifying the
allocation flags explicitly. This results in the same behaviour with less
code.

Also move the page allocation and its DMA mapping into a helper function.
This creates a self-contained logical block, which should make the code
easier to understand.

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
  • Loading branch information
ShayAgros authored and intel-lab-lkp committed Jun 8, 2021
1 parent 6f27250 commit d37d1c6
Showing 1 changed file with 36 additions and 22 deletions.
58 changes: 36 additions & 22 deletions drivers/net/ethernet/amazon/ena/ena_netdev.c
Expand Up @@ -975,8 +975,37 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i);
}

static int ena_alloc_rx_page(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info, gfp_t gfp)
/* Allocate one RX page and DMA-map it for device access.
 *
 * On success returns the page and stores the mapped bus address in *dma.
 * On failure returns an ERR_PTR()-encoded error (-ENOSPC when the page
 * allocation fails, -EIO when the DMA mapping fails) and bumps the
 * corresponding per-ring error counter.
 *
 * Marked static: this helper is file-local; without it the symbol leaks
 * into the global namespace and -Wmissing-prototypes warns.
 */
static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
				       dma_addr_t *dma)
{
	struct page *page;

	/* This would allocate the page on the same NUMA node the executing code
	 * is running on.
	 */
	page = dev_alloc_page();
	if (!page) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
				  &rx_ring->syncp);
		return ERR_PTR(-ENOSPC);
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);
		__free_page(page);
		return ERR_PTR(-EIO);
	}

	return page;
}

static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info)
{
int headroom = rx_ring->rx_headroom;
struct ena_com_buf *ena_buf;
Expand All @@ -991,25 +1020,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
if (unlikely(rx_info->page))
return 0;

page = alloc_page(gfp);
if (unlikely(!page)) {
ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
&rx_ring->syncp);
return -ENOMEM;
}

/* To enable NIC-side port-mirroring, AKA SPAN port,
* we make the buffer readable from the nic as well
*/
dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
&rx_ring->syncp);
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
if (unlikely(IS_ERR(page)))
return PTR_ERR(page);

__free_page(page);
return -EIO;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Allocate page %p, rx_info %p\n", page, rx_info);

Expand Down Expand Up @@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)

rx_info = &rx_ring->rx_buffer_info[req_id];

rc = ena_alloc_rx_page(rx_ring, rx_info,
GFP_ATOMIC | __GFP_COMP);
rc = ena_alloc_rx_buffer(rx_ring, rx_info);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate buffer for rx queue %d\n",
Expand Down

0 comments on commit d37d1c6

Please sign in to comment.