net: mvneta: rely on build_skb in mvneta_rx_swbm poll routine
Refactor the mvneta_rx_swbm code, introducing the mvneta_swbm_rx_frame and
mvneta_swbm_add_rx_fragment routines. Rely on build_skb in order to
allocate the skb, since the previous patch introduced buffer recycling
using the page_pool API.
This patch also fixes an issue in the original driver where DMA buffers
were accessed before the DMA sync.
The mvneta driver can run on non-cache-coherent devices, so it is
necessary to sync DMA buffers before handing them to the device
in order to avoid memory corruption. Perf analysis shows a
performance cost associated with this DMA sync (although it was
already present in the original driver code). Follow-up patches
will add more logic to reduce the DMA sync as much as possible.

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
LorenzoBianconi authored and davem330 committed Oct 21, 2019
1 parent 568a3fa commit 8dc9a08
Showing 1 changed file with 95 additions and 84 deletions.
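
For context (not part of the commit): a minimal sketch of the receive pattern this patch switches to. Pages come from a page_pool, each buffer is DMA-synced for the CPU before the frame is read, build_skb() wraps the page without a copy, and on refill the buffer is synced back for the device before the descriptor is handed to hardware. The helper names rx_refill_sketch()/rx_build_skb_sketch() and the dma/buf_size/len parameters are illustrative only; the actual driver changes follow in the diff.

/*
 * Illustrative sketch only -- not the driver code. It condenses the RX
 * pattern the patch adopts; error paths and the multi-descriptor
 * (fragment) case are left out. See the diff below for the real code.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Refill side: the buffer must be synced *for the device* before the
 * descriptor is handed to hardware, since the platform may not be
 * cache coherent.
 */
static struct page *rx_refill_sketch(struct device *dev,
                                     struct page_pool *pool,
                                     dma_addr_t *dma, unsigned int buf_size)
{
        struct page *page = page_pool_dev_alloc_pages(pool);

        if (!page)
                return NULL;

        *dma = page_pool_get_dma_addr(page);
        dma_sync_single_for_device(dev, *dma, buf_size,
                                   page_pool_get_dma_dir(pool));
        return page;
}

/* Completion side: sync the buffer *for the CPU* before touching it,
 * then wrap the page in an skb with build_skb() instead of copying.
 * "len" must leave room for the NET_SKB_PAD headroom plus the
 * skb_shared_info tailroom that build_skb() places at the end.
 */
static struct sk_buff *rx_build_skb_sketch(struct device *dev,
                                           struct page_pool *pool,
                                           struct page *page,
                                           dma_addr_t dma, unsigned int len)
{
        struct sk_buff *skb;

        dma_sync_single_for_cpu(dev, dma, len,
                                page_pool_get_dma_dir(pool));

        skb = build_skb(page_address(page), PAGE_SIZE);
        if (unlikely(!skb))
                return NULL;

        /* The skb now owns the page: drop the pool's DMA mapping so the
         * page is freed through the normal skb path, not recycled.
         */
        page_pool_release_page(pool, page);

        skb_reserve(skb, NET_SKB_PAD);
        skb_put(skb, len);
        return skb;
}
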
179 changes: 95 additions & 84 deletions drivers/net/ethernet/marvell/mvneta.c
@@ -323,6 +323,11 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())

#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
NET_SKB_PAD))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)

#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
(addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
@@ -646,7 +651,6 @@ static int txq_number = 8;
static int rxq_def;

static int rx_copybreak __read_mostly = 256;
static int rx_header_size __read_mostly = 128;

/* HW BM need that each port be identify by a unique ID */
static int global_port_id;
@@ -1829,7 +1833,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
PAGE_SIZE, dma_dir);
MVNETA_MAX_RX_BUF_SIZE, dma_dir);
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);

return 0;
@@ -1947,126 +1951,134 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
return i;
}

static int
mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct page *page)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;

if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len += len;
} else {
len = rx_desc->data_size;
data_len += len - ETH_FCS_LEN;
}

dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);

rxq->skb = build_skb(data, PAGE_SIZE);
if (unlikely(!rxq->skb)) {
netdev_err(dev,
"Can't allocate skb on queue %d\n",
rxq->id);
dev->stats.rx_dropped++;
rxq->skb_alloc_err++;
return -ENOMEM;
}
page_pool_release_page(rxq->page_pool, page);

skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD);
skb_put(rxq->skb, data_len);
mvneta_rx_csum(pp, rx_desc->status, rxq->skb);

rxq->left_size = rx_desc->data_size - len;
rx_desc->buf_phys_addr = 0;

return 0;
}

static void
mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct page *page)
{
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;

if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len = len;
} else {
len = rxq->left_size;
data_len = len - ETH_FCS_LEN;
}
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
if (data_len > 0) {
/* refill descriptor with new buffer later */
skb_add_rx_frag(rxq->skb,
skb_shinfo(rxq->skb)->nr_frags,
page, NET_SKB_PAD, data_len,
PAGE_SIZE);
}
page_pool_release_page(rxq->page_pool, page);
rx_desc->buf_phys_addr = 0;
rxq->left_size -= len;
}

/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
struct net_device *dev = pp->dev;
int rx_todo, rx_proc;
int refill = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
int rx_todo, refill;

/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
rx_proc = 0;

/* Fairness NAPI loop */
while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
while (rx_proc < budget && rx_proc < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
u32 rx_status, index;
unsigned char *data;
struct page *page;
dma_addr_t phys_addr;
u32 rx_status, index;
int rx_bytes, skb_size, copy_size;
int frag_num, frag_size, frag_offset;

index = rx_desc - rxq->descs;
page = (struct page *)rxq->buf_virt_addr[index];
data = page_address(page);
/* Prefetch header */
prefetch(data);

phys_addr = rx_desc->buf_phys_addr;
rx_status = rx_desc->status;
rx_proc++;
rxq->refill_num++;

if (rx_status & MVNETA_RXD_FIRST_DESC) {
int err;

/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
dev->stats.rx_errors++;
/* leave the descriptor untouched */
continue;
}
rx_bytes = rx_desc->data_size -
(ETH_FCS_LEN + MVNETA_MH_SIZE);

/* Allocate small skb for each new packet */
skb_size = max(rx_copybreak, rx_header_size);
rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
if (unlikely(!rxq->skb)) {
netdev_err(dev,
"Can't allocate skb on queue %d\n",
rxq->id);
dev->stats.rx_dropped++;
rxq->skb_alloc_err++;
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page);
if (err)
continue;
}
copy_size = min(skb_size, rx_bytes);

/* Copy data from buffer to SKB, skip Marvell header */
memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
copy_size);
skb_put(rxq->skb, copy_size);
rxq->left_size = rx_bytes - copy_size;

mvneta_rx_csum(pp, rx_status, rxq->skb);
if (rxq->left_size == 0) {
int size = copy_size + MVNETA_MH_SIZE;

dma_sync_single_range_for_cpu(dev->dev.parent,
phys_addr, 0,
size,
DMA_FROM_DEVICE);

/* leave the descriptor and buffer untouched */
} else {
/* refill descriptor with new buffer later */
rx_desc->buf_phys_addr = 0;

frag_num = 0;
frag_offset = copy_size + MVNETA_MH_SIZE;
frag_size = min(rxq->left_size,
(int)(PAGE_SIZE - frag_offset));
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
page_pool_release_page(rxq->page_pool, page);
rxq->left_size -= frag_size;
}
} else {
/* Middle or Last descriptor */
if (unlikely(!rxq->skb)) {
pr_debug("no skb for rx_status 0x%x\n",
rx_status);
continue;
}
if (!rxq->left_size) {
/* last descriptor has only FCS */
/* and can be discarded */
dma_sync_single_range_for_cpu(dev->dev.parent,
phys_addr, 0,
ETH_FCS_LEN,
DMA_FROM_DEVICE);
/* leave the descriptor and buffer untouched */
} else {
/* refill descriptor with new buffer later */
rx_desc->buf_phys_addr = 0;

frag_num = skb_shinfo(rxq->skb)->nr_frags;
frag_offset = 0;
frag_size = min(rxq->left_size,
(int)(PAGE_SIZE - frag_offset));
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);

page_pool_release_page(rxq->page_pool, page);
rxq->left_size -= frag_size;
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
} /* Middle or Last descriptor */

if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2091,7 +2103,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,

/* clean uncomplete skb pointer in queue */
rxq->skb = NULL;
rxq->left_size = 0;
}

if (rcvd_pkts)
@@ -2954,7 +2965,7 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
PAGE_SIZE :
MVNETA_MAX_RX_BUF_SIZE :
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
@@ -4664,7 +4675,7 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);

pp->id = global_port_id++;
pp->rx_offset_correction = 0; /* not relevant for SW BM */
pp->rx_offset_correction = NET_SKB_PAD;

/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
