ravb: Group descriptor types used in Rx ring
[ Upstream commit 4123c3f ]

The Rx ring can either be made up of normal or extended descriptors, not
a mix of the two at the same time. Make this explicit by grouping the
two variables in a rx_ring union.

Extending the storage for normal descriptors from a single queue to
NUM_RX_QUEUE queues has no practical effect, but it aids readability:
the code that uses it already piggybacks on other members of struct
ravb_private that are arrays of max length NUM_RX_QUEUE, e.g.
rx_desc_dma. This will also make further refactoring easier.

While at it, rename the normal descriptor Rx ring to make it clear it's
not strictly related to the GbEthernet E-MAC IP found in RZ/G2L; normal
descriptors could be used on R-Car SoCs too.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Reviewed-by: Paul Barker <paul.barker.ct@bp.renesas.com>
Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: def52db ("net: ravb: Count packets instead of descriptors in R-Car RX path")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Niklas Söderlund authored and gregkh committed Apr 27, 2024
1 parent d44fc95 commit 888e365
Showing 2 changed files with 33 additions and 30 deletions.
6 changes: 4 additions & 2 deletions drivers/net/ethernet/renesas/ravb.h
@@ -1060,8 +1060,10 @@ struct ravb_private {
 	struct ravb_desc *desc_bat;
 	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-	struct ravb_rx_desc *gbeth_rx_ring;
-	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	union {
+		struct ravb_rx_desc *desc;
+		struct ravb_ex_rx_desc *ex_desc;
+	} rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
 	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff *rx_1st_skb;
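Both union members are pointers, so each rx_ring[] slot stays one pointer wide; the union simply records in the type that a ring holds either normal or extended descriptors, never both. A minimal, self-contained sketch of the idea in plain C (the toy descriptor structs below are stand-ins for the real ravb_rx_desc and ravb_ex_rx_desc, not their actual layouts):

#include <stdio.h>

/* Toy stand-ins for the real descriptor layouts. */
struct rx_desc { unsigned int dptr; };                    /* normal */
struct ex_rx_desc { unsigned int dptr, ts_sec, ts_nsec; };/* extended */

#define NUM_RX_QUEUE 2

/* A ring is built from one descriptor type or the other, never a
 * mix, so a union of the two ring pointers encodes that invariant
 * at no extra cost. */
union rx_ring {
	struct rx_desc *desc;
	struct ex_rx_desc *ex_desc;
};

int main(void)
{
	union rx_ring rx_ring[NUM_RX_QUEUE];

	rx_ring[0].desc = NULL; /* this queue uses normal descriptors */

	/* The union of two pointers is still one pointer wide. */
	printf("slot: %zu bytes, pointer: %zu bytes\n",
	       sizeof(rx_ring[0]), sizeof(struct rx_desc *));
	return 0;
}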
57 changes: 29 additions & 28 deletions drivers/net/ethernet/renesas/ravb_main.c
@@ -250,11 +250,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->gbeth_rx_ring)
+	if (!priv->rx_ring[q].desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+		struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -264,9 +264,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 					 DMA_FROM_DEVICE);
 	}
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
 			  priv->rx_desc_dma[q]);
-	priv->gbeth_rx_ring = NULL;
+	priv->rx_ring[q].desc = NULL;
 }
 
 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -275,11 +275,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->rx_ring[q])
+	if (!priv->rx_ring[q].ex_desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -290,9 +290,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	}
 	ring_size = sizeof(struct ravb_ex_rx_desc) *
 		    (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
 			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q] = NULL;
+	priv->rx_ring[q].ex_desc = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
@@ -344,11 +344,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 	unsigned int i;
 
 	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+	memset(priv->rx_ring[q].desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->gbeth_rx_ring[i];
+		rx_desc = &priv->rx_ring[q].desc[i];
 		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  GBETH_RX_BUFF_MAX,
@@ -361,7 +361,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->gbeth_rx_ring[i];
+	rx_desc = &priv->rx_ring[q].desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -374,11 +374,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 	dma_addr_t dma_addr;
 	unsigned int i;
 
-	memset(priv->rx_ring[q], 0, rx_ring_size);
+	memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q][i];
+		rx_desc = &priv->rx_ring[q].ex_desc[i];
 		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  RX_BUF_SZ,
@@ -391,7 +391,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->rx_ring[q][i];
+	rx_desc = &priv->rx_ring[q].ex_desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -446,10 +446,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-						 &priv->rx_desc_dma[q],
-						 GFP_KERNEL);
-	return priv->gbeth_rx_ring;
+	priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+						   &priv->rx_desc_dma[q],
+						   GFP_KERNEL);
+	return priv->rx_ring[q].desc;
 }
 
 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -459,10 +459,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-					      &priv->rx_desc_dma[q],
-					      GFP_KERNEL);
-	return priv->rx_ring[q];
+	priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+						      ring_size,
+						      &priv->rx_desc_dma[q],
+						      GFP_KERNEL);
+	return priv->rx_ring[q].ex_desc;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -784,7 +785,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->gbeth_rx_ring[entry];
+	desc = &priv->rx_ring[q].desc[entry];
 	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -851,13 +852,13 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 
 		if (!priv->rx_skb[q][entry]) {
@@ -907,7 +908,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
-	desc = &priv->rx_ring[q][entry];
+	desc = &priv->rx_ring[q].ex_desc[entry];
 	while (desc->die_dt != DT_FEMPTY) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -967,13 +968,13 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
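Every helper touched here comes in a _rcar and a _gbeth flavour, and after this change each pair differs only in which union member (.ex_desc vs .desc) it dereferences. A hedged sketch of how such pairs are typically selected per SoC follows; the ops-table and field names below are illustrative, not necessarily the exact ravb_hw_info layout:

#include <stdbool.h>

struct net_device; /* opaque here; defined by the kernel */

/* Illustrative per-SoC ops table; the ravb driver wires up its
 * _rcar or _gbeth helpers through a similar per-chip info struct. */
struct rx_ring_ops {
	void *(*alloc_rx_desc)(struct net_device *ndev, int q);
	void (*rx_ring_free)(struct net_device *ndev, int q);
	void (*rx_ring_format)(struct net_device *ndev, int q);
	bool (*receive)(struct net_device *ndev, int *quota, int q);
};

/* Prototypes mirroring the helpers in the diff above. */
void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q);
bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q);
void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q);
bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q);

static const struct rx_ring_ops rcar_rx_ops = {
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,  /* uses .ex_desc */
	.receive       = ravb_rx_rcar,
};

static const struct rx_ring_ops gbeth_rx_ops = {
	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth, /* uses .desc */
	.receive       = ravb_rx_gbeth,
};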
