ravb: Group descriptor types used in Rx ring
The Rx ring can be made up of either normal or extended descriptors,
not a mix of the two at the same time. Make this explicit by grouping
the two variables in an rx_ring union.

Extending the storage for normal descriptors from a single queue to
NUM_RX_QUEUE queues has no practical effect, but it aids readability:
the code that uses it already piggybacks on other members of struct
ravb_private that are arrays of max length NUM_RX_QUEUE, e.g.
rx_desc_dma. It will also make further refactoring easier.

While at it, rename the normal descriptor Rx ring to make it clear it
is not strictly related to the GbEthernet E-MAC IP found in RZ/G2L;
normal descriptors can be used on R-Car SoCs too.
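
A minimal, compilable sketch of the grouping this patch introduces.
Only the union shape mirrors the patch; the struct name, NUM_RX_QUEUE's
value, and the descriptor bodies are stand-ins for the real definitions
in ravb.h:

	#define NUM_RX_QUEUE 2			/* stand-in value */

	struct ravb_rx_desc { int body; };	/* normal descriptor, body elided */
	struct ravb_ex_rx_desc { int body; };	/* extended descriptor, body elided */

	struct ravb_private_sketch {		/* stand-in for struct ravb_private */
		union {
			struct ravb_rx_desc *desc;	 /* normal descriptor ring */
			struct ravb_ex_rx_desc *ex_desc; /* extended descriptor ring */
		} rx_ring[NUM_RX_QUEUE];
	};

Because each ring holds only one descriptor type at a time, exactly one
union member is live per queue, and since both members are pointers the
union adds no storage over a single pointer per queue. Call sites then
pick the member matching the hardware variant, e.g. priv->rx_ring[q].desc
in the GbEth paths and priv->rx_ring[q].ex_desc in the R-Car paths.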

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Reviewed-by: Paul Barker <paul.barker.ct@bp.renesas.com>
Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
Niklas Söderlund authored and davem330 committed Mar 6, 2024
1 parent dbb0b6c · commit 4123c3f
Showing 2 changed files with 33 additions and 30 deletions.
6 changes: 4 additions & 2 deletions drivers/net/ethernet/renesas/ravb.h

@@ -1092,8 +1092,10 @@ struct ravb_private {
 	struct ravb_desc *desc_bat;
 	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-	struct ravb_rx_desc *gbeth_rx_ring;
-	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	union {
+		struct ravb_rx_desc *desc;
+		struct ravb_ex_rx_desc *ex_desc;
+	} rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
 	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff *rx_1st_skb;
57 changes: 29 additions & 28 deletions drivers/net/ethernet/renesas/ravb_main.c

@@ -241,11 +241,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->gbeth_rx_ring)
+	if (!priv->rx_ring[q].desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+		struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -255,9 +255,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 			       DMA_FROM_DEVICE);
 	}
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
 			  priv->rx_desc_dma[q]);
-	priv->gbeth_rx_ring = NULL;
+	priv->rx_ring[q].desc = NULL;
 }
 
 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -266,11 +266,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->rx_ring[q])
+	if (!priv->rx_ring[q].ex_desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -281,9 +281,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	}
 	ring_size = sizeof(struct ravb_ex_rx_desc) *
 		    (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
 			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q] = NULL;
+	priv->rx_ring[q].ex_desc = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
@@ -335,11 +335,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 	unsigned int i;
 
 	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+	memset(priv->rx_ring[q].desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->gbeth_rx_ring[i];
+		rx_desc = &priv->rx_ring[q].desc[i];
 		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  GBETH_RX_BUFF_MAX,
@@ -352,7 +352,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->gbeth_rx_ring[i];
+	rx_desc = &priv->rx_ring[q].desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -365,11 +365,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 	dma_addr_t dma_addr;
 	unsigned int i;
 
-	memset(priv->rx_ring[q], 0, rx_ring_size);
+	memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q][i];
+		rx_desc = &priv->rx_ring[q].ex_desc[i];
 		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  RX_BUF_SZ,
@@ -382,7 +382,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->rx_ring[q][i];
+	rx_desc = &priv->rx_ring[q].ex_desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -437,10 +437,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-						 &priv->rx_desc_dma[q],
-						 GFP_KERNEL);
-	return priv->gbeth_rx_ring;
+	priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+						   &priv->rx_desc_dma[q],
+						   GFP_KERNEL);
+	return priv->rx_ring[q].desc;
 }
 
 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -450,10 +450,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-					      &priv->rx_desc_dma[q],
-					      GFP_KERNEL);
-	return priv->rx_ring[q];
+	priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+						      ring_size,
+						      &priv->rx_desc_dma[q],
+						      GFP_KERNEL);
+	return priv->rx_ring[q].ex_desc;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -830,7 +831,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->gbeth_rx_ring[entry];
+	desc = &priv->rx_ring[q].desc[entry];
 	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -901,13 +902,13 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 
 		if (!priv->rx_skb[q][entry]) {
@@ -957,7 +958,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
-	desc = &priv->rx_ring[q][entry];
+	desc = &priv->rx_ring[q].ex_desc[entry];
 	while (desc->die_dt != DT_FEMPTY) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -1017,13 +1018,13 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
