@@ -202,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+		 unsigned int i)
+{
+	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
 /* Free TX skb function for AVB-IP */
 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 {
@@ -246,17 +253,17 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 	return free_num;
 }
 
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->rx_ring[q].desc)
+	if (!priv->rx_ring[q].raw)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
+		struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -265,48 +272,21 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 					 priv->info->rx_max_frame_size,
 					 DMA_FROM_DEVICE);
 	}
-	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
+	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
 			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q].desc = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	unsigned int ring_size;
-	unsigned int i;
-
-	if (!priv->rx_ring[q].ex_desc)
-		return;
-
-	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
-
-		if (!dma_mapping_error(ndev->dev.parent,
-				       le32_to_cpu(desc->dptr)))
-			dma_unmap_single(ndev->dev.parent,
-					 le32_to_cpu(desc->dptr),
-					 priv->info->rx_max_frame_size,
-					 DMA_FROM_DEVICE);
-	}
-	ring_size = sizeof(struct ravb_ex_rx_desc) *
-		    (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
-			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q].ex_desc = NULL;
+	priv->rx_ring[q].raw = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	const struct ravb_hw_info *info = priv->info;
 	unsigned int num_tx_desc = priv->num_tx_desc;
 	unsigned int ring_size;
 	unsigned int i;
 
-	info->rx_ring_free(ndev, q);
+	ravb_rx_ring_free(ndev, q);
 
 	if (priv->tx_ring[q]) {
 		ravb_tx_free(ndev, q, false);
@@ -337,50 +317,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	priv->tx_skb[q] = NULL;
 }
 
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct ravb_rx_desc *rx_desc;
 	unsigned int rx_ring_size;
 	dma_addr_t dma_addr;
 	unsigned int i;
 
-	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	memset(priv->rx_ring[q].desc, 0, rx_ring_size);
-	/* Build RX ring buffer */
-	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q].desc[i];
-		rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->info->rx_max_frame_size,
-					  DMA_FROM_DEVICE);
-		/* We just set the data size to 0 for a failed mapping which
-		 * should prevent DMA from happening...
-		 */
-		if (dma_mapping_error(ndev->dev.parent, dma_addr))
-			rx_desc->ds_cc = cpu_to_le16(0);
-		rx_desc->dptr = cpu_to_le32(dma_addr);
-		rx_desc->die_dt = DT_FEMPTY;
-	}
-	rx_desc = &priv->rx_ring[q].desc[i];
-	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
-	rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_ex_rx_desc *rx_desc;
-	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	dma_addr_t dma_addr;
-	unsigned int i;
-
-	memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
+	rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+	memset(priv->rx_ring[q].raw, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q].ex_desc[i];
+		rx_desc = ravb_rx_get_desc(priv, q, i);
 		rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  priv->info->rx_max_frame_size,
@@ -393,7 +343,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->rx_ring[q].ex_desc[i];
+	rx_desc = ravb_rx_get_desc(priv, q, i);
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -402,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	const struct ravb_hw_info *info = priv->info;
 	unsigned int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
@@ -415,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	priv->dirty_rx[q] = 0;
 	priv->dirty_tx[q] = 0;
 
-	info->rx_ring_format(ndev, q);
+	ravb_rx_ring_format(ndev, q);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
@@ -441,31 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 }
 
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	unsigned int ring_size;
 
-	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-
-	priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
-						   &priv->rx_desc_dma[q],
-						   GFP_KERNEL);
-	return priv->rx_ring[q].desc;
-}
-
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	unsigned int ring_size;
+	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
 
-	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+						  &priv->rx_desc_dma[q],
+						  GFP_KERNEL);
 
-	priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
-						      ring_size,
-						      &priv->rx_desc_dma[q],
-						      GFP_KERNEL);
-	return priv->rx_ring[q].ex_desc;
+	return priv->rx_ring[q].raw;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -502,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	}
 
 	/* Allocate all RX descriptors. */
-	if (!info->alloc_rx_desc(ndev, q))
+	if (!ravb_alloc_rx_desc(ndev, q))
 		goto error;
 
 	priv->dirty_rx[q] = 0;
@@ -2679,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
 }
 
 static const struct ravb_hw_info ravb_gen3_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2695,6 +2628,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
 	.rx_max_frame_size = SZ_2K,
 	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.internal_delay = 1,
 	.tx_counters = 1,
 	.multi_irqs = 1,
@@ -2705,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
 };
 
 static const struct ravb_hw_info ravb_gen2_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2721,16 +2652,14 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
 	.rx_max_frame_size = SZ_2K,
 	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.aligned_tx = 1,
 	.gptp = 1,
 	.nc_queues = 1,
 	.magic_pkt = 1,
 };
 
 static const struct ravb_hw_info ravb_rzv2m_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2744,6 +2673,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
 	.rx_max_frame_size = SZ_2K,
 	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.multi_irqs = 1,
 	.err_mgmt_irqs = 1,
 	.gptp = 1,
@@ -2753,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
 };
 
 static const struct ravb_hw_info gbeth_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_gbeth,
-	.rx_ring_format = ravb_rx_ring_format_gbeth,
-	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
 	.receive = ravb_rx_gbeth,
 	.set_rate = ravb_set_rate_gbeth,
 	.set_feature = ravb_set_features_gbeth,
@@ -2769,6 +2696,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
 	.tccr_mask = TCCR_TSRQ0,
 	.rx_max_frame_size = SZ_8K,
 	.rx_max_desc_use = 4080,
+	.rx_desc_size = sizeof(struct ravb_rx_desc),
 	.aligned_tx = 1,
 	.tx_counters = 1,
 	.carrier_counters = 1,
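
Note on the .raw accesses and the new rx_desc_size field: the change only works if the per-queue RX ring pointer can be viewed both as a typed descriptor array and as a plain byte pointer. A minimal sketch of the layout this assumes is below; the field names and placement are illustrative, the authoritative definitions live in ravb.h.

	/*
	 * Sketch, assuming ravb.h keeps the RX ring as a union of views
	 * (illustrative, not verbatim from the driver headers).
	 */
	struct ravb_private {
		/* ... other members elided ... */
		union {
			struct ravb_rx_desc *desc;		/* GbEth descriptor layout */
			struct ravb_ex_rx_desc *ex_desc;	/* R-Car extended descriptor layout */
			void *raw;				/* size-agnostic byte view */
		} rx_ring[NUM_RX_QUEUE];
		/* ... */
	};

With rx_desc_size carrying the per-SoC sizeof() (struct ravb_rx_desc for GbEth, struct ravb_ex_rx_desc for R-Car Gen2/Gen3 and RZ/V2M), descriptor i of queue q sits at rx_ring[q].raw + priv->info->rx_desc_size * i, which is exactly what ravb_rx_get_desc() computes (void pointer arithmetic is a GNU C extension the kernel relies on). That is what lets the per-SoC rx_ring_free, rx_ring_format and alloc_rx_desc hooks collapse into the single functions above.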