@@ -240,7 +240,7 @@ static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
 
 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
 {
-	struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
 
 	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
 		return true;
@@ -283,13 +283,13 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 	if (gq->gptp) {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_ts_desc) *
-				  (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
-		gq->ts_ring = NULL;
+				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
+		gq->rx_ring = NULL;
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
-				  (gq->ring_size + 1), gq->ring, gq->ring_dma);
-		gq->ring = NULL;
+				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
+		gq->tx_ring = NULL;
 	}
 
 	if (!gq->dir_tx) {
@@ -321,14 +321,14 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
 
 	if (gptp)
-		gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_ts_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
 	else
-		gq->ring = dma_alloc_coherent(ndev->dev.parent,
-					      sizeof(struct rswitch_ext_desc) *
-					      (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
-	if (!gq->ts_ring && !gq->ring)
+		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+						 sizeof(struct rswitch_ext_desc) *
+						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+	if (!gq->rx_ring && !gq->tx_ring)
 		goto out;
 
 	i = gq->index / 32;
@@ -361,14 +361,14 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 				     struct rswitch_private *priv,
 				     struct rswitch_gwca_queue *gq)
 {
-	int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
 	struct rswitch_ext_desc *desc;
 	struct rswitch_desc *linkfix;
 	dma_addr_t dma_addr;
 	int i;
 
-	memset(gq->ring, 0, tx_ring_size);
-	for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+	memset(gq->tx_ring, 0, ring_size);
+	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
 						  gq->skbs[i]->data, PKT_BUF_SZ,
@@ -397,7 +397,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 
 err:
 	if (!gq->dir_tx) {
-		for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
 			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
@@ -407,9 +407,9 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	return -ENOMEM;
 }
 
-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
-				      struct rswitch_gwca_queue *gq,
-				      int start_index, int num)
+static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+					  struct rswitch_gwca_queue *gq,
+					  int start_index, int num)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
 	struct rswitch_ext_ts_desc *desc;
@@ -418,7 +418,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
 
 	for (i = 0; i < num; i++) {
 		index = (i + start_index) % gq->ring_size;
-		desc = &gq->ts_ring[index];
+		desc = &gq->rx_ring[index];
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
 						  gq->skbs[index]->data, PKT_BUF_SZ,
@@ -442,7 +442,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
 	if (!gq->dir_tx) {
 		for (i--; i >= 0; i--) {
 			index = (i + start_index) % gq->ring_size;
-			desc = &gq->ts_ring[index];
+			desc = &gq->rx_ring[index];
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
 			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
@@ -452,21 +452,21 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
 	return -ENOMEM;
 }
 
-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
-					struct rswitch_private *priv,
-					struct rswitch_gwca_queue *gq)
+static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+					    struct rswitch_private *priv,
+					    struct rswitch_gwca_queue *gq)
 {
-	int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
 	struct rswitch_ext_ts_desc *desc;
 	struct rswitch_desc *linkfix;
 	int err;
 
-	memset(gq->ts_ring, 0, tx_ts_ring_size);
-	err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+	memset(gq->rx_ring, 0, ring_size);
+	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
 	if (err < 0)
 		return err;
 
-	desc = &gq->ts_ring[gq->ring_size];	/* Last */
+	desc = &gq->rx_ring[gq->ring_size];	/* Last */
 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
 	desc->desc.die_dt = DT_LINKFIX;
 
@@ -594,7 +594,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
 	struct rswitch_device *rdev = priv->rdev[index];
 	struct net_device *ndev = rdev->ndev;
 
-	return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
 }
 
 static int rswitch_gwca_hw_init(struct rswitch_private *priv)
@@ -675,7 +675,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	boguscnt = min_t(int, gq->ring_size, *quota);
 	limit = boguscnt;
 
-	desc = &gq->ts_ring[gq->cur];
+	desc = &gq->rx_ring[gq->cur];
 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
 		if (--boguscnt < 0)
 			break;
@@ -703,14 +703,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 		rdev->ndev->stats.rx_bytes += pkt_len;
 
 		gq->cur = rswitch_next_queue_index(gq, true, 1);
-		desc = &gq->ts_ring[gq->cur];
+		desc = &gq->rx_ring[gq->cur];
 	}
 
 	num = rswitch_get_num_cur_queues(gq);
 	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
-	ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
 	gq->dirty = rswitch_next_queue_index(gq, false, num);
@@ -737,7 +737,7 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
 
 	for (; rswitch_get_num_cur_queues(gq) > 0;
 	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
-		desc = &gq->ring[gq->dirty];
+		desc = &gq->tx_ring[gq->dirty];
 		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
 			break;
 
@@ -1390,7 +1390,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 	}
 
 	gq->skbs[gq->cur] = skb;
-	desc = &gq->ring[gq->cur];
+	desc = &gq->tx_ring[gq->cur];
 	rswitch_desc_set_dptr(&desc->desc, dma_addr);
 	desc->desc.info_ds = cpu_to_le16(skb->len);
 
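
For reference, the rename above touches two descriptor ring pointers on struct rswitch_gwca_queue: an RX ring of struct rswitch_ext_ts_desc (timestamp-capable extended descriptors, selected by gq->gptp) and a TX ring of struct rswitch_ext_desc. The header change is not shown in this diff, so the following is only a minimal sketch of how those members might look after the rename, inferred from the usages in the hunks above; the types, ordering, and surrounding fields are assumptions, and the two pointers could equally be placed in an anonymous union since only one is allocated per queue.

/*
 * Sketch only -- not the actual rswitch.h change. Member names are the ones
 * dereferenced in this diff; types and layout are assumed for illustration.
 */
struct rswitch_gwca_queue {
	int index;				/* queue number, used as gq->index / 32 */
	bool dir_tx;				/* true for TX-direction queues */
	bool gptp;				/* true when the timestamp-capable ring is used */
	int ring_size;
	int cur;				/* next descriptor to use */
	int dirty;				/* next descriptor to reclaim/refill */
	struct sk_buff **skbs;			/* per-descriptor skb pointers */
	dma_addr_t ring_dma;			/* DMA address of the ring, linked via DT_LINKFIX */
	struct rswitch_ext_desc *tx_ring;	/* was gq->ring */
	struct rswitch_ext_ts_desc *rx_ring;	/* was gq->ts_ring */
};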