@@ -436,6 +436,150 @@ static void rtase_rx_ring_clear(struct page_pool *page_pool,
 	}
 }
 
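+/* A frame is complete only when both RTASE_RX_FIRST_FRAG and
+ * RTASE_RX_LAST_FRAG are set in the descriptor status; any other
+ * combination means the frame spans multiple descriptors.
+ */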
+static int rtase_fragmented_frame(u32 status)
+{
+	return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) !=
+	       (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG);
+}
+
+static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb,
+			  const union rtase_rx_desc *desc)
+{
+	u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+	/* rx csum offload: trust the hardware result only when it parsed
+	 * the frame as IPv4 with a good IP checksum, or as IPv6, and the
+	 * TCP/UDP checksum check also passed.
+	 */
+	if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) ||
+	    (opts2 & RTASE_RX_V6F)) {
+		if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) ||
+		    ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+}
+
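+/* The hardware reports the VLAN tag byte-swapped in opts2; swab16()
+ * restores it before the tag is attached to the skb.
+ */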
+static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb)
+{
+	u32 opts2 = le32_to_cpu(desc->desc_status.opts2);
+
+	if (!(opts2 & RTASE_RX_VLAN_TAG))
+		return;
+
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+			       swab16(opts2 & RTASE_VLAN_TAG_MASK));
+}
+
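+/* Completed skbs are handed to the stack through GRO on the ring's
+ * interrupt vector NAPI context.
+ */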
+static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb)
+{
+	struct rtase_int_vector *ivec = ring->ivec;
+
+	napi_gro_receive(&ivec->napi, skb);
+}
+
+static int rx_handler(struct rtase_ring *ring, int budget)
+{
+	union rtase_rx_desc *desc_base = ring->desc;
+	u32 pkt_size, cur_rx, delta, entry, status;
+	struct rtase_private *tp = ring->ivec->tp;
+	struct net_device *dev = tp->dev;
+	union rtase_rx_desc *desc;
+	struct sk_buff *skb;
+	int workdone = 0;
+
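+	/* cur_idx and dirty_idx are free-running counters; they are reduced
+	 * modulo RTASE_NUM_DESC only when used to index the descriptor ring.
+	 */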
+	cur_rx = ring->cur_idx;
+	entry = cur_rx % RTASE_NUM_DESC;
+	desc = &desc_base[entry];
+
+	while (workdone < budget) {
+		status = le32_to_cpu(desc->desc_status.opts1);
+
+		if (status & RTASE_DESC_OWN)
+			break;
+
+		/* This barrier is needed to keep us from reading
+		 * any other fields out of the rx descriptor until
+		 * we know the status of RTASE_DESC_OWN
+		 */
+		dma_rmb();
+
+		if (unlikely(status & RTASE_RX_RES)) {
+			if (net_ratelimit())
+				netdev_warn(dev, "Rx ERROR. status = %08x\n",
+					    status);
+
+			tp->stats.rx_errors++;
+
+			if (status & (RTASE_RX_RWT | RTASE_RX_RUNT))
+				tp->stats.rx_length_errors++;
+
+			if (status & RTASE_RX_CRC)
+				tp->stats.rx_crc_errors++;
+
+			if (dev->features & NETIF_F_RXALL)
+				goto process_pkt;
+
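+			/* Return the descriptor to the NIC and drop the
+			 * errored frame.
+			 */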
+			rtase_mark_to_asic(desc, tp->rx_buf_sz);
+			goto skip_process_pkt;
+		}
+
+process_pkt:
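+		/* The hardware-reported length includes the 4-byte FCS;
+		 * strip it unless the user requested it via NETIF_F_RXFCS.
+		 */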
+		pkt_size = status & RTASE_RX_PKT_SIZE_MASK;
+		if (likely(!(dev->features & NETIF_F_RXFCS)))
+			pkt_size -= ETH_FCS_LEN;
+
+		/* The driver does not support incoming fragmented frames.
+		 * They are seen as a symptom of over-mtu sized frames.
+		 */
+		if (unlikely(rtase_fragmented_frame(status))) {
+			tp->stats.rx_dropped++;
+			tp->stats.rx_length_errors++;
+			rtase_mark_to_asic(desc, tp->rx_buf_sz);
+			goto skip_process_pkt;
+		}
+
+		dma_sync_single_for_cpu(&tp->pdev->dev,
+					ring->mis.data_phy_addr[entry],
+					tp->rx_buf_sz, DMA_FROM_DEVICE);
+
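+		/* The receive buffer is a page_pool page; build the skb
+		 * around it in place instead of copying the data.
+		 */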
+		skb = build_skb(ring->data_buf[entry], PAGE_SIZE);
+		if (!skb) {
+			tp->stats.rx_dropped++;
+			rtase_mark_to_asic(desc, tp->rx_buf_sz);
+			goto skip_process_pkt;
+		}
+		ring->data_buf[entry] = NULL;
+
+		if (dev->features & NETIF_F_RXCSUM)
+			rtase_rx_csum(tp, skb, desc);
+
+		skb_put(skb, pkt_size);
+		skb_mark_for_recycle(skb);
+		skb->protocol = eth_type_trans(skb, dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST)
+			tp->stats.multicast++;
+
+		rtase_rx_vlan_skb(desc, skb);
+		rtase_rx_skb(ring, skb);
+
+		dev_sw_netstats_rx_add(dev, pkt_size);
+
+skip_process_pkt:
+		workdone++;
+		cur_rx++;
+		entry = cur_rx % RTASE_NUM_DESC;
+		desc = ring->desc + sizeof(union rtase_rx_desc) * entry;
+	}
+
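+	/* Publish the new consumer index, refill the entries handed back to
+	 * the stack, and advance dirty_idx by the number actually refilled.
+	 */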
+	ring->cur_idx = cur_rx;
+	delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx);
+	ring->dirty_idx += delta;
+
+	return workdone;
+}
+
 static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
 {
 	struct rtase_ring *ring = &tp->rx_ring[idx];