@@ -70,8 +70,6 @@
 #define SQ_MASKED_IDX(sq, idx)	((idx) & (sq)->wq->mask)
 #define RQ_MASKED_IDX(rq, idx)	((idx) & (rq)->wq->mask)
 
-#define TX_MAX_MSS_DEFAULT	0x3E00
-
 enum sq_wqe_type {
	SQ_NORMAL_WQE = 0,
 };
@@ -494,33 +492,16 @@ static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
 			  HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
 			  HINIC_SQ_CTRL_SET(ctrl_size, LEN);
 
-	ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
-					     QUEUE_INFO_MSS);
+	ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
+					     QUEUE_INFO_MSS) |
+			   HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
 }
 
 static void sq_prepare_task(struct hinic_sq_task *task)
 {
-	task->pkt_info0 =
-		HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
-		HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
-		HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
-					INNER_L3TYPE) |
-		HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
-					VLAN_OFFLOAD) |
-		HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
-	task->pkt_info1 =
-		HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
-		HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
-		HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
-	task->pkt_info2 =
-		HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
-		HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
-		HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
-					TUNNEL_L4TYPE) |
-		HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
-					OUTER_L3TYPE);
+	task->pkt_info0 = 0;
+	task->pkt_info1 = 0;
+	task->pkt_info2 = 0;
 
 	task->ufo_v6_identify = 0;
 
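Every field write in these hunks goes through the driver's mask-and-shift accessors (HINIC_SQ_CTRL_SET() and, in the next hunk, HINIC_SQ_CTRL_CLEAR()), defined in hinic_hw_qp.h. A minimal standalone sketch of that pattern follows; the MSS field position and width are illustrative assumptions for the demo, not the hinic register layout.

/* Standalone illustration; compiles with any C compiler. The field
 * shift/width below are assumptions, not the hardware's layout.
 */
#include <stdint.h>
#include <stdio.h>

#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT	13	/* assumed position */
#define SQ_CTRL_QUEUE_INFO_MSS_MASK	0x3FFF	/* assumed 14-bit width */

#define SQ_CTRL_SET(val, member)				\
	(((uint32_t)(val) & SQ_CTRL_##member##_MASK) <<		\
	 SQ_CTRL_##member##_SHIFT)

#define SQ_CTRL_CLEAR(val, member)				\
	((uint32_t)(val) & ~(SQ_CTRL_##member##_MASK <<		\
	 SQ_CTRL_##member##_SHIFT))

int main(void)
{
	/* default MSS, as sq_prepare_ctrl() writes it at queue setup */
	uint32_t queue_info = SQ_CTRL_SET(0x3E00, QUEUE_INFO_MSS);

	/* clear-then-set, mirroring the sequence the next hunk adds in
	 * hinic_set_cs_inner_l4(): a plain OR would merge the old and
	 * new MSS bits instead of replacing the field.
	 */
	queue_info = SQ_CTRL_CLEAR(queue_info, QUEUE_INFO_MSS);
	queue_info |= SQ_CTRL_SET(1460, QUEUE_INFO_MSS);

	printf("queue_info = 0x%08x\n", queue_info);
	return 0;
}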
@@ -529,6 +510,86 @@ static void sq_prepare_task(struct hinic_sq_task *task)
 	task->zero_pad = 0;
 }
 
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
+{
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
+}
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+			      enum hinic_l3_offload_type l3_type,
+			      u32 network_len)
+{
+	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+			   HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+			     enum hinic_l3_offload_type l3_type,
+			     u32 network_len)
+{
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+			      enum hinic_l4_offload_type l4_type,
+			      u32 tunnel_len)
+{
+	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+			   HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+			   enum hinic_l4_offload_type l4_offload,
+			   u32 l4_len, u32 offset)
+{
+	u32 tcp_udp_cs = 0, sctp = 0;
+	u32 mss = HINIC_MSS_DEFAULT;
+
+	if (l4_offload == TCP_OFFLOAD_ENABLE ||
+	    l4_offload == UDP_OFFLOAD_ENABLE)
+		tcp_udp_cs = 1;
+	else if (l4_offload == SCTP_OFFLOAD_ENABLE)
+		sctp = 1;
+
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+		       HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
+		       HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
+
+	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+	*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+			    enum hinic_l4_offload_type l4_offload,
+			    u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
+{
+	u32 tso = 0, ufo = 0;
+
+	if (l4_offload == TCP_OFFLOAD_ENABLE)
+		tso = 1;
+	else if (l4_offload == UDP_OFFLOAD_ENABLE)
+		ufo = 1;
+
+	task->ufo_v6_identify = ip_ident;
+
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
+	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+		       HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
+		       HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
+		       HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
+
+	/* set MSS value */
+	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+	*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
 /**
  * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
  * @sq: send queue
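The series' transmit path is expected to drive these helpers per packet. A hedged sketch of a plausible call sequence for a plain IPv4/TCP checksum offload is below: the header lengths are illustrative rather than parsed from a real skb, and IPV4_PKT_NO_CHKSUM_OFFLOAD is assumed to be one of the hinic_l3_offload_type values this series adds alongside TCP_OFFLOAD_ENABLE.

/* Hedged sketch of a caller, not the driver's actual TX path. */
static void example_prepare_tcp_csum(struct hinic_sq_task *task,
				     u32 *queue_info)
{
	u32 ip_hdr_len = 20;		/* IPv4, no options */
	u32 tcp_hdr_len = 20;		/* TCP, no options */
	/* offset of the payload past Ethernet + IP + TCP headers;
	 * the real driver derives this from the skb and may scale it
	 * to whatever unit the hardware expects.
	 */
	u32 payload_off = 14 + ip_hdr_len + tcp_hdr_len;

	hinic_task_set_inner_l3(task, IPV4_PKT_NO_CHKSUM_OFFLOAD,
				ip_hdr_len);
	hinic_set_cs_inner_l4(task, queue_info, TCP_OFFLOAD_ENABLE,
			      tcp_hdr_len, payload_off);
}

Note the CLEAR-then-SET on QUEUE_INFO_MSS inside hinic_set_cs_inner_l4(): it replaces the default MSS that sq_prepare_ctrl() wrote, rather than OR-merging bits into an already-populated field.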
@@ -612,6 +673,16 @@ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
 	return &hw_wqe->sq_wqe;
 }
 
+/**
+ * hinic_sq_return_wqe - return the wqe to the sq
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+	hinic_return_wqe(sq->wq, wqe_size);
+}
+
 /**
  * hinic_sq_write_wqe - write the wqe to the sq
  * @sq: send queue
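The point of the new hinic_sq_return_wqe() export: the transmit path reserves a WQE before it knows whether offload setup will succeed, and this primitive winds the producer index back so a half-built descriptor is never posted. A hedged sketch of that shape follows; prepare_offload() is a hypothetical stand-in for the series' offload routine, and locking, stats and doorbell handling are omitted.

/* Sketch only; prepare_offload() is not a real driver symbol. */
static int example_xmit(struct hinic_sq *sq, struct sk_buff *skb,
			unsigned int wqe_size)
{
	struct hinic_sq_wqe *sq_wqe;
	u16 prod_idx;

	sq_wqe = hinic_sq_get_wqe(sq, wqe_size, &prod_idx);
	if (!sq_wqe)
		return -EBUSY;	/* ring full, caller stops the queue */

	if (prepare_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info)) {
		/* undo the reservation rather than post a bad descriptor */
		hinic_sq_return_wqe(sq, wqe_size);
		return -EINVAL;
	}

	/* ... fill buf descriptors, hinic_sq_write_wqe(), ring doorbell ... */
	return 0;
}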