@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
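
The new QIB_MAX_PKT_RECV constant caps how many packets one call to qib_kreceive() will process before returning, replacing the hard-coded 64 that a later hunk removes from the loop condition. Roughly, the idea looks like the stand-alone sketch below (not driver code; ring_entry_ready() and process_pkt() are made-up helpers): a busy receive context gets at most one batch per pass, and whatever is left over is picked up on a later pass instead of starving everything else.

#include <stdio.h>

#define MAX_PKT_RECV 64	/* mirrors QIB_MAX_PKT_RECV */

/* Hypothetical stand-ins for "is another packet queued?" and "handle it". */
static int ring_entry_ready(int i) { return i < 200; }
static void process_pkt(int i) { (void)i; }

int main(void)
{
	int i, npkts = 0;

	/* Handle at most MAX_PKT_RECV packets per "interrupt". */
	for (i = 0; ring_entry_ready(i) && npkts < MAX_PKT_RECV; i++) {
		process_pkt(i);
		npkts++;
	}

	printf("processed %d packets this pass; the rest wait for the next pass\n",
	       npkts);
	return 0;
}
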
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs preemptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
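
The core of the new TID-error path is the PSN test: for an RC QP, when the failing packet's PSN is at or beyond the expected qp->r_psn (qib_cmp24(psn, qp->r_psn) >= 0) and no NAK is already pending, the receiver records IB_NAK_PSN_ERROR with the expected PSN and queues the QP on rcd->qp_wait_list, so the NAK goes out only after the receive queue has drained rather than propagating congestion. The following stand-alone sketch is not the kernel's qib_cmp24(), just an illustration of a 24-bit circular comparison of that kind, assuming the usual two's-complement behaviour; it shows why a result of zero or more means "at or past the expected PSN" even across the 2^24 wrap.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative 24-bit circular comparison in the spirit of the driver's
 * qib_cmp24(); not the kernel's definition.  Shift the (wrapped) unsigned
 * difference so that bit 23 of the difference becomes the sign bit, then
 * reinterpret the result as signed.
 */
static int cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	assert(cmp24(5, 5) == 0);		/* same PSN */
	assert(cmp24(10, 7) > 0);		/* 10 is at/after 7 */
	assert(cmp24(7, 10) < 0);		/* 7 is before 10 */
	assert(cmp24(0x000002, 0xfffffe) > 0);	/* 2 follows 0xfffffe across the wrap */
	printf("cmp24 ordering checks passed\n");
	return 0;
}
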
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
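
Rather than bounding the loop in the for-condition (the `i <= 64` removed earlier in this change), the cap is now applied by setting the same sticky `last` flag the end-of-queue checks use, so every exit path goes through the same tail handling below the loop and the limit gets a named constant. A minimal stand-alone sketch of that pattern (conditions made up, nothing qib-specific):

#include <stdio.h>

#define MAX_PKT 64	/* plays the role of QIB_MAX_PKT_RECV */

int main(void)
{
	int queued = 200;	/* pretend this many packets are waiting */
	int last, i;

	/* i += !last keeps i equal to the number of packets actually handled. */
	for (last = 0, i = 1; !last; i += !last) {
		/* ... handle packet number i here ... */

		if (i == MAX_PKT)	/* per-pass cap */
			last = 1;
		if (i == queued)	/* ring drained */
			last = 1;
	}

	printf("stopped after %d packets\n", i);
	return 0;
}
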
@@ -402,7 +543,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
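
dd->f_update_usrhead() now takes the running packet count i as an extra argument, both in this periodic call (every 16 packets while the loop continues) and in the final call after the loop. The chip-specific implementations are not part of this diff; presumably the count lets them scale their behaviour with how busy the context was, for example adjusting receive-interrupt mitigation. The sketch below is a hypothetical illustration of that idea only, with invented names and timeout logic, not the qib implementation:

#include <stdio.h>
#include <stdint.h>

struct fake_ctxt {
	uint64_t head;		/* software head written back to "hardware" */
	uint32_t timeout;	/* pretend interrupt-coalescing timeout, in ticks */
};

/* Hypothetical head-update hook that also sees the per-pass packet count. */
static void update_usrhead(struct fake_ctxt *c, uint64_t hd, uint32_t npkts)
{
	c->head = hd;
	/* Busy pass: back off interrupts a little; quiet pass: react faster. */
	if (npkts >= 32 && c->timeout < 64)
		c->timeout <<= 1;
	else if (npkts < 8 && c->timeout > 1)
		c->timeout >>= 1;
}

int main(void)
{
	struct fake_ctxt c = { .head = 0, .timeout = 8 };

	update_usrhead(&c, 48, 48);	/* busy pass */
	update_usrhead(&c, 52, 4);	/* quiet pass */
	printf("head=%llu timeout=%u\n",
	       (unsigned long long)c.head, (unsigned)c.timeout);
	return 0;
}
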
@@ -444,7 +585,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 