@@ -1027,6 +1027,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 	struct bnxt_re_qp *qp;
 	struct bnxt_re_cq *cq;
+	struct bnxt_re_srq *srq;
 	int rc, entries;
 
 	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
@@ -1082,9 +1083,16 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	}
 
 	if (qp_init_attr->srq) {
-		dev_err(rdev_to_dev(rdev), "SRQ not supported");
-		rc = -ENOTSUPP;
-		goto fail;
+		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
+				   ib_srq);
+		if (!srq) {
+			dev_err(rdev_to_dev(rdev), "SRQ not found");
+			rc = -EINVAL;
+			goto fail;
+		}
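+		/* Hook the QP to the SRQ; the QP's own RQ goes unused */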
+		qp->qplib_qp.srq = &srq->qplib_srq;
+		qp->qplib_qp.rq.max_wqe = 0;
 	} else {
 		/* Allocate 1 more than what's provided so posting max doesn't
 		 * mean empty
@@ -1289,6 +1297,243 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
 	}
 }
 
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+{
+	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+					       ib_srq);
+	struct bnxt_re_dev *rdev = srq->rdev;
+	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+	struct bnxt_qplib_nq *nq = NULL;
+	int rc;
+
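+	/* Cache the NQ first; it is needed to trim the budget after destroy */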
+	if (qplib_srq->cq)
+		nq = qplib_srq->cq->nq;
+	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
+		return rc;
+	}
+
+	if (srq->umem && !IS_ERR(srq->umem))
+		ib_umem_release(srq->umem);
+	kfree(srq);
+	atomic_dec(&rdev->srq_count);
+	if (nq)
+		nq->budget--;
+	return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+				 struct bnxt_re_pd *pd,
+				 struct bnxt_re_srq *srq,
+				 struct ib_udata *udata)
+{
+	struct bnxt_re_srq_req ureq;
+	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+	struct ib_umem *umem;
+	int bytes = 0;
+	struct ib_ucontext *context = pd->ib_pd.uobject->context;
+	struct bnxt_re_ucontext *cntx = container_of(context,
+						     struct bnxt_re_ucontext,
+						     ib_uctx);
+	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+		return -EFAULT;
+
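+	/* Pin and map the user-space buffer that backs the SRQ */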
+	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+	bytes = PAGE_ALIGN(bytes);
+	umem = ib_umem_get(context, ureq.srqva, bytes,
+			   IB_ACCESS_LOCAL_WRITE, 1);
+	if (IS_ERR(umem))
+		return PTR_ERR(umem);
+
+	srq->umem = umem;
+	qplib_srq->nmap = umem->nmap;
+	qplib_srq->sglist = umem->sg_head.sgl;
+	qplib_srq->srq_handle = ureq.srq_handle;
+	qplib_srq->dpi = &cntx->dpi;
+
+	return 0;
+}
+
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
+				  struct ib_srq_init_attr *srq_init_attr,
+				  struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+	struct bnxt_re_srq *srq;
+	struct bnxt_qplib_nq *nq = NULL;
+	int rc, entries;
+
+	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+		rc = -ENOTSUPP;
+		goto exit;
+	}
+
+	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+	if (!srq) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+	srq->rdev = rdev;
+	srq->qplib_srq.pd = &pd->qplib_pd;
+	srq->qplib_srq.dpi = &rdev->dpi_privileged;
+	/* Allocate 1 more than what's provided so posting max doesn't
+	 * mean empty
+	 */
+	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+	if (entries > dev_attr->max_srq_wqes + 1)
+		entries = dev_attr->max_srq_wqes + 1;
+
+	srq->qplib_srq.max_wqe = entries;
+	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+	srq->srq_limit = srq_init_attr->attr.srq_limit;
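+	/* SRQ async events (e.g. limit reached) are routed to the first NQ */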
+	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+	nq = &rdev->nq[0];
+
+	if (udata) {
+		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+		if (rc)
+			goto fail;
+	}
+
+	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+		goto fail;
+	}
+
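+	/* Pass the HW SRQ id back to user space */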
+	if (udata) {
+		struct bnxt_re_srq_resp resp;
+
+		resp.srqid = srq->qplib_srq.id;
+		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+		if (rc) {
+			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+			bnxt_qplib_destroy_srq(&rdev->qplib_res,
+					       &srq->qplib_srq);
+			goto fail;
+		}
+	}
+	if (nq)
+		nq->budget++;
+	atomic_inc(&rdev->srq_count);
+
+	return &srq->ib_srq;
+
+fail:
+	if (udata && srq->umem && !IS_ERR(srq->umem)) {
+		ib_umem_release(srq->umem);
+		srq->umem = NULL;
+	}
+
+	kfree(srq);
+exit:
+	return ERR_PTR(rc);
+}
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+		       enum ib_srq_attr_mask srq_attr_mask,
+		       struct ib_udata *udata)
+{
+	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+					       ib_srq);
+	struct bnxt_re_dev *rdev = srq->rdev;
+	int rc;
+
+	switch (srq_attr_mask) {
+	case IB_SRQ_MAX_WR:
+		/* SRQ resize is not supported */
+		break;
+	case IB_SRQ_LIMIT:
+		/* Change the SRQ threshold */
+		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+			return -EINVAL;
+
+		srq->qplib_srq.threshold = srq_attr->srq_limit;
+		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+		if (rc) {
+			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+			return rc;
+		}
+		/* On success, update the shadow */
+		srq->srq_limit = srq_attr->srq_limit;
+		/* No need to build and send response back to udata */
+		break;
+	default:
+		dev_err(rdev_to_dev(rdev),
+			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+					       ib_srq);
+	struct bnxt_re_srq tsrq;
+	struct bnxt_re_dev *rdev = srq->rdev;
+	int rc;
+
+	/* Get live SRQ attr */
+	tsrq.qplib_srq.id = srq->qplib_srq.id;
+	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+		return rc;
+	}
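+	/* Sizes come from the driver shadow; the limit reflects HW state */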
+	srq_attr->max_wr = srq->qplib_srq.max_wqe;
+	srq_attr->max_sge = srq->qplib_srq.max_sge;
+	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+	return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
+			  struct ib_recv_wr **bad_wr)
+{
+	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+					       ib_srq);
+	struct bnxt_qplib_swqe wqe;
+	unsigned long flags;
+	int rc = 0, payload_sz = 0;
+
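+	/* Serialize posters; one SRQ may be shared by many QPs */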
+	spin_lock_irqsave(&srq->lock, flags);
+	while (wr) {
+		/* Transcribe each ib_recv_wr to qplib_swqe */
+		wqe.num_sge = wr->num_sge;
+		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+					       wr->num_sge);
+		wqe.wr_id = wr->wr_id;
+		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+		if (rc) {
+			*bad_wr = wr;
+			break;
+		}
+		wr = wr->next;
+	}
+	spin_unlock_irqrestore(&srq->lock, flags);
+
+	return rc;
+}
 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
 				    struct bnxt_re_qp *qp1_qp,
 				    int qp_attr_mask)