@@ -1291,7 +1291,7 @@ mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
         bool ret;
 
         ret = wait_event_timeout(dev->reset_wait,
-                                 (READ_ONCE(dev->reset_state) & state),
+                                 (READ_ONCE(dev->recovery.state) & state),
                                  MT7915_RESET_TIMEOUT);
 
         WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
@@ -1346,6 +1346,168 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
         idr_destroy(&dev->mt76.token);
 }
 
+static int
+mt7915_mac_restart(struct mt7915_dev *dev)
+{
+        struct mt7915_phy *phy2;
+        struct mt76_phy *ext_phy;
+        struct mt76_dev *mdev = &dev->mt76;
+        int i, ret;
+
+        ext_phy = dev->mt76.phys[MT_BAND1];
+        phy2 = ext_phy ? ext_phy->priv : NULL;
+
+        if (dev->hif2) {
+                mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
+                mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+        }
+
+        if (dev_is_pci(mdev->dev)) {
+                mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+                if (dev->hif2)
+                        mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
+        }
+
+        set_bit(MT76_RESET, &dev->mphy.state);
+        set_bit(MT76_MCU_RESET, &dev->mphy.state);
+        wake_up(&dev->mt76.mcu.wait);
+        if (ext_phy) {
+                set_bit(MT76_RESET, &ext_phy->state);
+                set_bit(MT76_MCU_RESET, &ext_phy->state);
+        }
+
+        /* lock/unlock all queues to ensure that no tx is pending */
+        mt76_txq_schedule_all(&dev->mphy);
+        if (ext_phy)
+                mt76_txq_schedule_all(ext_phy);
+
+        /* disable all tx/rx napi */
+        mt76_worker_disable(&dev->mt76.tx_worker);
+        mt76_for_each_q_rx(mdev, i) {
+                if (mdev->q_rx[i].ndesc)
+                        napi_disable(&dev->mt76.napi[i]);
+        }
+        napi_disable(&dev->mt76.tx_napi);
+
+        /* token reinit */
+        mt7915_tx_token_put(dev);
+        idr_init(&dev->mt76.token);
+
+        mt7915_dma_reset(dev, true);
+
+        local_bh_disable();
+        mt76_for_each_q_rx(mdev, i) {
+                if (mdev->q_rx[i].ndesc) {
+                        napi_enable(&dev->mt76.napi[i]);
+                        napi_schedule(&dev->mt76.napi[i]);
+                }
+        }
+        local_bh_enable();
+        clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+        clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+        mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+        mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+        if (dev->hif2) {
+                mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
+                mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+        }
+        if (dev_is_pci(mdev->dev)) {
+                mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+                if (dev->hif2)
+                        mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+        }
+
+        /* load firmware */
+        ret = mt7915_mcu_init_firmware(dev);
+        if (ret)
+                goto out;
+
+        /* set the necessary init items */
+        ret = mt7915_mcu_set_eeprom(dev);
+        if (ret)
+                goto out;
+
+        mt7915_mac_init(dev);
+        mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
+        mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+        ret = mt7915_txbf_init(dev);
+
+        if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
+                ret = mt7915_run(dev->mphy.hw);
+                if (ret)
+                        goto out;
+        }
+
+        if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
+                ret = mt7915_run(ext_phy->hw);
+                if (ret)
+                        goto out;
+        }
+
+out:
+        /* reset done */
+        clear_bit(MT76_RESET, &dev->mphy.state);
+        if (phy2)
+                clear_bit(MT76_RESET, &phy2->mt76->state);
+
+        local_bh_disable();
+        napi_enable(&dev->mt76.tx_napi);
+        napi_schedule(&dev->mt76.tx_napi);
+        local_bh_enable();
+
+        mt76_worker_enable(&dev->mt76.tx_worker);
+
+        return ret;
+}
+
+static void
+mt7915_mac_full_reset(struct mt7915_dev *dev)
+{
+        struct mt76_phy *ext_phy;
+        int i;
+
+        ext_phy = dev->mt76.phys[MT_BAND1];
+
+        dev->recovery.hw_full_reset = true;
+
+        wake_up(&dev->mt76.mcu.wait);
+        ieee80211_stop_queues(mt76_hw(dev));
+        if (ext_phy)
+                ieee80211_stop_queues(ext_phy->hw);
+
+        cancel_delayed_work_sync(&dev->mphy.mac_work);
+        if (ext_phy)
+                cancel_delayed_work_sync(&ext_phy->mac_work);
+
+        mutex_lock(&dev->mt76.mutex);
+        for (i = 0; i < 10; i++) {
+                if (!mt7915_mac_restart(dev))
+                        break;
+        }
+        mutex_unlock(&dev->mt76.mutex);
+
+        if (i == 10)
+                dev_err(dev->mt76.dev, "chip full reset failed\n");
+
+        ieee80211_restart_hw(mt76_hw(dev));
+        if (ext_phy)
+                ieee80211_restart_hw(ext_phy->hw);
+
+        ieee80211_wake_queues(mt76_hw(dev));
+        if (ext_phy)
+                ieee80211_wake_queues(ext_phy->hw);
+
+        dev->recovery.hw_full_reset = false;
+        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+                                     MT7915_WATCHDOG_TIME);
+        if (ext_phy)
+                ieee80211_queue_delayed_work(ext_phy->hw,
+                                             &ext_phy->mac_work,
+                                             MT7915_WATCHDOG_TIME);
+}
+
 /* system error recovery */
 void mt7915_mac_reset_work(struct work_struct *work)
 {
@@ -1358,7 +1520,28 @@ void mt7915_mac_reset_work(struct work_struct *work)
         ext_phy = dev->mt76.phys[MT_BAND1];
         phy2 = ext_phy ? ext_phy->priv : NULL;
 
-        if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
+        /* chip full reset */
+        if (dev->recovery.restart) {
+                /* disable WA/WM WDT */
+                mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
+                           MT_MCU_CMD_WDT_MASK);
+
+                mt7915_mac_full_reset(dev);
+
+                /* enable mcu irq */
+                mt7915_irq_enable(dev, MT_INT_MCU_CMD);
+                mt7915_irq_disable(dev, 0);
+
+                /* enable WA/WM WDT */
+                mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
+
+                dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
+                dev->recovery.restart = false;
+                return;
+        }
+
+        /* chip partial reset */
+        if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
                 return;
 
         ieee80211_stop_queues(mt76_hw(dev));
@@ -1432,6 +1615,30 @@ void mt7915_mac_reset_work(struct work_struct *work)
                                      MT7915_WATCHDOG_TIME);
 }
 
+void mt7915_reset(struct mt7915_dev *dev)
+{
+        if (!dev->recovery.hw_init_done)
+                return;
+
+        if (dev->recovery.hw_full_reset)
+                return;
+
+        /* wm/wa exception: do full recovery */
+        if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
+                dev->recovery.restart = true;
+                dev_info(dev->mt76.dev,
+                         "%s indicated firmware crash, attempting recovery\n",
+                         wiphy_name(dev->mt76.hw->wiphy));
+
+                mt7915_irq_disable(dev, MT_INT_MCU_CMD);
+                queue_work(dev->mt76.wq, &dev->reset_work);
+                return;
+        }
+
+        queue_work(dev->mt76.wq, &dev->reset_work);
+        wake_up(&dev->reset_wait);
+}
+
 void mt7915_mac_update_stats(struct mt7915_phy *phy)
 {
         struct mt7915_dev *dev = phy->dev;
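
Note on the new dev->recovery fields: this change reads and writes recovery.state, recovery.restart, recovery.hw_full_reset and recovery.hw_init_done, whose declaration lives in mt7915.h and is not shown in this diff. The sketch below only summarizes what the code above appears to assume about those fields; the struct name is invented for illustration and the real layout in the header may differ.

/* Hypothetical sketch (not part of this diff): recovery bookkeeping that
 * mt7915_mac_reset_work() and mt7915_reset() rely on, presumably embedded
 * in struct mt7915_dev in mt7915.h.
 */
struct mt7915_recovery_sketch {
        u32 state;           /* last MCU command/error status latched by the IRQ path */
        bool hw_init_done;   /* normal bring-up finished; reset requests are ignored before this */
        bool hw_full_reset;  /* mt7915_mac_full_reset() currently in progress */
        bool restart;        /* WM/WA watchdog fired; next reset_work run does a full restart */
};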
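The caller of mt7915_reset() is also outside this file. In the mt7915 driver the MCU command status is sampled in the interrupt tasklet in mmio.c, so the companion change there presumably latches that value into dev->recovery.state before the work item is scheduled, roughly as sketched below. This is an assumption about the matching mmio.c hunk, not something shown in this diff.

/* Rough sketch of the expected IRQ-side hook (mt7915_irq_tasklet() in mmio.c),
 * not part of this diff: read and acknowledge the MCU command status, store it
 * in dev->recovery.state and let mt7915_reset() decide between a partial DMA
 * reset and a full chip restart.
 */
if (intr & MT_INT_MCU_CMD) {
        u32 val = mt76_rr(dev, MT_MCU_CMD);

        mt76_wr(dev, MT_MCU_CMD, val);
        if (val & MT_MCU_CMD_ERROR_MASK) {
                dev->recovery.state = val;
                mt7915_reset(dev);
        }
}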