@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
 
 	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
 	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+	mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
 	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
 	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
 	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
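
The mutex_init() added above implies a matching member in the per-instance structure, which lives outside this file's hunks. A minimal sketch of the presumed companion declaration in struct amdgpu_vcn_inst (amdgpu_vcn.h); the field name comes from the call above, while its placement next to vcn_pg_lock is an assumption:

struct amdgpu_vcn_inst {
	/* ... existing members ... */
	struct mutex	vcn_pg_lock;
	/* assumed companion to the mutex_init() above: serializes
	 * per-instance engine resets (see amdgpu_vcn_reset_engine())
	 */
	struct mutex	engine_reset_mutex;
	/* ... */
};
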
@@ -1451,3 +1452,81 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
 
 	return ret;
 }
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+				   uint32_t instance_id)
+{
+	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+	int r, i;
+
+	mutex_lock(&vinst->engine_reset_mutex);
+	/* Stop the scheduler's work queue for the dec and enc rings if they
+	 * are running. This ensures that no new tasks are submitted to the
+	 * queues while the reset is in progress.
+	 */
+	drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+	for (i = 0; i < vinst->num_enc_rings; i++)
+		drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+	/* Perform the VCN reset for the specified instance */
+	r = vinst->reset(vinst);
+	if (r)
+		goto unlock;
+	r = amdgpu_ring_test_ring(&vinst->ring_dec);
+	if (r)
+		goto unlock;
+	for (i = 0; i < vinst->num_enc_rings; i++) {
+		r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+		if (r)
+			goto unlock;
+	}
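+	/* Reset succeeded and the rings passed their tests: force-complete
+	 * the fences left over from before the reset so that their waiters,
+	 * including the timed-out job, are released.
+	 */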
+	amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+	for (i = 0; i < vinst->num_enc_rings; i++)
+		amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+	/* Restart the scheduler's work queue for the dec and enc rings
+	 * if they were stopped by this function. This allows new tasks
+	 * to be submitted to the queues after the reset is complete.
+	 */
+	drm_sched_wqueue_start(&vinst->ring_dec.sched);
+	for (i = 0; i < vinst->num_enc_rings; i++)
+		drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+	mutex_unlock(&vinst->engine_reset_mutex);
+
+	return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of the guilty job
+ * @timedout_fence: fence of the timed-out job
+ *
+ * This helper is for VCN blocks without unified queues, where resetting
+ * the engine resets all of its queues; with unified queues there is one
+ * queue per engine.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+			  unsigned int vmid,
+			  struct amdgpu_fence *timedout_fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+		return -EOPNOTSUPP;
+
+	if (adev->vcn.inst[ring->me].using_unified_queue)
+		return -EINVAL;
+
+	return amdgpu_vcn_reset_engine(adev, ring->me);
+}
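
For context beyond this diff: a helper with this signature is meant to be wired up as a ring's reset callback in the per-IP-version code. A minimal sketch, assuming an amdgpu_ring_funcs table for a non-unified-queue VCN version; the table name here is illustrative and only the .reset line is the point:

static const struct amdgpu_ring_funcs vcn_dec_ring_funcs = {
	/* ... other callbacks elided ... */
	.reset = amdgpu_vcn_ring_reset,	/* per-queue reset entry point */
};

The callback only does useful work if the IP-version code also advertises per-queue reset support, presumably by setting AMDGPU_RESET_TYPE_PER_QUEUE in adev->vcn.supported_reset during sw_init; otherwise amdgpu_vcn_ring_reset() bails out with -EOPNOTSUPP.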