@@ -63,7 +63,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
 	lockdep_assert_held(&sched->lock);
 
 	job->flags |= DRM_MOCK_SCHED_JOB_DONE;
-	list_move_tail(&job->link, &sched->done_list);
+	list_del(&job->link);
 	dma_fence_signal_locked(&job->hw_fence);
 	complete(&job->done);
 }
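
With the done_list gone, a completed job is no longer tracked on any scheduler-side list, so observers have to go through the job's fence or completion instead. A minimal sketch of such a waiter, using only the fields visible in this hunk (`hw_fence`, `done`); the helper itself is hypothetical and not part of this patch:

```c
#include <linux/completion.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>

/*
 * Hypothetical helper, not part of this patch: block until
 * drm_mock_sched_job_complete() has run for @job, then report the
 * fence status. Both hw_fence and done are signalled under
 * sched->lock, so either primitive is a valid wait point.
 */
static int mock_job_wait(struct drm_mock_sched_job *job,
			 unsigned long timeout_jiffies)
{
	if (!wait_for_completion_timeout(&job->done, timeout_jiffies))
		return -ETIMEDOUT;

	/* Negative on error (e.g. -ECANCELED), 1 once signalled. */
	return dma_fence_get_status(&job->hw_fence);
}
```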
@@ -236,26 +236,41 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
 
 static void mock_sched_free_job(struct drm_sched_job *sched_job)
 {
-	struct drm_mock_scheduler *sched =
-			drm_sched_to_mock_sched(sched_job->sched);
 	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
-	unsigned long flags;
 
-	/* Remove from the scheduler done list. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_del(&job->link);
-	spin_unlock_irqrestore(&sched->lock, flags);
 	dma_fence_put(&job->hw_fence);
-
 	drm_sched_job_cleanup(sched_job);
 
 	/* Mock job itself is freed by the kunit framework. */
 }
 
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+	unsigned long flags;
+
+	hrtimer_cancel(&job->timer);
+
+	spin_lock_irqsave(&sched->lock, flags);
+	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+		list_del(&job->link);
+		dma_fence_set_error(&job->hw_fence, -ECANCELED);
+		dma_fence_signal_locked(&job->hw_fence);
+	}
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	/*
+	 * The GPU scheduler will still call drm_sched_backend_ops.free_job().
+	 * The mock job itself is freed by the KUnit framework.
+	 */
+}
+
 static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
 	.run_job = mock_sched_run_job,
 	.timedout_job = mock_sched_timedout_job,
-	.free_job = mock_sched_free_job
+	.free_job = mock_sched_free_job,
+	.cancel_job = mock_sched_cancel_job,
 };
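
For context, `cancel_job` shifts responsibility for still-pending jobs to the scheduler core at teardown time. A rough sketch of that contract, assuming the semantics this patch relies on; this is not the real `drm_sched_fini()` code:

```c
#include <drm/gpu_scheduler.h>

/*
 * Rough sketch, not the scheduler's actual implementation: on teardown,
 * every job still on the pending list is first cancelled through
 * cancel_job() (which above signals the hardware fence with -ECANCELED)
 * and then released through free_job() as usual. This contract is what
 * lets the mock scheduler drop its own done_list bookkeeping.
 */
static void sketch_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	list_for_each_entry_safe(job, next, &sched->pending_list, list) {
		sched->ops->cancel_job(job);
		sched->ops->free_job(job);
	}
}
```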
 
 /**
@@ -289,7 +304,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
 	sched->hw_timeline.context = dma_fence_context_alloc(1);
 	atomic_set(&sched->hw_timeline.next_seqno, 0);
 	INIT_LIST_HEAD(&sched->job_list);
-	INIT_LIST_HEAD(&sched->done_list);
 	spin_lock_init(&sched->lock);
 
 	return sched;
@@ -304,38 +318,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
  */
 void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
 {
-	struct drm_mock_sched_job *job, *next;
-	unsigned long flags;
-	LIST_HEAD(list);
-
-	drm_sched_wqueue_stop(&sched->base);
-
-	/* Force complete all unfinished jobs. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->job_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry(job, &list, link)
-		hrtimer_cancel(&job->timer);
-
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &list, link)
-		drm_mock_sched_job_complete(job);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	/*
-	 * Free completed jobs and jobs not yet processed by the DRM scheduler
-	 * free worker.
-	 */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->done_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry_safe(job, next, &list, link)
-		mock_sched_free_job(&job->base);
-
 	drm_sched_fini(&sched->base);
 }
 
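
With the manual draining removed, `drm_mock_sched_fini()` is only correct if `drm_sched_fini()` really cancels unfinished jobs through the new callback. A hypothetical KUnit test sketching that expectation; `drm_mock_sched_entity_new()`, `drm_mock_sched_job_new()`, `drm_mock_sched_job_submit()` and `drm_mock_sched_entity_free()` are assumed from the rest of this test suite:

```c
/*
 * Hypothetical test, not part of this patch: tear the scheduler down
 * while a job is still pending and expect cancel_job() to have
 * signalled its hardware fence with -ECANCELED.
 */
static void drm_mock_sched_test_cancel_on_fini(struct kunit *test)
{
	struct drm_mock_scheduler *sched;
	struct drm_mock_sched_entity *entity;
	struct drm_mock_sched_job *job;

	sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
	entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL,
					   sched);
	job = drm_mock_sched_job_new(test, entity);

	drm_mock_sched_job_submit(job);

	/* The job is never marked done; fini must cancel it. */
	drm_mock_sched_entity_free(entity);
	drm_mock_sched_fini(sched);

	KUNIT_EXPECT_EQ(test, job->hw_fence.error, -ECANCELED);
}
```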