drm/panfrost: Fix job timeout handling
commit 1a11a88 upstream.

If two or more jobs end up timing out concurrently, only one of them
(the one attached to the scheduler that acquires the lock) is fully
handled. The others remain in a dangling state: no longer part of the
scheduling queue, yet still blocking the scheduler, which leads to
repeated timeouts whenever new jobs are queued.

Let's make sure all bad jobs are properly handled by the thread that
acquires the lock.
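
To picture the race: each job slot has its own scheduler, so several
timeout handlers can run concurrently, and before this patch only the
handler that won the reset-lock trylock performed any recovery. A
minimal sketch of the pre-patch flow (a condensation for illustration;
stop_reset_and_restart() is a hypothetical helper standing in for the
old stop/reset/resubmit/start sequence, not the actual kernel code):

/* Pre-patch: one timeout handler can fire per job slot. */
static void timedout_prepatch(struct panfrost_device *pfdev,
			      struct drm_sched_job *bad)
{
	/*
	 * By the time we get here, the DRM scheduler core has already
	 * unlinked 'bad' from its scheduling queue.
	 */
	if (!mutex_trylock(&pfdev->reset_lock))
		return;	/*
			 * Loser returns early: 'bad' is never reset or
			 * resubmitted, its fence never signals, and the
			 * slot keeps timing out from then on.
			 */

	stop_reset_and_restart(pfdev);	/* hypothetical condensation */
	mutex_unlock(&pfdev->reset_lock);
}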

v3:
- Add Steven's R-b
- Don't take the sched_lock when stopping the schedulers

v2:
- Fix the subject prefix
- Stop the scheduler before returning from panfrost_job_timedout()
- Call cancel_delayed_work_sync() after drm_sched_stop() to make sure
  no timeout handlers are in flight when we reset the GPU (Steven Price)
- Make sure we release the reset lock before restarting the
  schedulers (Steven Price)
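
Condensed, the fixed panfrost_job_timedout() in the diff below orders
these steps as follows (an outline for orientation, not the verbatim
code):

	/* Stop our own queue first; bail out if another handler beat us. */
	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
		return;

	if (!mutex_trylock(&pfdev->reset_lock))
		return;	/*
			 * Another thread owns the reset, but our job is
			 * already stopped and karma'd, so that thread
			 * will pick it up when it resubmits.
			 */

	/* Stop every remaining queue and flush pending timeout work. */
	/* Reset the GPU, then resubmit all jobs. */

	mutex_unlock(&pfdev->reset_lock);	/* release before restarting */

	/* Restart all the schedulers. */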

Fixes: f3ba912 ("drm/panfrost: Add initial panfrost driver")
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201002122506.1374183-1-boris.brezillon@collabora.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
bbrezillon authored and gregkh committed Dec 30, 2020
1 parent c2032bf commit a61da03
 drivers/gpu/drm/panfrost/panfrost_job.c | 62 ++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 9 deletions(-)
@@ -25,7 +25,8 @@
 
 struct panfrost_queue_state {
 	struct drm_gpu_scheduler sched;
-
+	bool stopped;
+	struct mutex lock;
 	u64 fence_context;
 	u64 emit_seqno;
 };
@@ -369,6 +370,24 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
 	job_write(pfdev, JOB_INT_MASK, irq_mask);
 }
 
+static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
+				    struct drm_sched_job *bad)
+{
+	bool stopped = false;
+
+	mutex_lock(&queue->lock);
+	if (!queue->stopped) {
+		drm_sched_stop(&queue->sched, bad);
+		if (bad)
+			drm_sched_increase_karma(bad);
+		queue->stopped = true;
+		stopped = true;
+	}
+	mutex_unlock(&queue->lock);
+
+	return stopped;
+}
+
 static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 {
 	struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -392,19 +411,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 		job_read(pfdev, JS_TAIL_LO(js)),
 		sched_job);
 
+	/* Scheduler is already stopped, nothing to do. */
+	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
+		return;
+
 	if (!mutex_trylock(&pfdev->reset_lock))
 		return;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
 		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
 
-		drm_sched_stop(sched, sched_job);
-		if (js != i)
-			/* Ensure any timeouts on other slots have finished */
+		/*
+		 * If the queue is still active, make sure we wait for any
+		 * pending timeouts.
+		 */
+		if (!pfdev->js->queue[i].stopped)
 			cancel_delayed_work_sync(&sched->work_tdr);
-	}
 
-	drm_sched_increase_karma(sched_job);
+		/*
+		 * If the scheduler was not already stopped, there's a tiny
+		 * chance a timeout has expired just before we stopped it, and
+		 * drm_sched_stop() does not flush pending works. Let's flush
+		 * them now so the timeout handler doesn't get called in the
+		 * middle of a reset.
+		 */
+		if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL))
+			cancel_delayed_work_sync(&sched->work_tdr);
+
+		/*
+		 * Now that we cancelled the pending timeouts, we can safely
+		 * reset the stopped state.
+		 */
+		pfdev->js->queue[i].stopped = false;
+	}
 
 	spin_lock_irqsave(&pfdev->js->job_lock, flags);
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
@@ -421,11 +460,11 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
 
+	mutex_unlock(&pfdev->reset_lock);
+
 	/* restart scheduler after GPU is usable again */
 	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		drm_sched_start(&pfdev->js->queue[i].sched, true);
-
-	mutex_unlock(&pfdev->reset_lock);
 }
 
 static const struct drm_sched_backend_ops panfrost_sched_ops = {
@@ -558,6 +597,7 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+		mutex_init(&js->queue[i].lock);
 		sched = &js->queue[i].sched;
 		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
 					    DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -570,10 +610,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
 {
+	struct panfrost_device *pfdev = panfrost_priv->pfdev;
+	struct panfrost_job_slot *js = pfdev->js;
 	int i;
 
-	for (i = 0; i < NUM_JOB_SLOTS; i++)
+	for (i = 0; i < NUM_JOB_SLOTS; i++) {
 		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+		mutex_destroy(&js->queue[i].lock);
+	}
 }
 
 int panfrost_job_is_idle(struct panfrost_device *pfdev)