drm/i915: Convert i915_gem_object/client_blt.c to use ww locking as well, v2.

This is the last part outside of selftests that still doesn't use the
correct lock ordering of timeline->mutex vs resv_lock.
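
For reference, the ordering being established is the dma-resv ww acquire
pattern: take all object (resv) locks under a single i915_gem_ww_ctx, do the
work (request creation, and with it timeline->mutex, nests inside), and back
off and retry on -EDEADLK. A minimal sketch of that pattern, with an
illustrative helper name and an elided body; the i915_gem_ww_ctx_*() and
i915_gem_object_lock() calls are the ones used in the diff below:

static int do_work_under_ww(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);	/* intr=false, as in the worker below */
retry:
	/* takes obj->base.resv as part of this ww acquire context */
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* ... pin vmas/contexts and create requests under the same ww ctx ... */

out:
	if (err == -EDEADLK) {
		/* drop all locks held by this ctx, wait for the contended one, retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}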

With gem fixed, there are a few places that still get locking wrong:
- gvt/scheduler.c
- i915_perf.c
- Most if not all selftests.

Changes since v1:
- Add intel_engine_pm_get/put() calls to fix use-after-free when using
  intel_engine_get_pool().
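
A minimal sketch of that v2 fix, with an illustrative helper name and elided
request setup; intel_engine_pm_get/put(), intel_emit_vma_fill_blt() and
intel_emit_vma_release() are the calls used in the diff below. Holding the
engine pm wakeref keeps the engine's batch pool from being reclaimed while
the pooled batch buffer handed out by intel_emit_vma_fill_blt() is in use:

static int fill_blt_with_engine_awake(struct intel_context *ce,
				      struct i915_vma *vma,
				      struct i915_gem_ww_ctx *ww,
				      u32 value)
{
	struct i915_vma *batch;
	int err = 0;

	/* Keep the engine, and therefore its batch pool, from being parked. */
	intel_engine_pm_get(ce->engine);

	/* Allocates the batch from the engine's pool. */
	batch = intel_emit_vma_fill_blt(ce, vma, ww, value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_pm;
	}

	/* ... create the request, emit the blit, i915_request_add() ... */

	intel_emit_vma_release(ce, batch);
out_pm:
	intel_engine_pm_put(ce->engine);
	return err;
}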

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
mlankhorst authored and intel-lab-lkp committed Jul 3, 2020
1 parent 6afff68 commit db8aba4
Showing 3 changed files with 163 additions and 70 deletions.
78 changes: 61 additions & 17 deletions drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -156,6 +156,7 @@ static void clear_pages_worker(struct work_struct *work)
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
 	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
 	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_gem_ww_ctx ww;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	int err = w->dma.error;
@@ -171,17 +172,20 @@ static void clear_pages_worker(struct work_struct *work)
 	obj->read_domains = I915_GEM_GPU_DOMAINS;
 	obj->write_domain = 0;
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (unlikely(err))
+	i915_gem_ww_ctx_init(&ww, false);
+	intel_engine_pm_get(w->ce->engine);
+retry:
+	err = intel_context_pin_ww(w->ce, &ww);
+	if (err)
 		goto out_signal;
 
-	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+	batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto out_unpin;
+		goto out_ctx;
 	}
 
-	rq = intel_context_create_request(w->ce);
+	rq = i915_request_create(w->ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out_batch;
@@ -223,16 +227,64 @@ static void clear_pages_worker(struct work_struct *work)
 	i915_request_add(rq);
 out_batch:
 	intel_emit_vma_release(w->ce, batch);
-out_unpin:
-	i915_vma_unpin(vma);
+out_ctx:
+	intel_context_unpin(w->ce);
 out_signal:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	i915_vma_unpin(w->sleeve->vma);
+	intel_engine_pm_put(w->ce->engine);
+
 	if (unlikely(err)) {
 		dma_fence_set_error(&w->dma, err);
 		dma_fence_signal(&w->dma);
 		dma_fence_put(&w->dma);
 	}
 }
 
+static int pin_wait_clear_pages_work(struct clear_pages_work *w,
+				     struct intel_context *ce)
+{
+	struct i915_vma *vma = w->sleeve->vma;
+	struct i915_gem_ww_ctx ww;
+	int err;
+
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_gem_object_lock(vma->obj, &ww);
+	if (err)
+		goto out;
+
+	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out;
+
+	err = i915_sw_fence_await_reservation(&w->wait,
+					      vma->obj->base.resv, NULL,
+					      true, 0, I915_FENCE_GFP);
+	if (err)
+		goto err_unpin_vma;
+
+	dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
+
+err_unpin_vma:
+	if (err)
+		i915_vma_unpin(vma);
+out:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+	return err;
+}
+
 static int __i915_sw_fence_call
 clear_pages_work_notify(struct i915_sw_fence *fence,
			 enum i915_sw_fence_notify state)
@@ -286,17 +338,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
 	i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
-	i915_gem_object_lock(obj, NULL);
-	err = i915_sw_fence_await_reservation(&work->wait,
-					      obj->base.resv, NULL, true, 0,
-					      I915_FENCE_GFP);
-	if (err < 0) {
+	err = pin_wait_clear_pages_work(work, ce);
+	if (err < 0)
 		dma_fence_set_error(&work->dma, err);
-	} else {
-		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
-		err = 0;
-	}
-	i915_gem_object_unlock(obj);
 
 	dma_fence_get(&work->dma);
 	i915_sw_fence_commit(&work->wait);
