drm/i915/gem: Roll all of context creation together
Now that we have the full engine set and VM at context-creation time, we
can assign those fields directly instead of creating the context first
and fixing up the VM and engines afterwards.  This avoids creating
useless VMs and engine sets and lets us delete the complex VM-setting
code.

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
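
The shape of the change, as a minimal sketch rather than the driver code itself (struct ctx, struct proto_ctx and the vm_*()/ *_engines() helpers below are placeholders standing in for the i915 types): because the proto-context supplies the VM and the engine set up front, the context can be fully populated at allocation time and unwound with gotos on failure, instead of being created with defaults and then patched under its own locks.

    /*
     * Simplified illustration only; the struct and helper names are
     * hypothetical, while kzalloc()/kfree()/ERR_PTR() are the usual
     * kernel primitives.
     */
    static struct ctx *create_ctx(const struct proto_ctx *pc)
    {
            struct ctx *ctx;
            int err;

            ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return ERR_PTR(-ENOMEM);

            /* The VM is known up front: take a reference and assign it once. */
            ctx->vm = pc->vm ? vm_get(pc->vm) : default_vm();
            if (IS_ERR(ctx->vm)) {
                    err = PTR_ERR(ctx->vm);
                    goto err_free;
            }

            /* Likewise the engine set: user-supplied or the legacy default. */
            ctx->engines = pc->num_user_engines >= 0 ?
                           user_engines(pc) : default_engines(pc);
            if (IS_ERR(ctx->engines)) {
                    err = PTR_ERR(ctx->engines);
                    goto err_vm;
            }

            return ctx;

    err_vm:
            vm_put(ctx->vm);
    err_free:
            kfree(ctx);
            return ERR_PTR(err);
    }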
gfxstrand authored and intel-lab-lkp committed Jun 9, 2021
1 parent fa5fb14 · commit 82e92cf
Showing 2 changed files with 73 additions and 136 deletions.
drivers/gpu/drm/i915/gem/i915_gem_context.c: 59 additions, 117 deletions
@@ -1279,56 +1279,6 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
         return 0;
 }
 
-static struct i915_gem_context *
-__create_context(struct drm_i915_private *i915,
-                 const struct i915_gem_proto_context *pc)
-{
-        struct i915_gem_context *ctx;
-        struct i915_gem_engines *e;
-        int err;
-        int i;
-
-        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-        if (!ctx)
-                return ERR_PTR(-ENOMEM);
-
-        kref_init(&ctx->ref);
-        ctx->i915 = i915;
-        ctx->sched = pc->sched;
-        mutex_init(&ctx->mutex);
-        INIT_LIST_HEAD(&ctx->link);
-
-        spin_lock_init(&ctx->stale.lock);
-        INIT_LIST_HEAD(&ctx->stale.engines);
-
-        mutex_init(&ctx->engines_mutex);
-        e = default_engines(ctx, pc->legacy_rcs_sseu);
-        if (IS_ERR(e)) {
-                err = PTR_ERR(e);
-                goto err_free;
-        }
-        RCU_INIT_POINTER(ctx->engines, e);
-
-        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-        mutex_init(&ctx->lut_mutex);
-
-        /* NB: Mark all slices as needing a remap so that when the context first
-         * loads it will restore whatever remap state already exists. If there
-         * is no remap info, it will be a NOP. */
-        ctx->remap_slice = ALL_L3_SLICES(i915);
-
-        ctx->user_flags = pc->user_flags;
-
-        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
-                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
-
-        return ctx;
-
-err_free:
-        kfree(ctx);
-        return ERR_PTR(err);
-}
-
 static inline struct i915_gem_engines *
 __context_engines_await(const struct i915_gem_context *ctx,
                         bool *user_engines)
@@ -1372,105 +1322,97 @@ context_apply_all(struct i915_gem_context *ctx,
         i915_sw_fence_complete(&e->fence);
 }
 
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
-{
-        i915_vm_put(ce->vm);
-        ce->vm = i915_vm_get(vm);
-}
-
-static struct i915_address_space *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
-{
-        struct i915_address_space *old;
-
-        old = rcu_replace_pointer(ctx->vm,
-                                  i915_vm_open(vm),
-                                  lockdep_is_held(&ctx->mutex));
-        GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
-
-        context_apply_all(ctx, __apply_ppgtt, vm);
-
-        return old;
-}
-
-static void __assign_ppgtt(struct i915_gem_context *ctx,
-                           struct i915_address_space *vm)
-{
-        if (vm == rcu_access_pointer(ctx->vm))
-                return;
-
-        vm = __set_ppgtt(ctx, vm);
-        if (vm)
-                i915_vm_close(vm);
-}
-
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *i915,
                         const struct i915_gem_proto_context *pc)
 {
         struct i915_gem_context *ctx;
-        int ret;
+        struct i915_address_space *vm = NULL;
+        struct i915_gem_engines *e;
+        int err;
+        int i;
 
-        ctx = __create_context(i915, pc);
-        if (IS_ERR(ctx))
-                return ctx;
+        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+        if (!ctx)
+                return ERR_PTR(-ENOMEM);
+
+        kref_init(&ctx->ref);
+        ctx->i915 = i915;
+        ctx->sched = pc->sched;
+        mutex_init(&ctx->mutex);
+        INIT_LIST_HEAD(&ctx->link);
+
+        spin_lock_init(&ctx->stale.lock);
+        INIT_LIST_HEAD(&ctx->stale.engines);
 
         if (pc->vm) {
-                /* __assign_ppgtt() requires this mutex to be held */
-                mutex_lock(&ctx->mutex);
-                __assign_ppgtt(ctx, pc->vm);
-                mutex_unlock(&ctx->mutex);
+                vm = i915_vm_get(pc->vm);
         } else if (HAS_FULL_PPGTT(i915)) {
                 struct i915_ppgtt *ppgtt;
 
                 ppgtt = i915_ppgtt_create(&i915->gt);
                 if (IS_ERR(ppgtt)) {
                         drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
                                 PTR_ERR(ppgtt));
-                        context_close(ctx);
-                        return ERR_CAST(ppgtt);
+                        err = PTR_ERR(ppgtt);
+                        goto err_ctx;
                 }
+                vm = &ppgtt->vm;
+        }
+        if (vm) {
+                RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
 
-                /* __assign_ppgtt() requires this mutex to be held */
-                mutex_lock(&ctx->mutex);
-                __assign_ppgtt(ctx, &ppgtt->vm);
-                mutex_unlock(&ctx->mutex);
-
-                /* __assign_ppgtt() takes another reference for us */
-                i915_vm_put(&ppgtt->vm);
+                /* i915_vm_open() takes a reference */
+                i915_vm_put(vm);
         }
 
+        mutex_init(&ctx->engines_mutex);
         if (pc->num_user_engines >= 0) {
-                struct i915_gem_engines *engines;
+                i915_gem_context_set_user_engines(ctx);
+                e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
+        } else {
+                i915_gem_context_clear_user_engines(ctx);
+                e = default_engines(ctx, pc->legacy_rcs_sseu);
+        }
+        if (IS_ERR(e)) {
+                err = PTR_ERR(e);
+                goto err_vm;
+        }
+        RCU_INIT_POINTER(ctx->engines, e);
 
-                engines = user_engines(ctx, pc->num_user_engines,
-                                       pc->user_engines);
-                if (IS_ERR(engines)) {
-                        context_close(ctx);
-                        return ERR_CAST(engines);
-                }
+        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+        mutex_init(&ctx->lut_mutex);
 
-                mutex_lock(&ctx->engines_mutex);
-                i915_gem_context_set_user_engines(ctx);
-                engines = rcu_replace_pointer(ctx->engines, engines, 1);
-                mutex_unlock(&ctx->engines_mutex);
+        /* NB: Mark all slices as needing a remap so that when the context first
+         * loads it will restore whatever remap state already exists. If there
+         * is no remap info, it will be a NOP. */
+        ctx->remap_slice = ALL_L3_SLICES(i915);
 
-                free_engines(engines);
-        }
+        ctx->user_flags = pc->user_flags;
 
+        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
         if (pc->single_timeline) {
-                ret = drm_syncobj_create(&ctx->syncobj,
+                err = drm_syncobj_create(&ctx->syncobj,
                                          DRM_SYNCOBJ_CREATE_SIGNALED,
                                          NULL);
-                if (ret) {
-                        context_close(ctx);
-                        return ERR_PTR(ret);
-                }
+                if (err)
+                        goto err_engines;
         }
 
         trace_i915_context_create(ctx);
 
         return ctx;
+
+err_engines:
+        free_engines(e);
+err_vm:
+        if (ctx->vm)
+                i915_vm_close(ctx->vm);
+err_ctx:
+        kfree(ctx);
+        return ERR_PTR(err);
 }
 
 static void init_contexts(struct i915_gem_contexts *gc)
drivers/gpu/drm/i915/gem/selftests/mock_context.c: 14 additions, 19 deletions
@@ -31,41 +31,36 @@ mock_context(struct drm_i915_private *i915,
 
         i915_gem_context_set_persistence(ctx);
 
-        mutex_init(&ctx->engines_mutex);
-        e = default_engines(ctx, null_sseu);
-        if (IS_ERR(e))
-                goto err_free;
-        RCU_INIT_POINTER(ctx->engines, e);
-
-        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-        mutex_init(&ctx->lut_mutex);
-
         if (name) {
                 struct i915_ppgtt *ppgtt;
 
                 strncpy(ctx->name, name, sizeof(ctx->name) - 1);
 
                 ppgtt = mock_ppgtt(i915, name);
                 if (!ppgtt)
-                        goto err_put;
-
-                mutex_lock(&ctx->mutex);
-                __set_ppgtt(ctx, &ppgtt->vm);
-                mutex_unlock(&ctx->mutex);
+                        goto err_free;
 
+                ctx->vm = i915_vm_open(&ppgtt->vm);
                 i915_vm_put(&ppgtt->vm);
         }
 
+        mutex_init(&ctx->engines_mutex);
+        e = default_engines(ctx, null_sseu);
+        if (IS_ERR(e))
+                goto err_vm;
+        RCU_INIT_POINTER(ctx->engines, e);
+
+        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+        mutex_init(&ctx->lut_mutex);
+
         return ctx;
 
+err_vm:
+        if (ctx->vm)
+                i915_vm_close(ctx->vm);
 err_free:
         kfree(ctx);
         return NULL;
-
-err_put:
-        i915_gem_context_set_closed(ctx);
-        i915_gem_context_put(ctx);
-        return NULL;
 }
 
 void mock_context_close(struct i915_gem_context *ctx)
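
For orientation, a rough usage sketch of the selftest helper changed above. Only mock_context() and mock_context_close() come from this file; the surrounding function and the source of the i915 pointer (the selftest harness's mock device setup) are assumed rather than taken from this diff.

    /* Hypothetical selftest snippet; i915 is assumed to come from the
     * harness's mock device setup, which is outside this diff. */
    static int demo_mock_context(struct drm_i915_private *i915)
    {
            struct i915_gem_context *ctx;

            /* Passing a name makes mock_context() attach a mock PPGTT too. */
            ctx = mock_context(i915, "mock");
            if (!ctx)
                    return -ENOMEM;

            /* ... exercise the context under test ... */

            mock_context_close(ctx);
            return 0;
    }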
