Commit feb8ef4

Rob Clark authored and committed
drm/msm: Add opt-in for VM_BIND
Add a SET_PARAM for userspace to request to manage the VM itself, instead
of getting a kernel managed VM.

In order to transition to a userspace managed VM, this param must be set
before any mappings are created.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661494/
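For context (not part of the commit), a minimal sketch of how userspace could opt in. It assumes libdrm's drmIoctl() and the drm_msm_param / MSM_PIPE_3D0 definitions from the MSM uapi header, and it must run before the context's first mapping is created:

#include <stdbool.h>

#include <xf86drm.h>
#include "msm_drm.h"	/* uapi header; include path depends on the build */

/* Request a userspace-managed VM; must happen before any mappings exist. */
static bool try_enable_vm_bind(int fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_EN_VM_BIND,
		.value = 1,
	};

	if (drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req)) {
		/* e.g. EINVAL when per-process pgtables are unavailable */
		return false;
	}

	return true;
}

If the ioctl fails, the caller falls back to the legacy kernel-managed VM path.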
1 parent 6bf32af commit feb8ef4

7 files changed: +99 −8 lines changed

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

Lines changed: 2 additions & 2 deletions
@@ -2276,7 +2276,7 @@ a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
 }
 
 static struct drm_gpuvm *
-a6xx_create_private_vm(struct msm_gpu *gpu)
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
 	struct msm_mmu *mmu;
 
@@ -2286,7 +2286,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu)
 		return ERR_CAST(mmu);
 
 	return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START,
-				 adreno_private_vm_size(gpu), true);
+				 adreno_private_vm_size(gpu), kernel_managed);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)

drivers/gpu/drm/msm/adreno/adreno_gpu.c

Lines changed: 15 additions & 0 deletions
@@ -504,6 +504,21 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		if (!capable(CAP_SYS_ADMIN))
 			return UERR(EPERM, drm, "invalid permissions");
 		return msm_context_set_sysprof(ctx, gpu, value);
+	case MSM_PARAM_EN_VM_BIND:
+		/* We can only support VM_BIND with per-process pgtables: */
+		if (ctx->vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+
+		/*
+		 * We can only switch to VM_BIND mode if the VM has not yet
+		 * been created:
+		 */
+		if (ctx->vm)
+			return UERR(EBUSY, drm, "VM already created");
+
+		ctx->userspace_managed_vm = value;
+
+		return 0;
 	default:
 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}

drivers/gpu/drm/msm/msm_drv.c

Lines changed: 20 additions & 2 deletions
@@ -232,9 +232,21 @@ static void load_gpu(struct drm_device *dev)
  */
 struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
 {
+	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
-	if (!ctx->vm)
-		ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
+
+	/* Once ctx->vm is created it is valid for the lifetime of the context: */
+	if (ctx->vm)
+		return ctx->vm;
+
+	mutex_lock(&init_lock);
+	if (!ctx->vm) {
+		ctx->vm = msm_gpu_create_private_vm(
+			priv->gpu, current, !ctx->userspace_managed_vm);
+
+	}
+	mutex_unlock(&init_lock);
+
 	return ctx->vm;
 }
 
@@ -424,6 +436,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	if (should_fail(&fail_gem_iova, obj->size))
 		return -ENOMEM;
 
@@ -445,6 +460,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
+	if (msm_context_is_vmbind(ctx))
+		return UERR(EINVAL, dev, "VM_BIND is enabled");
+
 	/* Only supported if per-process address space is supported: */
 	if (priv->gpu->vm == vm)
 		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");

drivers/gpu/drm/msm/msm_gem.c

Lines changed: 8 additions & 0 deletions
@@ -81,6 +81,14 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
 	if (!ctx->vm)
 		return;
 
+	/*
+	 * VM_BIND does not depend on implicit teardown of VMAs on handle
+	 * close, but instead on implicit teardown of the VM when the device
+	 * is closed (see msm_gem_vm_close())
+	 */
+	if (msm_context_is_vmbind(ctx))
+		return;
+
 	/*
 	 * TODO we might need to kick this to a queue to avoid blocking
 	 * in CLOSE ioctl

drivers/gpu/drm/msm/msm_gpu.c

Lines changed: 3 additions & 2 deletions
@@ -829,7 +829,8 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 
 /* Return a new address space for a msm_drm_private instance */
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+			  bool kernel_managed)
 {
 	struct drm_gpuvm *vm = NULL;
 
@@ -841,7 +842,7 @@ msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
 	 * the global one
 	 */
 	if (gpu->funcs->create_private_vm) {
-		vm = gpu->funcs->create_private_vm(gpu);
+		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
 		if (!IS_ERR(vm))
 			to_msm_vm(vm)->pid = get_pid(task_pid(task));
 	}

drivers/gpu/drm/msm/msm_gpu.h

Lines changed: 27 additions & 2 deletions
@@ -79,7 +79,7 @@ struct msm_gpu_funcs {
 	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
 			     bool suspended);
 	struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
-	struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu);
+	struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
 	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 
 	/**
@@ -364,6 +364,14 @@ struct msm_context {
 	 */
 	bool closed;
 
+	/**
+	 * @userspace_managed_vm:
+	 *
+	 * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+	 * MSM_PARAM_EN_VM_BIND?
+	 */
+	bool userspace_managed_vm;
+
 	/**
 	 * @vm:
 	 *
@@ -456,6 +464,22 @@ struct msm_context {
 
 struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
 
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND.  If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM.  Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+	return ctx->userspace_managed_vm;
+}
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -683,7 +707,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		const char *name, struct msm_gpu_config *config);
 
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task);
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+			  bool kernel_managed);
 
 void msm_gpu_cleanup(struct msm_gpu *gpu);

include/uapi/drm/msm_drm.h

Lines changed: 24 additions & 0 deletions
@@ -93,6 +93,30 @@ struct drm_msm_timespec {
 #define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
 /* PRR (Partially Resident Region) is required for sparse residency: */
 #define MSM_PARAM_HAS_PRR 0x15 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls.  MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected.  (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); the resident set is determined by
+ * the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process pgtables.
+ * And it is not allowed to disable VM_BIND once it has been enabled.
+ *
+ * Enabling VM_BIND should be done (attempted) prior to allocating any BOs or
+ * submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again.  When the VM is marked unusable, the SUBMIT
+ * ioctl will throw -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
 
 /* For backwards compat. The original support for preemption was based on
 * a single ring per priority level so # of priority levels equals the #
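To make the no-recovery behaviour above concrete, a hypothetical userspace-side sketch (not part of the patch), assuming libdrm's drmIoctl(); only the -EPIPE handling is the point here, the submit setup itself is elided:

#include <errno.h>
#include <stdbool.h>

#include <xf86drm.h>
#include "msm_drm.h"	/* uapi header; include path depends on the build */

/* Submit, treating EPIPE as "VM unusable": the context must be recreated. */
static int do_submit(int fd, struct drm_msm_gem_submit *submit, bool *device_lost)
{
	int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, submit);

	if (ret && errno == EPIPE)
		*device_lost = true;	/* GPU fault or failed async VM_BIND op */

	return ret;
}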
