Skip to content

Commit 000a45d

Browse files
bbrezillon authored and mbrost05 committed
drm/gpuvm: Pass map arguments through a struct
We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](), so, before we do that, let's pass arguments through a struct instead of changing each call site every time a new optional argument is added. Cc: Danilo Krummrich <dakr@kernel.org> Cc: Brendan King <Brendan.King@imgtec.com> Cc: Matt Coster <matt.coster@imgtec.com> Cc: Boris Brezillon <bbrezillon@kernel.org> Cc: Caterina Shablia <caterina.shablia@collabora.com> Cc: Rob Clark <robin.clark@oss.qualcomm.com> Cc: Matthew Brost <matthew.brost@intel.com> Cc: <dri-devel@lists.freedesktop.org> Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com> Acked-by: Danilo Krummrich <dakr@kernel.org> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Rob Clark <robin.clark@oss.qualcomm.com> Reviewed-by: Matt Coster <matt.coster@imgtec.com> # imagination/pvr_vm.c Acked-by: Matt Coster <matt.coster@imgtec.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20250819162058.2777306-2-himal.prasad.ghimiray@intel.com
1 parent f1f2a22 commit 000a45d

File tree

7 files changed

+102
-71
lines changed

7 files changed

+102
-71
lines changed

drivers/gpu/drm/drm_gpuvm.c

Lines changed: 31 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -486,13 +486,18 @@
486486
* u64 addr, u64 range,
487487
* struct drm_gem_object *obj, u64 offset)
488488
* {
489+
* struct drm_gpuvm_map_req map_req = {
490+
* .map.va.addr = addr,
491+
* .map.va.range = range,
492+
* .map.gem.obj = obj,
493+
* .map.gem.offset = offset,
494+
* };
489495
* struct drm_gpuva_ops *ops;
490496
* struct drm_gpuva_op *op
491497
* struct drm_gpuvm_bo *vm_bo;
492498
*
493499
* driver_lock_va_space();
494-
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
495-
* obj, offset);
500+
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
496501
* if (IS_ERR(ops))
497502
* return PTR_ERR(ops);
498503
*
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
20542059

20552060
static int
20562061
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2057-
u64 addr, u64 range,
2058-
struct drm_gem_object *obj, u64 offset)
2062+
const struct drm_gpuvm_map_req *req)
20592063
{
20602064
struct drm_gpuva_op op = {};
20612065

20622066
op.op = DRM_GPUVA_OP_MAP;
2063-
op.map.va.addr = addr;
2064-
op.map.va.range = range;
2065-
op.map.gem.obj = obj;
2066-
op.map.gem.offset = offset;
2067+
op.map.va.addr = req->map.va.addr;
2068+
op.map.va.range = req->map.va.range;
2069+
op.map.gem.obj = req->map.gem.obj;
2070+
op.map.gem.offset = req->map.gem.offset;
20672071

20682072
return fn->sm_step_map(&op, priv);
20692073
}
@@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
21022106
static int
21032107
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
21042108
const struct drm_gpuvm_ops *ops, void *priv,
2105-
u64 req_addr, u64 req_range,
2106-
struct drm_gem_object *req_obj, u64 req_offset)
2109+
const struct drm_gpuvm_map_req *req)
21072110
{
2111+
struct drm_gem_object *req_obj = req->map.gem.obj;
21082112
struct drm_gpuva *va, *next;
2113+
2114+
u64 req_offset = req->map.gem.offset;
2115+
u64 req_range = req->map.va.range;
2116+
u64 req_addr = req->map.va.addr;
21092117
u64 req_end = req_addr + req_range;
21102118
int ret;
21112119

@@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
22362244
}
22372245
}
22382246

2239-
return op_map_cb(ops, priv,
2240-
req_addr, req_range,
2241-
req_obj, req_offset);
2247+
return op_map_cb(ops, priv, req);
22422248
}
22432249

22442250
static int
@@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
23032309
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
23042310
* @gpuvm: the &drm_gpuvm representing the GPU VA space
23052311
* @priv: pointer to a driver private data structure
2306-
* @req_addr: the start address of the new mapping
2307-
* @req_range: the range of the new mapping
2308-
* @req_obj: the &drm_gem_object to map
2309-
* @req_offset: the offset within the &drm_gem_object
2312+
* @req: ptr to struct drm_gpuvm_map_req
23102313
*
23112314
* This function iterates the given range of the GPU VA space. It utilizes the
23122315
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
23332336
*/
23342337
int
23352338
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2336-
u64 req_addr, u64 req_range,
2337-
struct drm_gem_object *req_obj, u64 req_offset)
2339+
const struct drm_gpuvm_map_req *req)
23382340
{
23392341
const struct drm_gpuvm_ops *ops = gpuvm->ops;
23402342

@@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
23432345
ops->sm_step_unmap)))
23442346
return -EINVAL;
23452347

2346-
return __drm_gpuvm_sm_map(gpuvm, ops, priv,
2347-
req_addr, req_range,
2348-
req_obj, req_offset);
2348+
return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
23492349
}
23502350
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
23512351

@@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
24212421
* @gpuvm: the &drm_gpuvm representing the GPU VA space
24222422
* @exec: the &drm_exec locking context
24232423
* @num_fences: for newly mapped objects, the # of fences to reserve
2424-
* @req_addr: the start address of the range to unmap
2425-
* @req_range: the range of the mappings to unmap
2426-
* @req_obj: the &drm_gem_object to map
2427-
* @req_offset: the offset within the &drm_gem_object
2424+
* @req: ptr to drm_gpuvm_map_req struct
24282425
*
24292426
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
24302427
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
24452442
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
24462443
* break;
24472444
* case DRIVER_OP_MAP:
2448-
* ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
2449-
* op->addr, op->range,
2450-
* obj, op->obj_offset);
2445+
* ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
24512446
* break;
24522447
* }
24532448
*
@@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
24782473
int
24792474
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
24802475
struct drm_exec *exec, unsigned int num_fences,
2481-
u64 req_addr, u64 req_range,
2482-
struct drm_gem_object *req_obj, u64 req_offset)
2476+
struct drm_gpuvm_map_req *req)
24832477
{
2478+
struct drm_gem_object *req_obj = req->map.gem.obj;
2479+
24842480
if (req_obj) {
24852481
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
24862482
if (ret)
24872483
return ret;
24882484
}
24892485

2490-
return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
2491-
req_addr, req_range,
2492-
req_obj, req_offset);
2486+
return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
24932487

24942488
}
24952489
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
26112605
/**
26122606
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
26132607
* @gpuvm: the &drm_gpuvm representing the GPU VA space
2614-
* @req_addr: the start address of the new mapping
2615-
* @req_range: the range of the new mapping
2616-
* @req_obj: the &drm_gem_object to map
2617-
* @req_offset: the offset within the &drm_gem_object
2608+
* @req: map request arguments
26182609
*
26192610
* This function creates a list of operations to perform splitting and merging
26202611
* of existent mapping(s) with the newly requested one.
@@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
26422633
*/
26432634
struct drm_gpuva_ops *
26442635
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2645-
u64 req_addr, u64 req_range,
2646-
struct drm_gem_object *req_obj, u64 req_offset)
2636+
const struct drm_gpuvm_map_req *req)
26472637
{
26482638
struct drm_gpuva_ops *ops;
26492639
struct {
@@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
26612651
args.vm = gpuvm;
26622652
args.ops = ops;
26632653

2664-
ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
2665-
req_addr, req_range,
2666-
req_obj, req_offset);
2654+
ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
26672655
if (ret)
26682656
goto err_free_ops;
26692657

drivers/gpu/drm/imagination/pvr_vm.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
185185
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
186186
{
187187
switch (bind_op->type) {
188-
case PVR_VM_BIND_TYPE_MAP:
188+
case PVR_VM_BIND_TYPE_MAP: {
189+
const struct drm_gpuvm_map_req map_req = {
190+
.map.va.addr = bind_op->device_addr,
191+
.map.va.range = bind_op->size,
192+
.map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
193+
.map.gem.offset = bind_op->offset,
194+
};
195+
189196
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
190-
bind_op, bind_op->device_addr,
191-
bind_op->size,
192-
gem_from_pvr_gem(bind_op->pvr_obj),
193-
bind_op->offset);
197+
bind_op, &map_req);
198+
}
194199

195200
case PVR_VM_BIND_TYPE_UNMAP:
196201
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,

drivers/gpu/drm/msm/msm_gem_vma.c

Lines changed: 20 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1171,11 +1171,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
11711171
op->obj_offset);
11721172
break;
11731173
case MSM_VM_BIND_OP_MAP:
1174-
case MSM_VM_BIND_OP_MAP_NULL:
1175-
ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
1176-
op->iova, op->range,
1177-
op->obj, op->obj_offset);
1174+
case MSM_VM_BIND_OP_MAP_NULL: {
1175+
struct drm_gpuvm_map_req map_req = {
1176+
.map.va.addr = op->iova,
1177+
.map.va.range = op->range,
1178+
.map.gem.obj = op->obj,
1179+
.map.gem.offset = op->obj_offset,
1180+
};
1181+
1182+
ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
11781183
break;
1184+
}
11791185
default:
11801186
/*
11811187
* lookup_op() should have already thrown an error for
@@ -1282,10 +1288,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
12821288
if (op->flags & MSM_VM_BIND_OP_DUMP)
12831289
arg.flags |= MSM_VMA_DUMP;
12841290
fallthrough;
1285-
case MSM_VM_BIND_OP_MAP_NULL:
1286-
ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
1287-
op->range, op->obj, op->obj_offset);
1291+
case MSM_VM_BIND_OP_MAP_NULL: {
1292+
struct drm_gpuvm_map_req map_req = {
1293+
.map.va.addr = op->iova,
1294+
.map.va.range = op->range,
1295+
.map.gem.obj = op->obj,
1296+
.map.gem.offset = op->obj_offset,
1297+
};
1298+
1299+
ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
12881300
break;
1301+
}
12891302
default:
12901303
/*
12911304
* lookup_op() should have already thrown an error for

drivers/gpu/drm/nouveau/nouveau_uvmm.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
12761276
break;
12771277
case OP_MAP: {
12781278
struct nouveau_uvma_region *reg;
1279+
struct drm_gpuvm_map_req map_req = {
1280+
.map.va.addr = op->va.addr,
1281+
.map.va.range = op->va.range,
1282+
.map.gem.obj = op->gem.obj,
1283+
.map.gem.offset = op->gem.offset,
1284+
};
12791285

12801286
reg = nouveau_uvma_region_find_first(uvmm,
12811287
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
13011307
}
13021308

13031309
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
1304-
op->va.addr,
1305-
op->va.range,
1306-
op->gem.obj,
1307-
op->gem.offset);
1310+
&map_req);
13081311
if (IS_ERR(op->ops)) {
13091312
ret = PTR_ERR(op->ops);
13101313
goto unwind_continue;

drivers/gpu/drm/panthor/panthor_mmu.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2202,15 +2202,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
22022202
mutex_lock(&vm->op_lock);
22032203
vm->op_ctx = op;
22042204
switch (op_type) {
2205-
case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2205+
case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
2206+
const struct drm_gpuvm_map_req map_req = {
2207+
.map.va.addr = op->va.addr,
2208+
.map.va.range = op->va.range,
2209+
.map.gem.obj = op->map.vm_bo->obj,
2210+
.map.gem.offset = op->map.bo_offset,
2211+
};
2212+
22062213
if (vm->unusable) {
22072214
ret = -EINVAL;
22082215
break;
22092216
}
22102217

2211-
ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
2212-
op->map.vm_bo->obj, op->map.bo_offset);
2218+
ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
22132219
break;
2220+
}
22142221

22152222
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
22162223
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);

drivers/gpu/drm/xe/xe_vm.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
23162316

23172317
switch (operation) {
23182318
case DRM_XE_VM_BIND_OP_MAP:
2319-
case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2320-
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2321-
obj, bo_offset_or_userptr);
2319+
case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
2320+
struct drm_gpuvm_map_req map_req = {
2321+
.map.va.addr = addr,
2322+
.map.va.range = range,
2323+
.map.gem.obj = obj,
2324+
.map.gem.offset = bo_offset_or_userptr,
2325+
};
2326+
2327+
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
23222328
break;
2329+
}
23232330
case DRM_XE_VM_BIND_OP_UNMAP:
23242331
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
23252332
break;

include/drm/drm_gpuvm.h

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1058,10 +1058,20 @@ struct drm_gpuva_ops {
10581058
*/
10591059
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
10601060

1061+
/**
1062+
* struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
1063+
*/
1064+
struct drm_gpuvm_map_req {
1065+
/**
1066+
 * @map: struct drm_gpuva_op_map
1067+
*/
1068+
struct drm_gpuva_op_map map;
1069+
};
1070+
10611071
struct drm_gpuva_ops *
10621072
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
1063-
u64 addr, u64 range,
1064-
struct drm_gem_object *obj, u64 offset);
1073+
const struct drm_gpuvm_map_req *req);
1074+
10651075
struct drm_gpuva_ops *
10661076
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
10671077
u64 addr, u64 range);
@@ -1205,16 +1215,14 @@ struct drm_gpuvm_ops {
12051215
};
12061216

12071217
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
1208-
u64 addr, u64 range,
1209-
struct drm_gem_object *obj, u64 offset);
1218+
const struct drm_gpuvm_map_req *req);
12101219

12111220
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
12121221
u64 addr, u64 range);
12131222

12141223
int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
12151224
struct drm_exec *exec, unsigned int num_fences,
1216-
u64 req_addr, u64 req_range,
1217-
struct drm_gem_object *obj, u64 offset);
1225+
struct drm_gpuvm_map_req *req);
12181226

12191227
int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
12201228
u64 req_addr, u64 req_range);

0 commit comments

Comments (0)