486486 * u64 addr, u64 range,
487487 * struct drm_gem_object *obj, u64 offset)
488488 * {
489+ * struct drm_gpuvm_map_req map_req = {
490+ * .map.va.addr = addr,
491+ * .map.va.range = range,
492+ * .map.gem.obj = obj,
493+ * .map.gem.offset = offset,
494+ * };
489495 * struct drm_gpuva_ops *ops;
490496 * struct drm_gpuva_op *op;
491497 * struct drm_gpuvm_bo *vm_bo;
492498 *
493499 * driver_lock_va_space();
494- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
495- * obj, offset);
500+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
496501 * if (IS_ERR(ops))
497502 * return PTR_ERR(ops);
498503 *
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
20542059
20552060static int
20562061op_map_cb (const struct drm_gpuvm_ops * fn , void * priv ,
2057- u64 addr , u64 range ,
2058- struct drm_gem_object * obj , u64 offset )
2062+ const struct drm_gpuvm_map_req * req )
20592063{
20602064 struct drm_gpuva_op op = {};
20612065
20622066 op .op = DRM_GPUVA_OP_MAP ;
2063- op .map .va .addr = addr ;
2064- op .map .va .range = range ;
2065- op .map .gem .obj = obj ;
2066- op .map .gem .offset = offset ;
2067+ op .map .va .addr = req -> map . va . addr ;
2068+ op .map .va .range = req -> map . va . range ;
2069+ op .map .gem .obj = req -> map . gem . obj ;
2070+ op .map .gem .offset = req -> map . gem . offset ;
20672071
20682072 return fn -> sm_step_map (& op , priv );
20692073}
@@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
21022106static int
21032107__drm_gpuvm_sm_map (struct drm_gpuvm * gpuvm ,
21042108 const struct drm_gpuvm_ops * ops , void * priv ,
2105- u64 req_addr , u64 req_range ,
2106- struct drm_gem_object * req_obj , u64 req_offset )
2109+ const struct drm_gpuvm_map_req * req )
21072110{
2111+ struct drm_gem_object * req_obj = req -> map .gem .obj ;
21082112 struct drm_gpuva * va , * next ;
2113+
2114+ u64 req_offset = req -> map .gem .offset ;
2115+ u64 req_range = req -> map .va .range ;
2116+ u64 req_addr = req -> map .va .addr ;
21092117 u64 req_end = req_addr + req_range ;
21102118 int ret ;
21112119
@@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
22362244 }
22372245 }
22382246
2239- return op_map_cb (ops , priv ,
2240- req_addr , req_range ,
2241- req_obj , req_offset );
2247+ return op_map_cb (ops , priv , req );
22422248}
22432249
22442250static int
@@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
23032309 * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
23042310 * @gpuvm: the &drm_gpuvm representing the GPU VA space
23052311 * @priv: pointer to a driver private data structure
2306- * @req_addr: the start address of the new mapping
2307- * @req_range: the range of the new mapping
2308- * @req_obj: the &drm_gem_object to map
2309- * @req_offset: the offset within the &drm_gem_object
2312+ * @req: ptr to struct drm_gpuvm_map_req
23102313 *
23112314 * This function iterates the given range of the GPU VA space. It utilizes the
23122315 * &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
23332336 */
23342337int
23352338drm_gpuvm_sm_map (struct drm_gpuvm * gpuvm , void * priv ,
2336- u64 req_addr , u64 req_range ,
2337- struct drm_gem_object * req_obj , u64 req_offset )
2339+ const struct drm_gpuvm_map_req * req )
23382340{
23392341 const struct drm_gpuvm_ops * ops = gpuvm -> ops ;
23402342
@@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
23432345 ops -> sm_step_unmap )))
23442346 return - EINVAL ;
23452347
2346- return __drm_gpuvm_sm_map (gpuvm , ops , priv ,
2347- req_addr , req_range ,
2348- req_obj , req_offset );
2348+ return __drm_gpuvm_sm_map (gpuvm , ops , priv , req );
23492349}
23502350EXPORT_SYMBOL_GPL (drm_gpuvm_sm_map );
23512351
@@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
24212421 * @gpuvm: the &drm_gpuvm representing the GPU VA space
24222422 * @exec: the &drm_exec locking context
24232423 * @num_fences: for newly mapped objects, the # of fences to reserve
2424- * @req_addr: the start address of the range to unmap
2425- * @req_range: the range of the mappings to unmap
2426- * @req_obj: the &drm_gem_object to map
2427- * @req_offset: the offset within the &drm_gem_object
2424+ * @req: ptr to struct drm_gpuvm_map_req
24282425 *
24292426 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
24302427 * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
24452442 * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
24462443 * break;
24472444 * case DRIVER_OP_MAP:
2448- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
2449- * op->addr, op->range,
2450- * obj, op->obj_offset);
2445+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
24512446 * break;
24522447 * }
24532448 *
@@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
24782473int
24792474drm_gpuvm_sm_map_exec_lock (struct drm_gpuvm * gpuvm ,
24802475 struct drm_exec * exec , unsigned int num_fences ,
2481- u64 req_addr , u64 req_range ,
2482- struct drm_gem_object * req_obj , u64 req_offset )
2476+ struct drm_gpuvm_map_req * req )
24832477{
2478+ struct drm_gem_object * req_obj = req -> map .gem .obj ;
2479+
24842480 if (req_obj ) {
24852481 int ret = drm_exec_prepare_obj (exec , req_obj , num_fences );
24862482 if (ret )
24872483 return ret ;
24882484 }
24892485
2490- return __drm_gpuvm_sm_map (gpuvm , & lock_ops , exec ,
2491- req_addr , req_range ,
2492- req_obj , req_offset );
2486+ return __drm_gpuvm_sm_map (gpuvm , & lock_ops , exec , req );
24932487
24942488}
24952489EXPORT_SYMBOL_GPL (drm_gpuvm_sm_map_exec_lock );
@@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
26112605/**
26122606 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
26132607 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2614- * @req_addr: the start address of the new mapping
2615- * @req_range: the range of the new mapping
2616- * @req_obj: the &drm_gem_object to map
2617- * @req_offset: the offset within the &drm_gem_object
2608+ * @req: map request arguments
26182609 *
26192610 * This function creates a list of operations to perform splitting and merging
26202611 * of existent mapping(s) with the newly requested one.
@@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
26422633 */
26432634struct drm_gpuva_ops *
26442635drm_gpuvm_sm_map_ops_create (struct drm_gpuvm * gpuvm ,
2645- u64 req_addr , u64 req_range ,
2646- struct drm_gem_object * req_obj , u64 req_offset )
2636+ const struct drm_gpuvm_map_req * req )
26472637{
26482638 struct drm_gpuva_ops * ops ;
26492639 struct {
@@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
26612651 args .vm = gpuvm ;
26622652 args .ops = ops ;
26632653
2664- ret = __drm_gpuvm_sm_map (gpuvm , & gpuvm_list_ops , & args ,
2665- req_addr , req_range ,
2666- req_obj , req_offset );
2654+ ret = __drm_gpuvm_sm_map (gpuvm , & gpuvm_list_ops , & args , req );
26672655 if (ret )
26682656 goto err_free_ops ;
26692657
0 commit comments