@@ -6,6 +6,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_file.h>
@@ -66,36 +67,36 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 	return file_priv;
 }
 
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 {
-	struct ivpu_file_priv *file_priv;
-
-	xa_lock_irq(&vdev->context_xa);
-	file_priv = xa_load(&vdev->context_xa, id);
-	/* file_priv may still be in context_xa during file_priv_release() */
-	if (file_priv && !kref_get_unless_zero(&file_priv->ref))
-		file_priv = NULL;
-	xa_unlock_irq(&vdev->context_xa);
-
-	if (file_priv)
-		ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
-			 file_priv->ctx.id, kref_read(&file_priv->ref));
-
-	return file_priv;
+	mutex_lock(&file_priv->lock);
+	if (file_priv->bound) {
+		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+		ivpu_cmdq_release_all_locked(file_priv);
+		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+		file_priv->bound = false;
+		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+	}
+	mutex_unlock(&file_priv->lock);
 }
 
 static void file_priv_release(struct kref *ref)
 {
 	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
 	struct ivpu_device *vdev = file_priv->vdev;
 
-	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+		 file_priv->ctx.id, (bool)file_priv->bound);
+
+	pm_runtime_get_sync(vdev->drm.dev);
+	mutex_lock(&vdev->context_list_lock);
+	file_priv_unbind(vdev, file_priv);
+	mutex_unlock(&vdev->context_list_lock);
+	pm_runtime_put_autosuspend(vdev->drm.dev);
 
-	ivpu_cmdq_release_all(file_priv);
-	ivpu_jsm_context_release(vdev, file_priv->ctx.id);
-	ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
-	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-	drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
 }
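
The hunk above replaces the by-id lookup helper with an idempotent file_priv_unbind(): teardown can now be started either by file_priv_release() on the last file close or by device unbind, and whichever path runs first performs the real cleanup under file_priv->lock while the other finds bound == false and does nothing. A minimal user-space sketch of that pattern, with pthreads standing in for kernel mutexes and all names illustrative:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;
	bool bound;	/* true while MMU context, cmdqs, BOs are live */
};

static void ctx_unbind(struct ctx *c)
{
	/* Whichever caller arrives first does the real teardown. */
	pthread_mutex_lock(&c->lock);
	if (c->bound) {
		printf("releasing context resources\n");
		c->bound = false;	/* later callers become no-ops */
	}
	pthread_mutex_unlock(&c->lock);
}

static void ctx_release(struct ctx *c)
{
	ctx_unbind(c);	/* idempotent: safe even if unbind already ran */
	pthread_mutex_destroy(&c->lock);
	free(c);
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	pthread_mutex_init(&c->lock, NULL);
	c->bound = true;

	ctx_unbind(c);	/* e.g. device unbind wins the race */
	ctx_release(c);	/* file close: sees bound == false, just frees */
	return 0;
}
```

The same reasoning explains the flipped WARN polarity: once unbind owns the xa_erase_irq(), the entry must still be present in context_xa when it runs.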
@@ -232,49 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_file_priv *file_priv;
 	u32 ctx_id;
-	void *old;
-	int ret;
+	int idx, ret;
 
-	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
-	if (ret) {
-		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
-		return ret;
-	}
+	if (!drm_dev_enter(dev, &idx))
+		return -ENODEV;
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv) {
 		ret = -ENOMEM;
-		goto err_xa_erase;
+		goto err_dev_exit;
 	}
 
 	file_priv->vdev = vdev;
+	file_priv->bound = true;
 	kref_init(&file_priv->ref);
 	mutex_init(&file_priv->lock);
 
+	mutex_lock(&vdev->context_list_lock);
+
+	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+			   vdev->context_xa_limit, GFP_KERNEL);
+	if (ret) {
+		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+		goto err_unlock;
+	}
+
 	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
 	if (ret)
-		goto err_mutex_destroy;
+		goto err_xa_erase;
 
-	old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
-	if (xa_is_err(old)) {
-		ret = xa_err(old);
-		ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
-		goto err_ctx_fini;
-	}
+	mutex_unlock(&vdev->context_list_lock);
+	drm_dev_exit(idx);
+
+	file->driver_priv = file_priv;
 
 	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
 		 ctx_id, current->comm, task_pid_nr(current));
 
-	file->driver_priv = file_priv;
 	return 0;
 
-err_ctx_fini:
-	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
-	mutex_destroy(&file_priv->lock);
-	kfree(file_priv);
 err_xa_erase:
 	xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+	mutex_unlock(&vdev->context_list_lock);
+	mutex_destroy(&file_priv->lock);
+	kfree(file_priv);
+err_dev_exit:
+	drm_dev_exit(idx);
 	return ret;
 }
 
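Two things change in ivpu_open(): the whole path now runs inside drm_dev_enter()/drm_dev_exit(), so an open racing with driver unbind backs out early with -ENODEV, and file_priv is published in context_xa fully formed under context_list_lock, replacing the old reserve-then-xa_store_irq() two-step. A rough user-space model of the enter/exit guard, using a rwlock where the real DRM API uses SRCU; all names here are illustrative:

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t dev_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static bool dev_unplugged;

static bool dev_enter(void)	/* ~ drm_dev_enter() */
{
	pthread_rwlock_rdlock(&dev_rwlock);
	if (dev_unplugged) {
		pthread_rwlock_unlock(&dev_rwlock);
		return false;
	}
	return true;	/* caller must pair with dev_exit() */
}

static void dev_exit(void)	/* ~ drm_dev_exit() */
{
	pthread_rwlock_unlock(&dev_rwlock);
}

static void dev_unplug(void)	/* ~ drm_dev_unplug(): waits out readers */
{
	pthread_rwlock_wrlock(&dev_rwlock);
	dev_unplugged = true;
	pthread_rwlock_unlock(&dev_rwlock);
}

static int dev_open(void)
{
	if (!dev_enter())
		return -ENODEV;	/* device already gone, back out early */
	printf("open proceeds; unplug cannot complete meanwhile\n");
	dev_exit();
	return 0;
}

int main(void)
{
	dev_open();	/* succeeds */
	dev_unplug();
	printf("open after unplug: %d\n", dev_open());	/* -ENODEV */
	return 0;
}
```

Storing file_priv at xa_alloc_irq() time also removes the window where a NULL placeholder sat in context_xa, which is what the deleted xa_store_irq() dance had to paper over.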
@@ -531,6 +536,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
 	INIT_LIST_HEAD(&vdev->bo_list);
 
+	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+	if (ret)
+		goto err_xa_destroy;
+
 	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
 	if (ret)
 		goto err_xa_destroy;
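
context_list_lock is set up with drmm_mutex_init(), the DRM-managed variant that queues mutex_destroy() to run automatically when the drm_device is released, which is why no matching destroy appears in ivpu_dev_fini(). A toy sketch of that managed-init idea, modeled with a reverse-order cleanup-action list (illustrative names, not the drmm implementation):

```c
#include <pthread.h>
#include <stdio.h>

typedef void (*cleanup_fn)(void *);

static struct {
	cleanup_fn fn;
	void *arg;
} actions[8];
static int n_actions;

static void add_action(cleanup_fn fn, void *arg)
{
	actions[n_actions].fn = fn;
	actions[n_actions].arg = arg;
	n_actions++;
}

static void run_actions(void)	/* on device release, reverse order */
{
	while (n_actions > 0) {
		n_actions--;
		actions[n_actions].fn(actions[n_actions].arg);
	}
}

static void mutex_destroy_action(void *arg)
{
	pthread_mutex_destroy(arg);
	printf("mutex destroyed automatically\n");
}

static int managed_mutex_init(pthread_mutex_t *m)	/* ~ drmm_mutex_init() */
{
	pthread_mutex_init(m, NULL);
	add_action(mutex_destroy_action, m);	/* destroy queued at init */
	return 0;
}

int main(void)
{
	pthread_mutex_t context_list_lock;

	managed_mutex_init(&context_list_lock);
	/* ... device lifetime: lock/unlock as needed ... */
	run_actions();	/* device release fires queued cleanups */
	return 0;
}
```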
@@ -602,14 +611,30 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	return ret;
 }
 
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+	struct ivpu_file_priv *file_priv;
+	unsigned long ctx_id;
+
+	mutex_lock(&vdev->context_list_lock);
+
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+		file_priv_unbind(vdev, file_priv);
+
+	mutex_unlock(&vdev->context_list_lock);
+}
+
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
 	ivpu_pm_disable(vdev);
 	ivpu_shutdown(vdev);
 	if (IVPU_WA(d3hot_after_power_off))
 		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+	ivpu_jobs_abort_all(vdev);
 	ivpu_job_done_consumer_fini(vdev);
 	ivpu_pm_cancel_recovery(vdev);
+	ivpu_bo_unbind_all_user_contexts(vdev);
 
 	ivpu_ipc_fini(vdev);
 	ivpu_fw_fini(vdev);
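
The fini path now aborts outstanding jobs and then force-unbinds every context still registered, all under context_list_lock; any file descriptor closed afterwards reaches file_priv_unbind() with bound already false and simply frees the struct. A compact user-space sketch of the unbind-all walk, where a plain array stands in for the xarray and names are illustrative:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	bool bound;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *table[4];	/* stand-in for context_xa */

static void ctx_unbind(struct ctx *c)	/* idempotent, as in the patch */
{
	pthread_mutex_lock(&c->lock);
	if (c->bound) {
		printf("unbinding ctx %d\n", c->id);
		c->bound = false;
	}
	pthread_mutex_unlock(&c->lock);
}

static void unbind_all_contexts(void)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < 4; i++)	/* models xa_for_each() */
		if (table[i])
			ctx_unbind(table[i]);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct ctx a = { .lock = PTHREAD_MUTEX_INITIALIZER, .bound = true, .id = 1 };
	struct ctx b = { .lock = PTHREAD_MUTEX_INITIALIZER, .bound = true, .id = 2 };

	table[0] = &a;
	table[1] = &b;
	unbind_all_contexts();	/* tears both down */
	unbind_all_contexts();	/* second pass is a no-op */
	return 0;
}
```

Note the ordering in the hunk: the walk runs before ivpu_ipc_fini() and ivpu_fw_fini(), while the JSM and MMU paths that file_priv_unbind() calls are still usable.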