
Commit dd25b99

drm/xe/vm: split userptr bits into separate file
This will simplify compiling out the bits that depend on DRM_GPUSVM in a
later patch. Without this we end up littering the code with ifdef checks,
plus it becomes hard to be sure that something won't blow up at runtime
due to something not being initialised, even though it passed the build.
Should be no functional change here.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250828142430.615826-16-matthew.auld@intel.com
1 parent 83f706e commit dd25b99
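
For context, the compile-out this commit message alludes to is the usual header-stub
pattern: once the userptr code lives in its own object file, a later patch can drop
it from the build and satisfy callers with static inline stubs. A minimal sketch of
that pattern, assuming hypothetical CONFIG_DRM_GPUSVM-gated stubs in xe_userptr.h
(the actual later patch may differ):

/*
 * Hedged sketch only: illustrates the header-stub pattern the commit
 * message alludes to; names mirror this patch, but the later patch's
 * actual guards and return values may differ.
 */
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
int xe_vm_userptr_pin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
#else
/* Without DRM_GPUSVM no userptr VMAs can be created, so these are no-ops. */
static inline int xe_vm_userptr_pin(struct xe_vm *vm)
{
	return 0;
}

static inline int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return 0;
}
#endif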

7 files changed: +411 −348 lines changed

drivers/gpu/drm/xe/Makefile

Lines changed: 1 addition & 0 deletions
@@ -130,6 +130,7 @@ xe-y += xe_bb.o \
 	xe_tuning.o \
 	xe_uc.o \
 	xe_uc_fw.o \
+	xe_userptr.o \
 	xe_vm.o \
 	xe_vm_madvise.o \
 	xe_vram.o \

drivers/gpu/drm/xe/xe_pt.c

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@
 #include "xe_tlb_inval_job.h"
 #include "xe_trace.h"
 #include "xe_ttm_stolen_mgr.h"
+#include "xe_userptr.h"
 #include "xe_vm.h"

 struct xe_pt_dir {

drivers/gpu/drm/xe/xe_userptr.c

Lines changed: 305 additions & 0 deletions
@@ -0,0 +1,305 @@ (new file)

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_userptr.h"

#include <linux/mm.h>

#include "xe_hmm.h"
#include "xe_trace_bo.h"

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->userptr.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.notifier_seq) ?
		-EAGAIN : 0;
}

/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the userptr.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));

	return xe_hmm_userptr_populate_range(uvma, false);
}

static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long err;

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	xe_hmm_userptr_unmap(uvma);
}

static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
	       xe_vma_start(vma), xe_vma_size(vma));

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	__vma_userptr_invalidate(vm, uvma);
	up_write(&vm->userptr.notifier_lock);
	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/**
 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
 * @uvma: The userptr vma to invalidate
 *
 * Perform a forced userptr invalidation for testing purposes.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	/* Protect against concurrent userptr pinning */
	lockdep_assert_held(&vm->lock);
	/* Protect against concurrent notifiers */
	lockdep_assert_held(&vm->userptr.notifier_lock);
	/*
	 * Protect against concurrent instances of this function and
	 * the critical exec sections
	 */
	xe_vm_assert_held(vm);

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.notifier_seq))
		uvma->userptr.notifier_seq -= 2;
	__vma_userptr_invalidate(vm, uvma);
}
#endif

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;

	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_add_tail(&uvma->userptr.repin_link,
			      &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to bind list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err == -EFAULT) {
			list_del_init(&uvma->userptr.repin_link);
			/*
			 * We might have already done the pin once, but then
			 * had to retry before the re-bind happened, due to
			 * some other condition in the caller; meanwhile the
			 * userptr got dinged by the notifier such that we
			 * need to revalidate here, but this time we hit the
			 * EFAULT. In such a case make sure we remove
			 * ourselves from the rebind list to avoid going down
			 * in flames.
			 */
			if (!list_empty(&uvma->vma.combined_links.rebind))
				list_del_init(&uvma->vma.combined_links.rebind);

			/* Wait for pending binds */
			xe_vm_lock(vm, false);
			dma_resv_wait_timeout(xe_vm_resv(vm),
					      DMA_RESV_USAGE_BOOKKEEP,
					      false, MAX_SCHEDULE_TIMEOUT);

			down_read(&vm->userptr.notifier_lock);
			err = xe_vm_invalidate_vma(&uvma->vma);
			up_read(&vm->userptr.notifier_lock);
			xe_vm_unlock(vm);
			if (err)
				break;
		} else {
			if (err)
				break;

			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->vma.combined_links.rebind,
				       &vm->rebind_list);
		}
	}

	if (err) {
		down_write(&vm->userptr.notifier_lock);
		spin_lock(&vm->userptr.invalidated_lock);
		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
					 userptr.repin_link) {
			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->userptr.invalidate_link,
				       &vm->userptr.invalidated);
		}
		spin_unlock(&vm->userptr.invalidated_lock);
		up_write(&vm->userptr.notifier_lock);
	}
	return err;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
		     unsigned long range)
{
	struct xe_userptr *userptr = &uvma->userptr;
	int err;

	INIT_LIST_HEAD(&userptr->invalidate_link);
	INIT_LIST_HEAD(&userptr->repin_link);
	mutex_init(&userptr->unmap_mutex);

	err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
					   start, range,
					   &vma_userptr_notifier_ops);
	if (err)
		return err;

	userptr->notifier_seq = LONG_MAX;

	return 0;
}

void xe_userptr_remove(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;

	if (userptr->sg)
		xe_hmm_userptr_free_sg(uvma);

	/*
	 * Since userptr pages are not pinned, we can't remove
	 * the notifier until we're sure the GPU is not accessing
	 * them anymore
	 */
	mmu_interval_notifier_remove(&userptr->notifier);
	mutex_destroy(&userptr->unmap_mutex);
}

void xe_userptr_destroy(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
	list_del(&uvma->userptr.invalidate_link);
	spin_unlock(&vm->userptr.invalidated_lock);
}
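
The kernel-doc above describes a lockless-check-then-recheck protocol:
xe_vm_userptr_check_repin() may be called without the notifier lock, but a caller
must revalidate under vm->userptr.notifier_lock before trusting the result. A
hedged sketch of how a caller, modelled loosely on the exec/rebind paths (the
function below is hypothetical, not part of this patch), might drive that loop:

/*
 * Hypothetical caller sketch: shows the advisory check / pin / locked
 * re-check sequence the kernel-doc above implies. Assumes the caller
 * already holds vm->lock in write mode, as xe_vm_userptr_pin() requires.
 */
static int example_revalidate_userptrs(struct xe_vm *vm)
{
	int err;

retry:
	/* Cheap, lockless early-out when nothing was invalidated. */
	if (!xe_vm_userptr_check_repin(vm))
		return 0;

	/* Re-pin pages for every userptr VMA dinged by the notifier. */
	err = xe_vm_userptr_pin(vm);
	if (err)
		return err;

	/* ... rebind everything moved onto vm->rebind_list here ... */

	/*
	 * Authoritative re-check under the notifier lock: the notifier
	 * may have fired again between the pin and this point.
	 */
	down_read(&vm->userptr.notifier_lock);
	err = __xe_vm_userptr_needs_repin(vm);
	up_read(&vm->userptr.notifier_lock);
	if (err == -EAGAIN)
		goto retry;

	return err;
}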
