drm/amdkfd: handle CPU fault on COW mapping
If a CPU page fault occurs on a page whose zone_device_data svm_bo belongs
to another process, the page is a COW mapping in the child process and the
range was migrated to VRAM by the parent process. Migrate the parent
process range back to system memory to recover from the CPU page fault.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
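
For readers unfamiliar with the scenario, here is a minimal, purely illustrative userspace sketch of how the fault arises: the parent owns a buffer that the KFD SVM code has migrated to VRAM, then forks; the child's first write takes a CPU page fault on a device page whose zone_device_data svm_bo still belongs to the parent. The buffer below is ordinary allocated memory standing in for an SVM-registered range, so none of these calls or names come from the commit itself.

/*
 * Hypothetical illustration only: assume "buf" has been registered as an
 * SVM range and migrated to VRAM on behalf of the parent process by the
 * amdgpu/KFD driver. Plain aligned_alloc() memory stands in for that here.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t size = 2 * 1024 * 1024;
	char *buf = aligned_alloc(4096, size);	/* stand-in for an SVM range */

	if (!buf)
		return 1;
	memset(buf, 0, size);		/* parent touches the range */

	pid_t pid = fork();		/* mapping is now COW-shared with the child */
	if (pid < 0)
		return 1;
	if (pid == 0) {
		/*
		 * Child write: CPU page fault on a page whose svm_bo belongs
		 * to the parent. With this patch, svm_migrate_to_ram() looks
		 * up the parent via svm_bo->eviction_fence->mm and migrates
		 * the parent's range back to system memory so the fault can
		 * be resolved.
		 */
		buf[0] = 1;
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	free(buf);
	return 0;
}

The corresponding kernel-side change is visible in the diff below: svm_migrate_to_ram() now derives the mm_struct from svm_bo->eviction_fence->mm (the parent process) instead of from the faulting vma, and unwinds through out_unlock_svms/out_unref_process/out_mmput on the new error paths.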
PhilipYangA authored and alexdeucher committed Sep 13, 2022
1 parent 7a3f8b7 commit e1f84ee
Showing 1 changed file with 29 additions and 13 deletions.
42 changes: 29 additions & 13 deletions drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -886,37 +886,50 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
 	unsigned long addr = vmf->address;
-	struct vm_area_struct *vma;
+	struct svm_range_bo *svm_bo;
 	enum svm_work_list_ops op;
 	struct svm_range *parent;
 	struct svm_range *prange;
 	struct kfd_process *p;
 	struct mm_struct *mm;
 	int r = 0;
 
-	vma = vmf->vma;
-	mm = vma->vm_mm;
+	svm_bo = vmf->page->zone_device_data;
+	if (!svm_bo) {
+		pr_debug("failed get device page at addr 0x%lx\n", addr);
+		return VM_FAULT_SIGBUS;
+	}
+	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+		pr_debug("addr 0x%lx of process mm is detroyed\n", addr);
+		return VM_FAULT_SIGBUS;
+	}
 
-	p = kfd_lookup_process_by_mm(vma->vm_mm);
+	mm = svm_bo->eviction_fence->mm;
+	if (mm != vmf->vma->vm_mm)
+		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
+
+	p = kfd_lookup_process_by_mm(mm);
 	if (!p) {
 		pr_debug("failed find process at fault address 0x%lx\n", addr);
-		return VM_FAULT_SIGBUS;
+		r = VM_FAULT_SIGBUS;
+		goto out_mmput;
 	}
 	if (READ_ONCE(p->svms.faulting_task) == current) {
 		pr_debug("skipping ram migration\n");
-		kfd_unref_process(p);
-		return 0;
+		r = 0;
+		goto out_unref_process;
 	}
-	addr >>= PAGE_SHIFT;
+
 	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+	addr >>= PAGE_SHIFT;
 
 	mutex_lock(&p->svms.lock);
 
 	prange = svm_range_from_addr(&p->svms, addr, &parent);
 	if (!prange) {
-		pr_debug("cannot find svm range at 0x%lx\n", addr);
+		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
 		r = -EFAULT;
-		goto out;
+		goto out_unlock_svms;
 	}
 
 	mutex_lock(&parent->migrate_mutex);
@@ -940,8 +953,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 
 	r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
 	if (r)
-		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
-			 prange, prange->start, prange->last);
+		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
+			 r, prange->svms, prange, prange->start, prange->last);
 
 	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
 	if (p->xnack_enabled && parent == prange)
@@ -955,9 +968,12 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 	if (prange != parent)
 		mutex_unlock(&prange->migrate_mutex);
 	mutex_unlock(&parent->migrate_mutex);
-out:
+out_unlock_svms:
 	mutex_unlock(&p->svms.lock);
+out_unref_process:
 	kfd_unref_process(p);
+out_mmput:
+	mmput(mm);
 
 	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
 