vfio/type1: Use follow_pte()
commit 07956b6 upstream.

follow_pfn() doesn't make sure that we're using the correct page
protections, get the pte with follow_pte() so that we can test
protections and get the pfn from the pte.

Fixes: 5cbf326 ("vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
awilliam authored and gregkh committed Mar 7, 2021
1 parent 0697f12 commit 2decd71
Showing 1 changed file with 13 additions and 2 deletions.
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
@@ -24,6 +24,7 @@
 #include <linux/compat.h>
 #include <linux/device.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -431,9 +432,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 			    unsigned long vaddr, unsigned long *pfn,
 			    bool write_fault)
 {
+	pte_t *ptep;
+	spinlock_t *ptl;
 	int ret;
 
-	ret = follow_pfn(vma, vaddr, pfn);
+	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
 	if (ret) {
 		bool unlocked = false;
 
@@ -447,9 +450,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		ret = follow_pfn(vma, vaddr, pfn);
+		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+		if (ret)
+			return ret;
 	}
 
+	if (write_fault && !pte_write(*ptep))
+		ret = -EFAULT;
+	else
+		*pfn = pte_pfn(*ptep);
+
+	pte_unmap_unlock(ptep, ptl);
 	return ret;
 }
 
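For context, a minimal self-contained sketch of the follow_pte() pattern the change adopts: map and lock the pte, test the write-protection bit before honoring a write request, extract the pfn from the pte, then drop the lock. The helper name is hypothetical and the fixup_user_fault() retry path of the real function is omitted; it assumes the four-argument follow_pte(mm, addr, &ptep, &ptl) signature used in this kernel tree.

#include <linux/mm.h>		/* follow_pte(), pte_write(), pte_pfn() */
#include <linux/highmem.h>	/* pte_unmap(), needed by pte_unmap_unlock() */

/*
 * Hypothetical helper, illustrative only: mirrors the pattern this patch
 * introduces in follow_fault_pfn(), minus the fault/retry handling.
 */
static int pfnmap_vaddr_to_pfn(struct vm_area_struct *vma, unsigned long vaddr,
			       bool write_fault, unsigned long *pfn)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int ret;

	/* On success the pte is mapped and its lock is held. */
	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
	if (ret)
		return ret;

	if (write_fault && !pte_write(*ptep))
		ret = -EFAULT;		/* read-only pte: refuse a write mapping */
	else
		*pfn = pte_pfn(*ptep);	/* protections acceptable, take the pfn */

	pte_unmap_unlock(ptep, ptl);	/* drop the pte mapping and lock */
	return ret;
}

In the patch itself the same protection check and pte_unmap_unlock() are appended after the existing fault-and-retry logic, which is why the second follow_pte() call gains its own error return in the hunk above.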