Skip to content

Commit

Permalink
KVM: pfncache: include page offset in uhva and use it consistently
Browse files Browse the repository at this point in the history
Currently, the pfncache page offset is sometimes determined using the gpa
and sometimes the khva, whilst the uhva is always page-aligned. After a
subsequent patch is applied, the gpa will not always be valid, so adjust
the code to include the page offset in the uhva and use it consistently
as the source of truth.

Also, where a page-aligned address is required, use PAGE_ALIGN_DOWN()
for clarity.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-8-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
  • Loading branch information
Paul Durrant authored and sean-jc committed Feb 20, 2024
1 parent 53e63e9 commit 406c109
Showing 1 changed file with 21 additions and 8 deletions.
29 changes: 21 additions & 8 deletions virt/kvm/pfncache.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,10 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (!gpc->active)
return false;

if (offset_in_page(gpc->gpa) + len > PAGE_SIZE)
if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
return false;

if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
return false;

if (!gpc->valid)
Expand Down Expand Up @@ -119,7 +119,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
/* Note, the new page offset may be different than the old! */
void *old_khva = gpc->khva - offset_in_page(gpc->khva);
void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
void *new_khva = NULL;
unsigned long mmu_seq;
Expand Down Expand Up @@ -192,7 +192,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)

gpc->valid = true;
gpc->pfn = new_pfn;
gpc->khva = new_khva + offset_in_page(gpc->gpa);
gpc->khva = new_khva + offset_in_page(gpc->uhva);

/*
* Put the reference to the _new_ pfn. The pfn is now tracked by the
Expand All @@ -217,6 +217,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
bool hva_change = false;
void *old_khva;
int ret;

Expand All @@ -242,10 +243,10 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
}

old_pfn = gpc->pfn;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

/* If the userspace HVA is invalid, refresh that first */
/* Refresh the userspace HVA if necessary */
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
Expand All @@ -259,13 +260,25 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
ret = -EFAULT;
goto out;
}

/*
* Even if the GPA and/or the memslot generation changed, the
* HVA may still be the same.
*/
if (gpc->uhva != old_uhva)
hva_change = true;
} else {
gpc->uhva = old_uhva;
}

/* Note: the offset must be correct before calling hva_to_pfn_retry() */
gpc->uhva += page_offset;

/*
* If the userspace HVA changed or the PFN was already invalid,
* drop the lock and do the HVA to PFN lookup again.
*/
if (!gpc->valid || old_uhva != gpc->uhva) {
if (!gpc->valid || hva_change) {
ret = hva_to_pfn_retry(gpc);
} else {
/*
Expand Down

0 comments on commit 406c109

Please sign in to comment.