x86/shadow: suppress "fast fault path" optimization without reserved bits

When none of the physical address bits in PTEs are reserved, we can't
create any 4k (leaf) PTEs which would trigger reserved bit faults. Hence
the present SHOPT_FAST_FAULT_PATH machinery needs to be suppressed in
this case, which is most easily achieved by never creating any magic
entries.
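As an aside, here is a minimal standalone sketch (not part of the patch) of
why the magic entries depend on reserved physical address bits. PADDR_BITS
and SH_L1E_MAGIC mirror the Xen definitions used below; magic_faults_on_rsvd()
is a hypothetical helper for illustration only, assuming the usual x86-64 PTE
layout with physical address bits 51:12.

#include <stdbool.h>
#include <stdint.h>

#define PADDR_BITS 52                       /* architectural maximum */
#define SH_L1E_MAGIC 0xffffffff00000001ULL  /* magic pattern from types.h */

/*
 * A present PTE raises a reserved-bit (PFEC.RSVD) page fault only if it
 * sets physical address bits at or above the CPU's implemented width.
 * With paddr_bits == 52 no address bits are reserved, so the hardware
 * can no longer tell the magic pattern apart from an ordinary PTE.
 */
static bool magic_faults_on_rsvd(unsigned int paddr_bits)
{
    uint64_t addr_mask = ((1ULL << PADDR_BITS) - 1) & ~0xfffULL; /* bits 51:12 */
    uint64_t rsvd_mask = addr_mask & ~((1ULL << paddr_bits) - 1);

    return (SH_L1E_MAGIC & rsvd_mask) != 0;  /* false iff paddr_bits == 52 */
}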

To compensate a little, eliminate sh_write_p2m_entry_post()'s impact on
such hardware.

While at it, also avoid using an MMIO magic entry when that would
truncate the incoming GFN.
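The truncation check below works by a bit-field insert/extract round trip:
if the incoming GFN has bits above the field covered by SH_L1E_MMIO_GFN_MASK,
extracting what was just inserted no longer reproduces the original value.
A hedged sketch, with the two macros written out to match their Xen
definitions and gfn_fits() a hypothetical helper; it assumes a contiguous
mask:

#include <stdbool.h>
#include <stdint.h>

/* Insert into / extract from a contiguous bit field ("m" is the mask). */
#define MASK_EXTR(v, m) (((v) & (m)) / ((m) & -(m)))
#define MASK_INSR(v, m) (((v) * ((m) & -(m))) & (m))

/* True if "gfn" survives the round trip, i.e. wasn't truncated. */
static bool gfn_fits(uint64_t gfn, uint64_t mask)
{
    return MASK_EXTR(MASK_INSR(gfn, mask), mask) == gfn;
}

If, for example, the mask holds a 28-bit field, any GFN at or above 1 << 28
fails this check, and the reworked sh_l1e_mmio() then installs an empty
(non-magic) entry instead of silently truncating the GFN.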

Requested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Release-Acked-by: Ian Jackson <iwj@xenproject.org>
jbeulich committed Mar 5, 2021
1 parent f40e1c5 commit 9318fdf
Showing 2 changed files with 24 additions and 7 deletions.
3 changes: 2 additions & 1 deletion xen/arch/x86/mm/shadow/multi.c
@@ -499,7 +499,8 @@ _sh_propagate(struct vcpu *v,
     {
         /* Guest l1e maps emulated MMIO space */
         *sp = sh_l1e_mmio(target_gfn, gflags);
-        d->arch.paging.shadow.has_fast_mmio_entries = true;
+        if ( sh_l1e_is_magic(*sp) )
+            d->arch.paging.shadow.has_fast_mmio_entries = true;
         goto done;
     }

28 changes: 22 additions & 6 deletions xen/arch/x86/mm/shadow/types.h
@@ -281,7 +281,8 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
  * pagetables.
  *
  * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
- * have reserved bits that we can use for this.
+ * have reserved bits that we can use for this. And even there it can only
+ * be used if the processor doesn't use all 52 address bits.
  */
 
 #define SH_L1E_MAGIC 0xffffffff00000001ULL
@@ -291,14 +292,24 @@ static inline bool sh_l1e_is_magic(shadow_l1e_t sl1e)
 }
 
 /* Guest not present: a single magic value */
-static inline shadow_l1e_t sh_l1e_gnp(void)
+static inline shadow_l1e_t sh_l1e_gnp_raw(void)
 {
     return (shadow_l1e_t){ -1ULL };
 }
 
+static inline shadow_l1e_t sh_l1e_gnp(void)
+{
+    /*
+     * On systems with no reserved physical address bits we can't engage the
+     * fast fault path.
+     */
+    return paddr_bits < PADDR_BITS ? sh_l1e_gnp_raw()
+                                   : shadow_l1e_empty();
+}
+
 static inline bool sh_l1e_is_gnp(shadow_l1e_t sl1e)
 {
-    return sl1e.l1 == sh_l1e_gnp().l1;
+    return sl1e.l1 == sh_l1e_gnp_raw().l1;
 }
 
 /*
@@ -313,9 +324,14 @@ static inline bool sh_l1e_is_gnp(shadow_l1e_t sl1e)
 
 static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
 {
-    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
-                             | MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK)
-                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
+    unsigned long gfn_val = MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK);
+
+    if ( paddr_bits >= PADDR_BITS ||
+         gfn_x(gfn) != MASK_EXTR(gfn_val, SH_L1E_MMIO_GFN_MASK) )
+        return shadow_l1e_empty();
+
+    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC | gfn_val |
+                             (gflags & (_PAGE_USER | _PAGE_RW))) };
 }
 
 static inline bool sh_l1e_is_mmio(shadow_l1e_t sl1e)
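Taken together: on a processor implementing all 52 physical address bits,
both sh_l1e_gnp() and sh_l1e_mmio() now fall back to shadow_l1e_empty(), so
no magic entry is ever written and the fast fault path stays disengaged. A
small self-contained model of that behaviour (the type and helpers are
stand-ins for the real Xen definitions; 46 bits is just an example of a CPU
that does have reserved bits):

#include <stdint.h>
#include <stdio.h>

#define PADDR_BITS 52

typedef struct { uint64_t l1; } shadow_l1e_t;

static unsigned int paddr_bits;  /* CPU's implemented physical width */

static shadow_l1e_t shadow_l1e_empty(void) { return (shadow_l1e_t){ 0 }; }
static shadow_l1e_t sh_l1e_gnp_raw(void)   { return (shadow_l1e_t){ -1ULL }; }

/* Same shape as the patched sh_l1e_gnp() above. */
static shadow_l1e_t sh_l1e_gnp(void)
{
    return paddr_bits < PADDR_BITS ? sh_l1e_gnp_raw() : shadow_l1e_empty();
}

int main(void)
{
    paddr_bits = 46;  /* reserved bits exist: magic entry is used */
    printf("46-bit CPU: %#llx\n", (unsigned long long)sh_l1e_gnp().l1);

    paddr_bits = 52;  /* no reserved bits: plain not-present entry */
    printf("52-bit CPU: %#llx\n", (unsigned long long)sh_l1e_gnp().l1);

    return 0;
}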
