Skip to content

Commit

Permalink
x86/p2m: split write_p2m_entry() hook
Browse files Browse the repository at this point in the history
Fair parts of the present handlers are identical; in fact
nestedp2m_write_p2m_entry() lacks a call to p2m_entry_modify(). Move
common parts right into write_p2m_entry(), splitting the hooks into a
"pre" one (needed just by shadow code) and a "post" one.

For the common parts moved, I think that the p2m_flush_nestedp2m() call is,
at least from an abstract perspective, also applicable in the shadow
case. Hence it doesn't get a 3rd hook put in place.

The initial comment that was in hap_write_p2m_entry() gets dropped: Its
placement was bogus, and looking back at the commit introducing it
(dd6de3a "Implement Nested-on-Nested") I can't see either what use
of a p2m it was meant to be associated with.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
  • Loading branch information
jbeulich committed Nov 18, 2020
1 parent a7ab52f commit dc5616e
Show file tree
Hide file tree
Showing 6 changed files with 75 additions and 96 deletions.
45 changes: 4 additions & 41 deletions xen/arch/x86/mm/hap/hap.c
Original file line number Diff line number Diff line change
Expand Up @@ -774,55 +774,18 @@ static void hap_update_paging_modes(struct vcpu *v)
put_gfn(d, cr3_gfn);
}

static int
hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p,
l1_pgentry_t new, unsigned int level)
static void
hap_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
{
struct domain *d = p2m->domain;
uint32_t old_flags;
mfn_t omfn;
int rc;

/* We know always use the host p2m here, regardless if the vcpu
* is in host or guest mode. The vcpu can be in guest mode by
* a hypercall which passes a domain and chooses mostly the first
* vcpu. */

paging_lock(d);
old_flags = l1e_get_flags(*p);
omfn = l1e_get_mfn(*p);

rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
p2m_flags_to_type(old_flags), l1e_get_mfn(new),
omfn, level);
if ( rc )
{
paging_unlock(d);
return rc;
}

safe_write_pte(p, new);
if ( old_flags & _PAGE_PRESENT )
if ( oflags & _PAGE_PRESENT )
guest_flush_tlb_mask(d, d->dirty_cpumask);

paging_unlock(d);

if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT) &&
!p2m_get_hostp2m(d)->defer_nested_flush &&
/*
* We are replacing a valid entry so we need to flush nested p2ms,
* unless the only change is an increase in access rights.
*/
(!mfn_eq(omfn, l1e_get_mfn(new)) ||
!perms_strictly_increased(old_flags, l1e_get_flags(new))) )
p2m_flush_nestedp2m(d);

return 0;
}

void hap_p2m_init(struct p2m_domain *p2m)
{
p2m->write_p2m_entry = hap_write_p2m_entry;
p2m->write_p2m_entry_post = hap_write_p2m_entry_post;
}

static unsigned long hap_gva_to_gfn_real_mode(
Expand Down
21 changes: 4 additions & 17 deletions xen/arch/x86/mm/hap/nested_hap.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,24 +71,11 @@
/* NESTED VIRT P2M FUNCTIONS */
/********************************************/

int
nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new, unsigned int level)
void
nestedp2m_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
{
struct domain *d = p2m->domain;
uint32_t old_flags;

paging_lock(d);

old_flags = l1e_get_flags(*p);
safe_write_pte(p, new);

if (old_flags & _PAGE_PRESENT)
guest_flush_tlb_mask(d, p2m->dirty_cpumask);

paging_unlock(d);

return 0;
if ( oflags & _PAGE_PRESENT )
guest_flush_tlb_mask(p2m->domain, p2m->dirty_cpumask);
}

/********************************************/
Expand Down
44 changes: 41 additions & 3 deletions xen/arch/x86/mm/p2m-pt.c
Original file line number Diff line number Diff line change
Expand Up @@ -122,17 +122,55 @@ static int write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
{
struct domain *d = p2m->domain;
const struct vcpu *v = current;
int rc = 0;

if ( v->domain != d )
v = d->vcpu ? d->vcpu[0] : NULL;
if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v)) ||
p2m_is_nestedp2m(p2m) )
rc = p2m->write_p2m_entry(p2m, gfn, p, new, level);
{
unsigned int oflags;
mfn_t omfn;
int rc;

paging_lock(d);

if ( p2m->write_p2m_entry_pre )
p2m->write_p2m_entry_pre(d, gfn, p, new, level);

oflags = l1e_get_flags(*p);
omfn = l1e_get_mfn(*p);

rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
p2m_flags_to_type(oflags), l1e_get_mfn(new),
omfn, level);
if ( rc )
{
paging_unlock(d);
return rc;
}

safe_write_pte(p, new);

if ( p2m->write_p2m_entry_post )
p2m->write_p2m_entry_post(p2m, oflags);

paging_unlock(d);

if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) &&
(oflags & _PAGE_PRESENT) &&
!p2m_get_hostp2m(d)->defer_nested_flush &&
/*
* We are replacing a valid entry so we need to flush nested p2ms,
* unless the only change is an increase in access rights.
*/
(!mfn_eq(omfn, l1e_get_mfn(new)) ||
!perms_strictly_increased(oflags, l1e_get_flags(new))) )
p2m_flush_nestedp2m(d);
}
else
safe_write_pte(p, new);

return rc;
return 0;
}

// Find the next level's P2M entry, checking for out-of-range gfn's...
Expand Down
3 changes: 2 additions & 1 deletion xen/arch/x86/mm/p2m.c
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,8 @@ static int p2m_init_nestedp2m(struct domain *d)
return -ENOMEM;
}
p2m->p2m_class = p2m_nested;
p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
p2m->write_p2m_entry_pre = NULL;
p2m->write_p2m_entry_post = nestedp2m_write_p2m_entry_post;
list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
}

Expand Down
41 changes: 14 additions & 27 deletions xen/arch/x86/mm/shadow/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -3144,34 +3144,22 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
}
}

static int
shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level)
static void
sh_write_p2m_entry_pre(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
l1_pgentry_t new, unsigned int level)
{
struct domain *d = p2m->domain;
int rc;

paging_lock(d);

/* If there are any shadows, update them. But if shadow_teardown()
* has already been called then it's not safe to try. */
if ( likely(d->arch.paging.shadow.total_pages != 0) )
sh_unshadow_for_p2m_change(d, gfn, p, new, level);

rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
p2m_flags_to_type(l1e_get_flags(*p)),
l1e_get_mfn(new), l1e_get_mfn(*p), level);
if ( rc )
{
paging_unlock(d);
return rc;
}

/* Update the entry with new content */
safe_write_pte(p, new);
}

#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
static void
sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
{
struct domain *d = p2m->domain;

/* If we're doing FAST_FAULT_PATH, then shadow mode may have
cached the fact that this is an mmio region in the shadow
page tables. Blow the tables away to remove the cache.
Expand All @@ -3183,16 +3171,15 @@ shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
shadow_blow_tables(d);
d->arch.paging.shadow.has_fast_mmio_entries = false;
}
#endif

paging_unlock(d);

return 0;
}
#else
# define sh_write_p2m_entry_post NULL
#endif

void shadow_p2m_init(struct p2m_domain *p2m)
{
p2m->write_p2m_entry = shadow_write_p2m_entry;
p2m->write_p2m_entry_pre = sh_write_p2m_entry_pre;
p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
}

/**************************************************************************/
Expand Down
17 changes: 10 additions & 7 deletions xen/include/asm-x86/p2m.h
Original file line number Diff line number Diff line change
Expand Up @@ -272,10 +272,13 @@ struct p2m_domain {
unsigned long first_gfn,
unsigned long last_gfn);
void (*memory_type_changed)(struct p2m_domain *p2m);

int (*write_p2m_entry)(struct p2m_domain *p2m,
unsigned long gfn, l1_pgentry_t *p,
l1_pgentry_t new, unsigned int level);
void (*write_p2m_entry_pre)(struct domain *d,
unsigned long gfn,
l1_pgentry_t *p,
l1_pgentry_t new,
unsigned int level);
void (*write_p2m_entry_post)(struct p2m_domain *p2m,
unsigned int oflags);
#if P2M_AUDIT
long (*audit_p2m)(struct p2m_domain *p2m);
#endif
Expand Down Expand Up @@ -472,7 +475,7 @@ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
*
* This is also used in the shadow code whenever the paging lock is
* held -- in those cases, the caller is protected against concurrent
* p2m updates by the fact that shadow_write_p2m_entry() also takes
* p2m updates by the fact that write_p2m_entry() also takes
* the paging lock.
*
* Note that an unlocked accessor only makes sense for a "query" lookup.
Expand Down Expand Up @@ -841,8 +844,8 @@ void np2m_flush_base(struct vcpu *v, unsigned long np2m_base);
void hap_p2m_init(struct p2m_domain *p2m);
void shadow_p2m_init(struct p2m_domain *p2m);

int nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);
void nestedp2m_write_p2m_entry_post(struct p2m_domain *p2m,
unsigned int oflags);

/*
* Alternate p2m: shadow p2m tables used for alternate memory views
Expand Down

0 comments on commit dc5616e

Please sign in to comment.