x86/EPT: ept_set_middle_entry() related adjustments
ept_split_super_page() wants to further modify the newly allocated
table, so have ept_set_middle_entry() return the mapped pointer rather
than tearing the mapping down only for it to be re-established right away.
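
A rough standalone sketch of the changed contract, in plain C with invented
stand-in types rather than the real ept_entry_t / p2m_alloc_ptp() /
map_domain_page(): the helper now returns the still-mapped table on success
and NULL on failure, and the caller keeps working through that pointer
instead of mapping the page a second time.

    /* Toy model only -- names and types are made up for illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512

    typedef struct { unsigned long epte; } entry_t;

    /* Allocate a lower-level table, link it up, and return the mapping. */
    static entry_t *set_middle_entry(entry_t *parent)
    {
        entry_t *table = calloc(ENTRIES, sizeof(*table));

        if ( !table )
            return NULL;                      /* the old interface returned 0 */

        parent->epte = (unsigned long)table;  /* hook into the higher level */

        return table;                         /* caller reuses this mapping */
    }

    int main(void)
    {
        entry_t parent = { 0 };
        entry_t *table = set_middle_entry(&parent);

        if ( !table )
            return 1;

        table[0].epte = 0x42;                 /* caller's further modifications */
        printf("entry 0: %#lx\n", table[0].epte);

        free(table);                          /* stands in for unmap_domain_page() */
        return 0;
    }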

Similarly ept_next_level() wants to hand back a mapped pointer to the
next-level page, so re-use the one established by
ept_set_middle_entry() in case that path was taken.
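
On the caller side this boils down to "use the freshly mapped table if one
was just created, otherwise map the existing one", which the
ept_next_level() hunk below writes as next ?: map_domain_page(...), using
GCC's binary ?: shorthand. A tiny illustration of that shorthand, with
hypothetical names:

    /* Illustration of the GNU C "x ?: y" operator: evaluates to x when x is
     * non-zero/non-NULL, and to y otherwise, evaluating x only once. */
    #include <stdio.h>

    static int existing[4] = { 1, 2, 3, 4 };

    static int *map_existing(void)            /* hypothetical fallback mapper */
    {
        return existing;
    }

    int main(void)
    {
        int *next = NULL;                     /* nothing freshly mapped */
        int *table = next ?: map_existing();  /* falls back to mapping */

        printf("%d\n", table[0]);
        return 0;
    }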

Pull the setting of suppress_ve ahead of the insertion into the higher-level
table, and don't have ept_split_super_page() set the field a second time.
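
A sketch of the resulting ordering, again in plain standalone C with
invented names rather than the real EPT structures: initialize every entry
of the newly allocated table first, and only then perform the write that
inserts it into the higher-level table, so the entries are never reachable
in their pre-initialized state.

    /* Ordering sketch only; not the real Xen code or data layout. */
    #include <stdatomic.h>
    #include <stdlib.h>

    #define ENTRIES 512

    typedef struct { unsigned int suppress_ve : 1; } slot_t;

    static _Atomic(slot_t *) parent_link;     /* stand-in for the higher-level entry */

    static slot_t *alloc_and_publish(void)
    {
        slot_t *table = calloc(ENTRIES, sizeof(*table));
        unsigned int i;

        if ( !table )
            return NULL;

        /* 1. Initialize every entry while the table is still unreachable. */
        for ( i = 0; i < ENTRIES; i++ )
            table[i].suppress_ve = 1;

        /* 2. Only then publish it, so a walker never sees suppress_ve clear. */
        atomic_store_explicit(&parent_link, table, memory_order_release);

        return table;
    }

    int main(void)
    {
        slot_t *table = alloc_and_publish();

        free(table);                          /* stands in for unmap_domain_page() */
        return !table;
    }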

This is part of XSA-328.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
jbeulich committed Jul 7, 2020
1 parent 23a216f commit 1104288
Showing 1 changed file with 18 additions and 23 deletions.
41 changes: 18 additions & 23 deletions xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -186,16 +186,22 @@ static void ept_p2m_type_to_flags(const struct p2m_domain *p2m,
 #define GUEST_TABLE_SUPER_PAGE 2
 #define GUEST_TABLE_POD_PAGE 3
 
-/* Fill in middle levels of ept table */
-static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
+/* Fill in middle level of ept table; return pointer to mapped new table. */
+static ept_entry_t *ept_set_middle_entry(struct p2m_domain *p2m,
+                                         ept_entry_t *ept_entry)
 {
     mfn_t mfn;
     ept_entry_t *table;
     unsigned int i;
 
     mfn = p2m_alloc_ptp(p2m, 0);
     if ( mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+        return NULL;
+
+    table = map_domain_page(mfn);
+
+    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+        table[i].suppress_ve = 1;
 
     ept_entry->epte = 0;
     ept_entry->mfn = mfn_x(mfn);
@@ -207,14 +213,7 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 
     ept_entry->suppress_ve = 1;
 
-    table = map_domain_page(mfn);
-
-    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
-        table[i].suppress_ve = 1;
-
-    unmap_domain_page(table);
-
-    return 1;
+    return table;
 }
 
 /* free ept sub tree behind an entry */
@@ -252,10 +251,10 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m,
 
     ASSERT(is_epte_superpage(ept_entry));
 
-    if ( !ept_set_middle_entry(p2m, &new_ept) )
+    table = ept_set_middle_entry(p2m, &new_ept);
+    if ( !table )
         return 0;
 
-    table = map_domain_page(_mfn(new_ept.mfn));
     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -266,7 +265,6 @@
         epte->sp = (level > 1);
         epte->mfn += i * trunk;
         epte->snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
-        epte->suppress_ve = 1;
 
         ept_p2m_type_to_flags(p2m, epte);
 
@@ -305,8 +303,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
                           ept_entry_t **table, unsigned long *gfn_remainder,
                           int next_level)
 {
-    unsigned long mfn;
-    ept_entry_t *ept_entry, e;
+    ept_entry_t *ept_entry, *next = NULL, e;
     u32 shift, index;
 
     shift = next_level * EPT_TABLE_ORDER;
@@ -331,19 +328,17 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
         if ( read_only )
             return GUEST_TABLE_MAP_FAILED;
 
-        if ( !ept_set_middle_entry(p2m, ept_entry) )
+        next = ept_set_middle_entry(p2m, ept_entry);
+        if ( !next )
             return GUEST_TABLE_MAP_FAILED;
-        else
-            e = atomic_read_ept_entry(ept_entry); /* Refresh */
+        /* e is now stale and hence may not be used anymore below. */
     }
 
     /* The only time sp would be set here is if we had hit a superpage */
-    if ( is_epte_superpage(&e) )
+    else if ( is_epte_superpage(&e) )
         return GUEST_TABLE_SUPER_PAGE;
 
-    mfn = e.mfn;
-    unmap_domain_page(*table);
-    *table = map_domain_page(_mfn(mfn));
+    *table = next ?: map_domain_page(_mfn(e.mfn));
     *gfn_remainder &= (1UL << shift) - 1;
     return GUEST_TABLE_NORMAL_PAGE;
 }
