x86/shadow: sh_{make,destroy}_monitor_table() are "even more" HVM-only
With them depending on just the number of shadow levels, there's no need
for more than one instance of them, and hence no need for any hook (IOW
452219e ["x86/shadow: monitor table is HVM-only"] didn't go quite
far enough). Move the functions to hvm.c while dropping the dead
is_pv_32bit_domain() code paths.

While moving the code, replace a stale comment reference to
sh_install_xen_entries_in_l4(). Doing so made me notice the function
also didn't have its prototype dropped in 8d7b633 ("x86/mm:
Consolidate all Xen L4 slot writing into init_xen_l4_slots()"), which
gets done here as well.

Also make their first parameters const.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
jbeulich committed Oct 30, 2020
1 parent 8ac7e45 commit 0b84131
Showing 6 changed files with 100 additions and 142 deletions.
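In essence, the per-mode make_monitor_table/destroy_monitor_table hooks collapse into single sh_make_monitor_table()/sh_destroy_monitor_table() implementations keyed only on the shadow level count, as the hunks below show. A toy C sketch of that refactoring pattern follows; all names and types in it are simplified stand-ins, not the real Xen declarations.

/*
 * Toy sketch (not Xen code): a per-mode function-pointer hook is
 * replaced by one function taking the shadow level count as a
 * parameter.
 */
#include <stdio.h>

struct shadow_mode {
    unsigned int shadow_levels;   /* retained field */
    /* mfn_t (*make_monitor_table)(struct vcpu *);  -- hook now dropped */
};

/* Single implementation, keyed only on the number of shadow levels. */
static void make_monitor_table(unsigned int shadow_levels)
{
    if ( shadow_levels < 4 )
        puts("also install l3/l2 pages for the shadow-linear map");
    else
        puts("4-level shadow: self-linear slot only");
}

int main(void)
{
    struct shadow_mode mode = { .shadow_levels = 3 };

    /* Callers now pass mode.shadow_levels instead of going through the hook. */
    make_monitor_table(mode.shadow_levels);
    return 0;
}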
14 changes: 10 additions & 4 deletions xen/arch/x86/mm/shadow/common.c
@@ -2467,7 +2467,9 @@ static void sh_update_paging_modes(struct vcpu *v)

if ( pagetable_is_null(v->arch.hvm.monitor_table) )
{
mfn_t mmfn = v->arch.paging.mode->shadow.make_monitor_table(v);
mfn_t mmfn = sh_make_monitor_table(
v, v->arch.paging.mode->shadow.shadow_levels);

v->arch.hvm.monitor_table = pagetable_from_mfn(mmfn);
make_cr3(v, mmfn);
hvm_update_host_cr3(v);
@@ -2504,7 +2506,8 @@ static void sh_update_paging_modes(struct vcpu *v)

old_mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
v->arch.hvm.monitor_table = pagetable_null();
new_mfn = v->arch.paging.mode->shadow.make_monitor_table(v);
new_mfn = sh_make_monitor_table(
v, v->arch.paging.mode->shadow.shadow_levels);
v->arch.hvm.monitor_table = pagetable_from_mfn(new_mfn);
SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
mfn_x(new_mfn));
@@ -2516,7 +2519,8 @@ static void sh_update_paging_modes(struct vcpu *v)
if ( v == current )
write_ptbase(v);
hvm_update_host_cr3(v);
old_mode->shadow.destroy_monitor_table(v, old_mfn);
sh_destroy_monitor_table(v, old_mfn,
old_mode->shadow.shadow_levels);
}
}

@@ -2801,7 +2805,9 @@ void shadow_teardown(struct domain *d, bool *preempted)
mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);

if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
sh_destroy_monitor_table(
v, mfn,
v->arch.paging.mode->shadow.shadow_levels);
v->arch.hvm.monitor_table = pagetable_null();
}
#endif /* CONFIG_HVM */
82 changes: 82 additions & 0 deletions xen/arch/x86/mm/shadow/hvm.c
@@ -691,6 +691,88 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}

mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
{
struct domain *d = v->domain;
mfn_t m4mfn;
l4_pgentry_t *l4e;

ASSERT(!pagetable_get_pfn(v->arch.hvm.monitor_table));

/* Guarantee we can get the memory we need */
shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m4mfn)->shadow_flags = 4;

l4e = map_domain_page(m4mfn);

/*
* Create a self-linear mapping, but no shadow-linear mapping. A
* shadow-linear mapping will either be inserted below when creating
* lower level monitor tables, or later in sh_update_cr3().
*/
init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);

if ( shadow_levels < 4 )
{
mfn_t m3mfn, m2mfn;
l3_pgentry_t *l3e;

/*
* Install an l3 table and an l2 table that will hold the shadow
* linear map entries. This overrides the empty entry that was
* installed by init_xen_l4_slots().
*/
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)]
= l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);

m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = map_domain_page(m3mfn);
l3e[0] = l3e_from_mfn(m2mfn, __PAGE_HYPERVISOR_RW);
unmap_domain_page(l3e);
}

unmap_domain_page(l4e);

return m4mfn;
}

void sh_destroy_monitor_table(const struct vcpu *v, mfn_t mmfn,
unsigned int shadow_levels)
{
struct domain *d = v->domain;

ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);

if ( shadow_levels < 4 )
{
mfn_t m3mfn;
l4_pgentry_t *l4e = map_domain_page(mmfn);
l3_pgentry_t *l3e;
unsigned int linear_slot = l4_table_offset(SH_LINEAR_PT_VIRT_START);

/*
* Need to destroy the l3 and l2 monitor pages used
* for the linear map.
*/
ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
m3mfn = l4e_get_mfn(l4e[linear_slot]);
l3e = map_domain_page(m3mfn);
ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
shadow_free(d, l3e_get_mfn(l3e[0]));
unmap_domain_page(l3e);
shadow_free(d, m3mfn);

unmap_domain_page(l4e);
}

/* Put the memory back in the pool */
shadow_free(d, mmfn);
}

/**************************************************************************/
/* VRAM dirty tracking support */
int shadow_track_dirty_vram(struct domain *d,
124 changes: 0 additions & 124 deletions xen/arch/x86/mm/shadow/multi.c
@@ -1405,84 +1405,6 @@ make_fl1_shadow(struct domain *d, gfn_t gfn)
}


#if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS && defined(CONFIG_HVM)
mfn_t
sh_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;

ASSERT(pagetable_get_pfn(v->arch.hvm.monitor_table) == 0);

/* Guarantee we can get the memory we need */
shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);

{
mfn_t m4mfn;
l4_pgentry_t *l4e;

m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m4mfn)->shadow_flags = 4;

l4e = map_domain_page(m4mfn);

/*
* Create a self-linear mapping, but no shadow-linear mapping. A
* shadow-linear mapping will either be inserted below when creating
* lower level monitor tables, or later in sh_update_cr3().
*/
init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);

#if SHADOW_PAGING_LEVELS < 4
{
mfn_t m3mfn, m2mfn;
l3_pgentry_t *l3e;
/* Install an l3 table and an l2 table that will hold the shadow
* linear map entries. This overrides the linear map entry that
* was installed by sh_install_xen_entries_in_l4. */

m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
= l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);

m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = map_domain_page(m3mfn);
l3e[0] = l3e_from_mfn(m2mfn, __PAGE_HYPERVISOR_RW);
unmap_domain_page(l3e);

if ( is_pv_32bit_domain(d) )
{
l2_pgentry_t *l2t;

/* For 32-bit PV guests, we need to map the 32-bit Xen
* area into its usual VAs in the monitor tables */
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e[0] = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);

m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = map_domain_page(m3mfn);
l3e[3] = l3e_from_mfn(m2mfn, _PAGE_PRESENT);

l2t = map_domain_page(m2mfn);
init_xen_pae_l2_slots(l2t, d);
unmap_domain_page(l2t);

unmap_domain_page(l3e);
}

}
#endif /* SHADOW_PAGING_LEVELS < 4 */

unmap_domain_page(l4e);

return m4mfn;
}
}
#endif /* SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS */

/**************************************************************************/
/* These functions also take a virtual address and return the level-N
* shadow table mfn and entry, but they create the shadow pagetables if
@@ -1860,50 +1782,6 @@ void sh_destroy_l1_shadow(struct domain *d, mfn_t smfn)
shadow_free(d, smfn);
}

#if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS && defined(CONFIG_HVM)
void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
struct domain *d = v->domain;
ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);

#if SHADOW_PAGING_LEVELS != 4
{
mfn_t m3mfn;
l4_pgentry_t *l4e = map_domain_page(mmfn);
l3_pgentry_t *l3e;
int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);

/* Need to destroy the l3 and l2 monitor pages used
* for the linear map */
ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
m3mfn = l4e_get_mfn(l4e[linear_slot]);
l3e = map_domain_page(m3mfn);
ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
shadow_free(d, l3e_get_mfn(l3e[0]));
unmap_domain_page(l3e);
shadow_free(d, m3mfn);

if ( is_pv_32bit_domain(d) )
{
/* Need to destroy the l3 and l2 monitor pages that map the
* Xen VAs at 3GB-4GB */
ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
m3mfn = l4e_get_mfn(l4e[0]);
l3e = map_domain_page(m3mfn);
ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
shadow_free(d, l3e_get_mfn(l3e[3]));
unmap_domain_page(l3e);
shadow_free(d, m3mfn);
}
unmap_domain_page(l4e);
}
#endif

/* Put the memory back in the pool */
shadow_free(d, mmfn);
}
#endif

/**************************************************************************/
/* Functions to destroy non-Xen mappings in a pagetable hierarchy.
* These are called from common code when we are running out of shadow
@@ -4705,8 +4583,6 @@ const struct paging_mode sh_paging_mode = {
.shadow.cmpxchg_guest_entry = sh_cmpxchg_guest_entry,
#endif
#ifdef CONFIG_HVM
.shadow.make_monitor_table = sh_make_monitor_table,
.shadow.destroy_monitor_table = sh_destroy_monitor_table,
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
.shadow.guess_wrmap = sh_guess_wrmap,
#endif
11 changes: 8 additions & 3 deletions xen/arch/x86/mm/shadow/private.h
@@ -366,9 +366,6 @@ void sh_set_toplevel_shadow(struct vcpu *v,
mfn_t gmfn,
uint32_t shadow_type));

/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct domain *, mfn_t gl4mfn, mfn_t sl4mfn);

/* Update the shadows in response to a pagetable write from Xen */
int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);

@@ -410,6 +407,14 @@ void shadow_update_paging_modes(struct vcpu *v);
* With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);

/*
* sh_{make,destroy}_monitor_table() depend only on the number of shadow
* levels.
*/
mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels);
void sh_destroy_monitor_table(const struct vcpu *v, mfn_t mmfn,
unsigned int shadow_levels);

/* VRAM dirty tracking helpers. */
void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
mfn_t sl1mfn, const void *sl1e,
9 changes: 0 additions & 9 deletions xen/arch/x86/mm/shadow/types.h
@@ -262,15 +262,6 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
#define sh_rm_write_access_from_sl1p INTERNAL_NAME(sh_rm_write_access_from_sl1p)
#endif

/* sh_make_monitor_table depends only on the number of shadow levels */
#define sh_make_monitor_table \
SHADOW_SH_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
SHADOW_SH_NAME(sh_destroy_monitor_table, SHADOW_PAGING_LEVELS)

mfn_t sh_make_monitor_table(struct vcpu *v);
void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn);

#if SHADOW_PAGING_LEVELS == 3
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif
2 changes: 0 additions & 2 deletions xen/include/asm-x86/paging.h
@@ -107,8 +107,6 @@ struct shadow_paging_mode {
mfn_t gmfn);
#endif
#ifdef CONFIG_HVM
mfn_t (*make_monitor_table )(struct vcpu *v);
void (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
int (*guess_wrmap )(struct vcpu *v,
unsigned long vaddr, mfn_t gmfn);
void (*pagetable_dying )(paddr_t gpa);
