KVM: Update lpage info when private/shared memory are mixed
When private and shared memory are mixed in a large page, the lpage_info
may not be accurate and should be updated with this mixed info. A large
page that contains mixed pages can't really be mapped as a large page,
since its private and shared pages come from different physical memory.

This patch updates lpage_info when the private/shared memory attribute
is changed. If both private and shared pages fall within a large page
region, it can't be mapped as a large page. Because it is tricky to
track the mixed info in a 'count'-like variable, this patch instead
reserves a bit in disallow_lpage to indicate that a large page includes
mixed private/shared pages.

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
Link: https://lore.kernel.org/r/20220915142913.2213336-7-chao.p.peng@linux.intel.com
chao-p authored and yamahata committed Sep 29, 2022
1 parent 862db15 commit fab0f69
Showing 5 changed files with 154 additions and 3 deletions.
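
A quick standalone illustration (userspace C, not part of the patch) of the encoding this commit introduces: the low 31 bits of disallow_lpage keep the existing reference count, while bit 31 flags mixed private/shared pages. The macro names mirror the ones added to kvm_host.h below.

/* Sketch only: compiles with any C compiler, no kernel headers needed. */
#include <assert.h>
#include <stdio.h>

#define KVM_LPAGE_PRIVATE_SHARED_MIXED (1U << 31)
#define KVM_LPAGE_COUNT_MAX ((1U << 31) - 1)

int main(void)
{
	unsigned int disallow_lpage = 0;

	disallow_lpage += 3;                              /* three ordinary disallow reasons */
	disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED; /* plus mixed private/shared pages */

	/* The count and the flag are recovered independently. */
	assert((disallow_lpage & KVM_LPAGE_COUNT_MAX) == 3);
	assert(disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED);

	/* Any non-zero value forbids the large mapping. */
	printf("large page disallowed: %d\n", disallow_lpage != 0);
	return 0;
}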
8 changes: 8 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -38,6 +38,7 @@
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 #define __KVM_HAVE_ZAP_GFN_RANGE
+#define __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
 
 #define KVM_MAX_VCPUS 1024
 
@@ -946,6 +947,13 @@ struct kvm_vcpu_arch {
 #endif
 };
 
+/*
+ * Use a bit in disallow_lpage to indicate private/shared pages mixed at the
+ * level. The remaining bits will be used as a reference count for other users.
+ */
+#define KVM_LPAGE_PRIVATE_SHARED_MIXED	(1U << 31)
+#define KVM_LPAGE_COUNT_MAX		((1U << 31) - 1)
+
 struct kvm_lpage_info {
 	int disallow_lpage;
 };
119 changes: 118 additions & 1 deletion arch/x86/kvm/mmu/mmu.c
@@ -762,11 +762,16 @@ static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
 {
 	struct kvm_lpage_info *linfo;
 	int i;
+	int disallow_count;
 
 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 
+		disallow_count = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
+		WARN_ON(disallow_count + count < 0 ||
+			disallow_count > KVM_LPAGE_COUNT_MAX - count);
+
 		linfo->disallow_lpage += count;
-		WARN_ON(linfo->disallow_lpage < 0);
 	}
 }
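
The WARN_ON added above checks that adjusting the count part neither underflows nor overflows into the reserved flag bit. A restatement of that invariant as a standalone sketch, widened to long to sidestep integer-promotion subtleties (illustrative only, not patch code):

#include <assert.h>

#define KVM_LPAGE_COUNT_MAX ((1L << 31) - 1)

/* The adjustment is sane iff the new count stays within [0, KVM_LPAGE_COUNT_MAX]. */
static int adjust_ok(long disallow_lpage, long count)
{
	long disallow_count = disallow_lpage & KVM_LPAGE_COUNT_MAX;

	return disallow_count + count >= 0 &&
	       disallow_count + count <= KVM_LPAGE_COUNT_MAX;
}

int main(void)
{
	assert(adjust_ok(1, -1));                   /* 1 -> 0: fine */
	assert(!adjust_ok(0, -1));                  /* underflow */
	assert(!adjust_ok(KVM_LPAGE_COUNT_MAX, 1)); /* would spill into the flag bit */
	return 0;
}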

@@ -6896,3 +6901,115 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 	if (kvm->arch.nx_lpage_recovery_thread)
 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
 }
+
+static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	XA_STATE(xas, &kvm->mem_attr_array, start);
+	gfn_t gfn = start;
+	void *entry;
+	bool shared, private;
+	bool mixed = false;
+
+	if (attr == KVM_MEM_ATTR_SHARED) {
+		shared = true;
+		private = false;
+	} else {
+		shared = false;
+		private = true;
+	}
+
+	rcu_read_lock();
+	entry = xas_load(&xas);
+	while (gfn < end) {
+		if (xas_retry(&xas, entry))
+			continue;
+
+		KVM_BUG_ON(gfn != xas.xa_index, kvm);
+
+		if (entry)
+			shared = true;
+		else
+			private = true;
+
+		if (private && shared) {
+			mixed = true;
+			goto out;
+		}
+
+		entry = xas_next(&xas);
+		gfn++;
+	}
+out:
+	rcu_read_unlock();
+	return mixed;
+}
+
+static inline void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
+{
+	if (mixed)
+		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
+	else
+		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
+}
+
+static void update_mem_lpage_info(struct kvm *kvm,
+				  struct kvm_memory_slot *slot,
+				  unsigned int attr,
+				  gfn_t start, gfn_t end)
+{
+	unsigned long lpage_start, lpage_end;
+	unsigned long gfn, pages, mask;
+	int level;
+
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		pages = KVM_PAGES_PER_HPAGE(level);
+		mask = ~(pages - 1);
+		lpage_start = start & mask;
+		lpage_end = (end - 1) & mask;
+
+		/*
+		 * We only need to scan the head and tail page, for middle pages
+		 * we know they are not mixed.
+		 */
+		update_mixed(lpage_info_slot(lpage_start, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_start,
+					       lpage_start + pages));
+
+		if (lpage_start == lpage_end)
+			return;
+
+		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
+			update_mixed(lpage_info_slot(gfn, slot, level), false);
+
+		update_mixed(lpage_info_slot(lpage_end, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_end,
+					       lpage_end + pages));
+	}
+}
+
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	struct kvm_memslot_iter iter;
+	int i;
+
+	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
+		  "Unsupported mem attribute.\n");
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
+		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+			slot = iter.slot;
+			start = max(start, slot->base_gfn);
+			end = min(end, slot->base_gfn + slot->npages);
+			if (WARN_ON_ONCE(start >= end))
+				continue;
+
+			update_mem_lpage_info(kvm, slot, attr, start, end);
+		}
+	}
+}
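
To see why only the head and tail large pages need a rescan, here is the same alignment arithmetic as a standalone sketch. It assumes 2M large pages (512 gfns each) and a made-up gfn range; every middle large page lies entirely inside the converted range, so its attribute is uniform by construction:

#include <stdio.h>

int main(void)
{
	unsigned long pages = 512;                  /* KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) */
	unsigned long mask = ~(pages - 1);
	unsigned long start = 1000, end = 3000;     /* hypothetical gfn range */
	unsigned long lpage_start = start & mask;   /* 512: head large page */
	unsigned long lpage_end = (end - 1) & mask; /* 2560: tail large page */
	unsigned long gfn;

	printf("rescan head %lu..%lu\n", lpage_start, lpage_start + pages);
	for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
		printf("middle %lu fully inside the range: mixed = false\n", gfn);
	printf("rescan tail %lu..%lu\n", lpage_end, lpage_end + pages);
	return 0;
}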
2 changes: 2 additions & 0 deletions arch/x86/kvm/x86.c
@@ -12495,6 +12495,8 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
 			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		if (kvm_slot_can_be_private(slot))
+			ugfn |= slot->private_offset >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, disable large page support for this slot.
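
For context, the alignment rule the comment above refers to: a gfn range and its host backing can share a large-page mapping only if both are congruent modulo the large-page size, and OR-ing in private_offset folds the private backing's alignment into the same test. A standalone restatement of that check (hypothetical helper; the actual comparison in kvm_alloc_memslot_metadata is elided from this hunk):

#include <stdio.h>

/* Non-zero when gfn and ugfn land at different offsets within a large page. */
static int misaligned(unsigned long gfn, unsigned long ugfn, unsigned long pages)
{
	return ((gfn ^ ugfn) & (pages - 1)) != 0;
}

int main(void)
{
	/* 512 gfns per 2M page; both sides 2M-aligned -> compatible. */
	printf("%d\n", misaligned(0x200, 0x400, 512)); /* 0 */
	/* Host side off by one page -> large pages must be disabled. */
	printf("%d\n", misaligned(0x200, 0x401, 512)); /* 1 */
	return 0;
}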
17 changes: 17 additions & 0 deletions include/linux/kvm_host.h
@@ -2286,4 +2286,21 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 /* Max number of entries allowed for each kvm dirty ring */
 #define KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+
+#define KVM_MEM_ATTR_SHARED	0x0001
+#define KVM_MEM_ATTR_PRIVATE	0x0002
+
+#ifdef __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end);
+#else
+static inline void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+					    gfn_t start, gfn_t end)
+{
+}
+#endif
+
+#endif /* CONFIG_HAVE_KVM_PRIVATE_MEM */
+
 #endif
11 changes: 9 additions & 2 deletions virt/kvm/kvm_main.c
@@ -934,13 +934,13 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
 #ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
-#define KVM_MEM_ATTR_SHARED	0x0001
 static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 				     bool is_private)
 {
 	gfn_t start, end;
 	unsigned long index;
 	void *entry;
+	int attr;
 	int r;
 
 	if (size == 0 || gpa + size < gpa)
@@ -955,7 +955,13 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	 * Guest memory defaults to private, kvm->mem_attr_array only stores
 	 * shared memory.
 	 */
-	entry = is_private ? NULL : xa_mk_value(KVM_MEM_ATTR_SHARED);
+	if (is_private) {
+		attr = KVM_MEM_ATTR_PRIVATE;
+		entry = NULL;
+	} else {
+		attr = KVM_MEM_ATTR_SHARED;
+		entry = xa_mk_value(KVM_MEM_ATTR_SHARED);
+	}
 
 	for (index = start; index < end; index++) {
 		r = xa_err(xa_store(&kvm->mem_attr_array, index, entry,
@@ -965,6 +971,7 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	}
 
 	kvm_zap_gfn_range(kvm, start, end);
+	kvm_arch_update_mem_attr(kvm, attr, start, end);
 
 	return r;
 err:
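A userspace model of the convention everything above relies on: kvm->mem_attr_array only stores entries for shared gfns, so an absent entry means private. A plain array stands in for the xarray here; this is a sketch, not patch code:

#include <stdbool.h>
#include <stdio.h>

#define NGFNS 16

static bool shared_map[NGFNS]; /* true == an xarray entry exists (shared) */

static bool is_mixed(unsigned long start, unsigned long end)
{
	bool shared = false, private = false;
	unsigned long gfn;

	for (gfn = start; gfn < end; gfn++) {
		if (shared_map[gfn])
			shared = true;  /* stored entry -> shared */
		else
			private = true; /* absent -> defaults to private */
	}
	return shared && private;
}

int main(void)
{
	unsigned long gfn;

	/* Convert gfns 4..7 to shared; everything else stays private. */
	for (gfn = 4; gfn < 8; gfn++)
		shared_map[gfn] = true;

	printf("0..8 mixed: %d\n", is_mixed(0, 8)); /* 1: both kinds present */
	printf("4..8 mixed: %d\n", is_mixed(4, 8)); /* 0: all shared */
	return 0;
}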
