
Commit 29e749e

pgonda authored and sean-jc committed
KVM: selftests: Add support for allocating/managing protected guest memory
Add support for differentiating between protected (a.k.a. private, a.k.a. encrypted) memory and normal (a.k.a. shared) memory for VMs that support protected guest memory, e.g. x86's SEV. Provide and manage a common bitmap for tracking whether a given physical page resides in protected memory, as support for protected memory isn't x86 specific, i.e. adding an arch hook would be a net negative now, and in the future.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vishal Annapurve <vannapurve@google.com>
Cc: Ackerley Tng <ackerleytng@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Reviewed-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
Tested-by: Carlos Bilbao <carlos.bilbao@amd.com>
Originally-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20240223004258.3104051-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 8811565 commit 29e749e
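
The vm_arch_has_protected_memory() fallback added by this commit simply returns false; an architecture that does support protected guest memory is expected to shadow it via the #ifndef guard. A rough, hypothetical sketch of such an override, not part of this commit (the "vm->protected" flag and the header placement are assumed purely for illustration):

/*
 * Hypothetical arch-specific override, placed in an arch header that is
 * seen before the #ifndef fallback in kvm_util_base.h.  "vm->protected"
 * is an assumed per-VM flag, not something this commit adds.
 */
#define vm_arch_has_protected_memory vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return vm->protected;
}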

2 files changed, +41 -6 lines changed

tools/testing/selftests/kvm/include/kvm_util_base.h

Lines changed: 23 additions & 2 deletions
@@ -46,6 +46,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 struct userspace_mem_region {
 	struct kvm_userspace_memory_region2 region;
 	struct sparsebit *unused_phy_pages;
+	struct sparsebit *protected_phy_pages;
 	int fd;
 	off_t offset;
 	enum vm_mem_backing_src_type backing_src_type;
@@ -569,6 +570,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
 		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
 
+#ifndef vm_arch_has_protected_memory
+static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+{
+	return false;
+}
+#endif
+
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -832,10 +840,23 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+					    vm_paddr_t paddr_min, uint32_t memslot)
+{
+	/*
+	 * By default, allocate memory as protected for VMs that support
+	 * protected memory, as the majority of memory for such VMs is
+	 * protected, i.e. using shared memory is effectively opt-in.
+	 */
+	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+				    vm_arch_has_protected_memory(vm));
+}
+
 /*
  * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
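
With the static inline wrapper above, allocations default to protected on VMs that have protected memory; tests that need shared pages on such a VM opt in explicitly through __vm_phy_pages_alloc(). A minimal usage sketch (the memslot and minimum paddr values below are illustrative, not taken from this patch):

/* Default: protected on VMs with protected memory, shared otherwise. */
vm_paddr_t gpa = vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size, 0);

/* Explicitly shared pages, even on a VM with protected memory. */
vm_paddr_t shared_gpa = __vm_phy_pages_alloc(vm, 4,
					     KVM_UTIL_MIN_PFN * vm->page_size,
					     0, false);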

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 18 additions & 4 deletions
@@ -666,6 +666,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 		vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1047,6 +1048,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
@@ -1873,6 +1876,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +1981,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
+ *   protected - True if the pages will be used as protected/private memory
  *
  * Output Args: None
  *
@@ -1985,8 +1993,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1999,8 +2008,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 
+	base = pg = paddr_min >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2030,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
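
The per-region protected_phy_pages bitmap gives follow-up code a way to ask whether a given guest physical address was allocated as protected memory. A hypothetical helper along those lines, sketched here for illustration only (it assumes access to the library-internal userspace_mem_region_find() and is not part of this commit):

static bool vm_gpa_is_protected(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	/* VMs without protected memory never mark pages as protected. */
	if (!vm_arch_has_protected_memory(vm))
		return false;

	region = userspace_mem_region_find(vm, gpa, gpa);
	TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);

	return region->protected_phy_pages &&
	       sparsebit_is_set(region->protected_phy_pages, gpa >> vm->page_shift);
}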
