x86/mm/cpa: Add support for TDX shared memory
TDX steals a bit from the physical address and uses it to indicate
whether the page is private to the guest (bit set to 0) or unprotected
and shared with the VMM (bit set to 1).
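
In concept, selecting between the two views is just applying a mask to
the guest physical address. A minimal sketch follows; the bit position
and helper names are assumptions for illustration only (the real shared
bit position is discovered from the TDX module at boot), not the
kernel's actual API:

/*
 * Illustrative sketch -- TDX_SHARED_BIT_ASSUMED and these helpers
 * are hypothetical.
 */
#include <linux/types.h>

#define TDX_SHARED_BIT_ASSUMED	47

static inline u64 td_shared_mask(void)
{
	return 1ULL << TDX_SHARED_BIT_ASSUMED;
}

/* Make a guest physical address visible to (shared with) the VMM. */
static inline u64 td_mkshared(u64 gpa)
{
	return gpa | td_shared_mask();
}

/* Make a guest physical address private to the guest. */
static inline u64 td_mkprivate(u64 gpa)
{
	return gpa & ~td_shared_mask();
}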

AMD SEV uses a similar scheme, repurposing a bit from the physical address
to indicate encrypted or decrypted pages.

The kernel already has the infrastructure to deal with encrypted/decrypted
pages for AMD SEV. Modify __set_memory_enc_pgtable() to make it aware
of TDX.

After modifying page table entries, the kernel needs to notify the VMM
about the change with tdx_hcall_request_gpa_type().
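
For context, a typical consumer looks like the sketch below.
share_page_with_vmm() is a hypothetical driver helper;
set_memory_decrypted() is the existing kernel entry point that lands in
__set_memory_enc_pgtable() and, on TDX, triggers the hypercall:

/* Minimal usage sketch: a driver sharing one page with the VMM. */
#include <linux/gfp.h>
#include <asm/set_memory.h>

static void *share_page_with_vmm(void)
{
	unsigned long vaddr = __get_free_page(GFP_KERNEL);

	if (!vaddr)
		return NULL;

	/*
	 * On TDX this sets the shared bit in the page table entry and
	 * notifies the VMM of the GPA type change.
	 */
	if (set_memory_decrypted(vaddr, 1)) {
		free_page(vaddr);
		return NULL;
	}

	return (void *)vaddr;
}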

Tested-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
kiryl committed Nov 28, 2021
1 parent 1c491cc commit 0813e62
Showing 2 changed files with 36 additions and 4 deletions.
1 change: 1 addition & 0 deletions arch/x86/kernel/cc_platform.c
@@ -20,6 +20,7 @@ static bool intel_cc_platform_has(enum cc_attr attr)
 	case CC_ATTR_GUEST_UNROLL_STRING_IO:
 	case CC_ATTR_HOTPLUG_DISABLED:
 	case CC_ATTR_GUEST_TDX:
+	case CC_ATTR_GUEST_MEM_ENCRYPT:
 		return true;
 	default:
 		return false;
39 changes: 35 additions & 4 deletions arch/x86/mm/pat/set_memory.c
@@ -32,6 +32,7 @@
 #include <asm/set_memory.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
+#include <asm/tdx.h>
 
 #include "../mm_internal.h"
 
@@ -1983,12 +1984,21 @@ int set_memory_global(unsigned long addr, int numpages)
 				    __pgprot(_PAGE_GLOBAL), 0);
 }
 
+static pgprot_t pgprot_cc_mask(bool enc)
+{
+	if (enc)
+		return pgprot_cc_encrypted(__pgprot(0));
+	else
+		return pgprot_cc_decrypted(__pgprot(0));
+}
+
 /*
  * __set_memory_enc_pgtable() is used for the hypervisors that get
  * informed about "encryption" status via page tables.
  */
 static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 {
+	enum tdx_map_type map_type;
 	struct cpa_data cpa;
 	int ret;
 
@@ -1999,18 +2009,29 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	memset(&cpa, 0, sizeof(cpa));
 	cpa.vaddr = &addr;
 	cpa.numpages = numpages;
-	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
-	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+
+	cpa.mask_set = pgprot_cc_mask(enc);
+	cpa.mask_clr = pgprot_cc_mask(!enc);
+	map_type = enc ? TDX_MAP_PRIVATE : TDX_MAP_SHARED;
+
 	cpa.pgd = init_mm.pgd;
 
 	/* Must avoid aliasing mappings in the highmem code */
 	kmap_flush_unused();
 	vm_unmap_aliases();
 
 	/*
-	 * Before changing the encryption attribute, we need to flush caches.
+	 * Before changing the encryption attribute, flush caches.
+	 *
+	 * For TDX, guest is responsible for flushing caches on private->shared
+	 * transition. VMM is responsible for flushing on shared->private.
 	 */
-	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	if (cc_platform_has(CC_ATTR_GUEST_TDX)) {
+		if (map_type == TDX_MAP_SHARED)
+			cpa_flush(&cpa, 1);
+	} else {
+		cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	}
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2023,6 +2044,16 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
+	/*
+	 * For TDX Guest, raise hypercall to request memory mapping
+	 * change with the VMM.
+	 */
+	if (!ret && cc_platform_has(CC_ATTR_GUEST_TDX)) {
+		ret = tdx_hcall_request_gpa_type(__pa(addr),
+						 __pa(addr) + numpages * PAGE_SIZE,
+						 map_type);
+	}
+
 	/*
	 * Notify hypervisor that a given memory range is mapped encrypted
	 * or decrypted.
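
The flush rule the hunk above encodes can be read in isolation as
follows; guest_needs_cache_flush() is a hypothetical restatement for
illustration, not kernel code:

/*
 * A TDX guest flushes caches only on private->shared conversions
 * (the VMM flushes on shared->private); SME/SEV guests flush
 * unless the hardware keeps caches coherent across the C-bit.
 */
static bool guest_needs_cache_flush(bool is_tdx, bool to_shared,
				    bool sme_coherent)
{
	if (is_tdx)
		return to_shared;
	return !sme_coherent;
}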
