Skip to content

Commit

Permalink
x86/mm/cpa: Add support for TDX shared memory
Browse files Browse the repository at this point in the history
TDX steals a bit from the physical address and uses it to indicate
whether the page is private to the guest (bit set to 0) or unprotected
and shared with the VMM (bit set to 1).

AMD SEV uses a similar scheme, repurposing a bit from the physical address
to indicate encrypted or decrypted pages.

The kernel already has the infrastructure to deal with encrypted/decrypted
pages for AMD SEV. Modify __set_memory_enc_pgtable() to make it
aware of TDX.

After modifying page table entries, the kernel needs to notify VMM about
the change with tdx_hcall_request_gpa_type().

Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Co-developed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Tested-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
  • Loading branch information
kiryl committed Jan 13, 2022
1 parent 95cee05 commit 5364ea9
Show file tree
Hide file tree
Showing 7 changed files with 60 additions and 13 deletions.
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
Expand Up @@ -878,7 +878,7 @@ config INTEL_TDX_GUEST
depends on X86_X2APIC
select ARCH_HAS_CC_PLATFORM
select X86_MCE
select DYNAMIC_PHYSICAL_MASK
select X86_MEM_ENCRYPT
help
Support running as a guest under Intel TDX. Without this support,
the guest kernel can not boot or run under TDX.
Expand Down
8 changes: 8 additions & 0 deletions arch/x86/include/asm/mem_encrypt.h
Expand Up @@ -49,6 +49,8 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,

void __init mem_encrypt_free_decrypted_mem(void);

int amd_notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);

void __init sev_es_init_vc_handling(void);

#define __bss_decrypted __section(".bss..decrypted")
Expand Down Expand Up @@ -82,6 +84,12 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}

static inline void mem_encrypt_free_decrypted_mem(void) { }

/*
 * Stub used when AMD memory encryption support is compiled out
 * (!CONFIG_AMD_MEM_ENCRYPT): nothing to notify, report success.
 */
static inline int amd_notify_range_enc_status_changed(unsigned long vaddr,
						      int npages, bool enc)
{
	return 0;
}

#define __bss_decrypted

#endif /* CONFIG_AMD_MEM_ENCRYPT */
Expand Down
1 change: 0 additions & 1 deletion arch/x86/include/asm/set_memory.h
Expand Up @@ -83,7 +83,6 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);

extern int kernel_set_to_readonly;

Expand Down
2 changes: 2 additions & 0 deletions arch/x86/kernel/cc_platform.c
Expand Up @@ -20,6 +20,8 @@ static bool intel_cc_platform_has(enum cc_attr attr)
switch (attr) {
case CC_ATTR_GUEST_UNROLL_STRING_IO:
case CC_ATTR_HOTPLUG_DISABLED:
case CC_ATTR_GUEST_TDX:
case CC_ATTR_GUEST_MEM_ENCRYPT:
return true;
default:
return false;
Expand Down
10 changes: 6 additions & 4 deletions arch/x86/mm/mem_encrypt_amd.c
Expand Up @@ -256,7 +256,8 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
return pfn;
}

void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
int amd_notify_range_enc_status_changed(unsigned long vaddr, int npages,
bool enc)
{
#ifdef CONFIG_PARAVIRT
unsigned long sz = npages << PAGE_SHIFT;
Expand All @@ -270,7 +271,7 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
kpte = lookup_address(vaddr, &level);
if (!kpte || pte_none(*kpte)) {
WARN_ONCE(1, "kpte lookup for vaddr\n");
return;
return 0;
}

pfn = pg_level_to_pfn(level, kpte, NULL);
Expand All @@ -285,6 +286,7 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
vaddr = (vaddr & pmask) + psize;
}
#endif
return 0;
}

static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
Expand Down Expand Up @@ -392,7 +394,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,

ret = 0;

notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
amd_notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
__flush_tlb_all();
return ret;
Expand All @@ -410,7 +412,7 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
notify_range_enc_status_changed(vaddr, npages, enc);
amd_notify_range_enc_status_changed(vaddr, npages, enc);
}

void __init mem_encrypt_free_decrypted_mem(void)
Expand Down
41 changes: 34 additions & 7 deletions arch/x86/mm/pat/set_memory.c
Expand Up @@ -32,6 +32,7 @@
#include <asm/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tdx.h>

#include "../mm_internal.h"

Expand Down Expand Up @@ -1983,6 +1984,24 @@ int set_memory_global(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}

/*
 * Return the pgprot bits the confidential-computing platform uses to
 * mark a page encrypted/private (enc == true) or decrypted/shared
 * (enc == false).
 */
static pgprot_t pgprot_cc_mask(bool enc)
{
	pgprot_t prot = __pgprot(0);

	return enc ? pgprot_encrypted(prot) : pgprot_decrypted(prot);
}

/*
 * Tell the hypervisor that [vaddr, vaddr + npages) changed its
 * encryption status, dispatching to the TDX or AMD SEV notification
 * path depending on the platform we are running on.
 */
static int notify_range_enc_status_changed(unsigned long vaddr, int npages,
					   bool enc)
{
	if (cc_platform_has(CC_ATTR_GUEST_TDX))
		return tdx_notify_range_enc_status_changed(vaddr, npages, enc);

	return amd_notify_range_enc_status_changed(vaddr, npages, enc);
}

/*
* __set_memory_enc_pgtable() is used for the hypervisors that get
* informed about "encryption" status via page tables.
Expand All @@ -1999,18 +2018,28 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
memset(&cpa, 0, sizeof(cpa));
cpa.vaddr = &addr;
cpa.numpages = numpages;
cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);

cpa.mask_set = pgprot_cc_mask(enc);
cpa.mask_clr = pgprot_cc_mask(!enc);

cpa.pgd = init_mm.pgd;

/* Must avoid aliasing mappings in the highmem code */
kmap_flush_unused();
vm_unmap_aliases();

/*
* Before changing the encryption attribute, we need to flush caches.
* Before changing the encryption attribute, flush caches.
*
* For TDX, guest is responsible for flushing caches on private->shared
* transition. VMM is responsible for flushing on shared->private.
*/
cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
if (cc_platform_has(CC_ATTR_GUEST_TDX)) {
if (!enc)
cpa_flush(&cpa, 1);
} else {
cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
}

ret = __change_page_attr_set_clr(&cpa, 1);

Expand All @@ -2027,9 +2056,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
* Notify hypervisor that a given memory range is mapped encrypted
* or decrypted.
*/
notify_range_enc_status_changed(addr, numpages, enc);

return ret;
return notify_range_enc_status_changed(addr, numpages, enc);
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
Expand Down
9 changes: 9 additions & 0 deletions include/linux/cc_platform.h
Expand Up @@ -82,6 +82,15 @@ enum cc_attr {
* Examples include TDX Guest.
*/
CC_ATTR_HOTPLUG_DISABLED,

/**
* @CC_ATTR_GUEST_TDX: Trust Domain Extension Support
*
* The platform/OS is running as a TDX guest/virtual machine.
*
* Examples include Intel TDX.
*/
CC_ATTR_GUEST_TDX = 0x100,
};

#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
Expand Down

0 comments on commit 5364ea9

Please sign in to comment.