x86/mm/cpa: Generalize __set_memory_enc_pgtable()
The kernel provides infrastructure to set or clear the encryption mask
on pages for AMD SEV, but TDX requires a few tweaks.

- TDX and SEV have different requirements for cache and TLB
  flushing.

- TDX has its own routine to notify the VMM about page encryption
  status changes.

Modify __set_memory_enc_pgtable() and make it flexible enough to cover
both AMD SEV and Intel TDX. The AMD-specific behavior is isolated in
callbacks under x86_platform.guest. TDX will provide its own version of
said callbacks.

  [ bp: Beat into submission. ]

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: https://lore.kernel.org/r/20220223043528.2093214-1-brijesh.singh@amd.com
codomania authored and Borislav Petkov committed Feb 23, 2022
1 parent b577f54 · commit 1e8c597
Showing 5 changed files with 91 additions and 34 deletions.
1 change: 0 additions & 1 deletion arch/x86/include/asm/set_memory.h
@@ -84,7 +84,6 @@ int set_pages_rw(struct page *page, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
 
 extern int kernel_set_to_readonly;
16 changes: 16 additions & 0 deletions arch/x86/include/asm/x86_init.h
@@ -141,6 +141,21 @@ struct x86_init_acpi {
 	void (*reduced_hw_early_init)(void);
 };
 
+/**
+ * struct x86_guest - Functions used by misc guest incarnations like SEV, TDX, etc.
+ *
+ * @enc_status_change_prepare	Notify HV before the encryption status of a range is changed
+ * @enc_status_change_finish	Notify HV after the encryption status of a range is changed
+ * @enc_tlb_flush_required	Returns true if a TLB flush is needed before changing page encryption status
+ * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
+ */
+struct x86_guest {
+	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_tlb_flush_required)(bool enc);
+	bool (*enc_cache_flush_required)(void);
+};
+
 /**
  * struct x86_init_ops - functions for platform specific setup
  *
@@ -287,6 +302,7 @@ struct x86_platform_ops {
 	struct x86_legacy_features legacy;
 	void (*set_legacy_features)(void);
 	struct x86_hyper_runtime hyper;
+	struct x86_guest guest;
 };
 
 struct x86_apic_ops {
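The struct above is the whole contract: a follow-up guest flavor only has to install its own function pointers over the defaults. As a minimal sketch of such a registration — the tdx_* identifiers below are hypothetical and not part of this commit:

	/* Sketch only: the tdx_* names are assumptions, not this patch. */
	static bool tdx_enc_status_change_finish(unsigned long vaddr, int npages,
						 bool enc)
	{
		/* Would tell the VMM that [vaddr, vaddr + npages) changed status. */
		return true;
	}

	void __init tdx_guest_init_sketch(void)
	{
		/* Overwrite only the hooks this guest type actually needs. */
		x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;
	}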
16 changes: 14 additions & 2 deletions arch/x86/kernel/x86_init.c
@@ -129,6 +129,11 @@ struct x86_cpuinit_ops x86_cpuinit = {
 
 static void default_nmi_init(void) { };
 
+static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
+static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
+static bool enc_tlb_flush_required_noop(bool enc) { return false; }
+static bool enc_cache_flush_required_noop(void) { return false; }
+
 struct x86_platform_ops x86_platform __ro_after_init = {
 	.calibrate_cpu			= native_calibrate_cpu_early,
 	.calibrate_tsc			= native_calibrate_tsc,
@@ -138,9 +143,16 @@ struct x86_platform_ops x86_platform __ro_after_init = {
 	.is_untracked_pat_range		= is_ISA_range,
 	.nmi_init			= default_nmi_init,
 	.get_nmi_reason			= default_get_nmi_reason,
-	.save_sched_clock_state	= tsc_save_sched_clock_state,
-	.restore_sched_clock_state = tsc_restore_sched_clock_state,
+	.save_sched_clock_state		= tsc_save_sched_clock_state,
+	.restore_sched_clock_state	= tsc_restore_sched_clock_state,
 	.hyper.pin_vcpu			= x86_op_int_noop,
+
+	.guest = {
+		.enc_status_change_prepare = enc_status_change_prepare_noop,
+		.enc_status_change_finish  = enc_status_change_finish_noop,
+		.enc_tlb_flush_required	   = enc_tlb_flush_required_noop,
+		.enc_cache_flush_required  = enc_cache_flush_required_noop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(x86_platform);
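Defaulting every hook to an explicit noop is the design choice that lets common code invoke the callbacks unconditionally: bare metal and non-CoCo guests pay only an indirect call, and no NULL checks are needed at the call sites. The set_memory.c hunk below relies on exactly that, e.g.:

	/* Safe even before any guest flavor has registered callbacks: */
	if (x86_platform.guest.enc_tlb_flush_required(enc))
		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());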
72 changes: 50 additions & 22 deletions arch/x86/mm/mem_encrypt_amd.c
@@ -177,25 +177,6 @@ void __init sme_map_bootdata(char *real_mode_data)
 	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
 }
 
-void __init sme_early_init(void)
-{
-	unsigned int i;
-
-	if (!sme_me_mask)
-		return;
-
-	early_pmd_flags = __sme_set(early_pmd_flags);
-
-	__supported_pte_mask = __sme_set(__supported_pte_mask);
-
-	/* Update the protection map with memory encryption mask */
-	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
-		protection_map[i] = pgprot_encrypted(protection_map[i]);
-
-	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-		swiotlb_force = SWIOTLB_FORCE;
-}
-
 void __init sev_setup_arch(void)
 {
 	phys_addr_t total_mem = memblock_phys_mem_size();
@@ -256,7 +237,17 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 	return pfn;
 }
 
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+static bool amd_enc_tlb_flush_required(bool enc)
+{
+	return true;
+}
+
+static bool amd_enc_cache_flush_required(void)
+{
+	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
+}
+
+static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
 	unsigned long sz = npages << PAGE_SHIFT;
@@ -287,6 +278,19 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
 #endif
 }
 
+static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+{
+}
+
+/* Return true unconditionally: return value doesn't matter for the SEV side */
+static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+{
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+		enc_dec_hypercall(vaddr, npages, enc);
+
+	return true;
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
@@ -392,7 +396,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
-	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -410,7 +414,31 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 
 void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
-	notify_range_enc_status_changed(vaddr, npages, enc);
+	enc_dec_hypercall(vaddr, npages, enc);
+}
+
+void __init sme_early_init(void)
+{
+	unsigned int i;
+
+	if (!sme_me_mask)
+		return;
+
+	early_pmd_flags = __sme_set(early_pmd_flags);
+
+	__supported_pte_mask = __sme_set(__supported_pte_mask);
+
+	/* Update the protection map with memory encryption mask */
+	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+		protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		swiotlb_force = SWIOTLB_FORCE;
+
+	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
+	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
+	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
+	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
 }
 
 void __init mem_encrypt_free_decrypted_mem(void)
20 changes: 11 additions & 9 deletions arch/x86/mm/pat/set_memory.c
@@ -2008,10 +2008,12 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	kmap_flush_unused();
 	vm_unmap_aliases();
 
-	/*
-	 * Before changing the encryption attribute, we need to flush caches.
-	 */
-	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	/* Flush the caches as needed before changing the encryption attribute. */
+	if (x86_platform.guest.enc_tlb_flush_required(enc))
+		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
+
+	/* Notify hypervisor that we are about to set/clr encryption attribute. */
+	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2024,11 +2026,11 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
-	/*
-	 * Notify hypervisor that a given memory range is mapped encrypted
-	 * or decrypted.
-	 */
-	notify_range_enc_status_changed(addr, numpages, enc);
+	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
+	if (!ret) {
+		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+			ret = -EIO;
+	}
 
 	return ret;
 }
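Taken together, the rewritten helper drives whatever callbacks are registered, in a fixed order. Assuming the usual set_memory_decrypted() entry point, a rough trace for an SEV guest sharing a page with the host — the sequence follows the code above, with error paths elided:

	/*
	 * set_memory_decrypted(vaddr, 1)
	 *   -> __set_memory_enc_pgtable(vaddr, 1, false)
	 *        enc_tlb_flush_required(false)   -> true (AMD always flushes the TLB)
	 *        enc_cache_flush_required()      -> true unless X86_FEATURE_SME_COHERENT
	 *        enc_status_change_prepare(...)  -> noop on AMD
	 *        __change_page_attr_set_clr(...) -> clears _PAGE_ENC in the page tables
	 *        cpa_flush(&cpa, 0)              -> flushes the TLB for the new mapping
	 *        enc_status_change_finish(...)   -> enc_dec_hypercall() notifies the VMM
	 */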
