x86/kvm: Use bounce buffers for KVM memory protection
Mirror SEV: always use SWIOTLB if KVM memory protection is enabled.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
kiryl authored and intel-lab-lkp committed Oct 20, 2020
1 parent 466d697 commit 0bdb6c3
Showing 5 changed files with 28 additions and 22 deletions.
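
For orientation (editorial note, not part of the commit): setting swiotlb_force to SWIOTLB_FORCE makes every streaming DMA mapping in the guest go through SWIOTLB bounce buffers, which mem_encrypt_init() later converts to shared memory, so devices never DMA directly into protected guest pages. Below is a rough, hypothetical driver-side sketch of the effect; the helper example_send() and its parameters are invented for illustration and do not appear in this series.

#include <linux/dma-mapping.h>

/* Hypothetical example: streaming DMA from a guest with memory protection on. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * With swiotlb_force == SWIOTLB_FORCE, dma_map_single() copies
	 * buf into a slot of the SWIOTLB pool and returns that slot's
	 * DMA address, so the device never touches the protected page.
	 */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device to read len bytes from dma ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
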
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -806,6 +806,7 @@ config KVM_GUEST
 	select ARCH_CPUIDLE_HALTPOLL
 	select X86_HV_CALLBACK_VECTOR
 	select X86_MEM_ENCRYPT_COMMON
+	select SWIOTLB
 	default y
 	help
 	  This option enables various optimizations for running under the KVM

2 changes: 2 additions & 0 deletions arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/swiotlb.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -759,6 +760,7 @@ static void __init kvm_init_platform(void)
 		pr_info("KVM memory protection enabled\n");
 		mem_protected = true;
 		setup_force_cpu_cap(X86_FEATURE_KVM_MEM_PROTECTED);
+		swiotlb_force = SWIOTLB_FORCE;
 	}
 }
 
3 changes: 2 additions & 1 deletion arch/x86/kernel/pci-swiotlb.c
@@ -13,6 +13,7 @@
 #include <asm/dma.h>
 #include <asm/xen/swiotlb-xen.h>
 #include <asm/iommu_table.h>
+#include <asm/kvm_para.h>
 
 int swiotlb __read_mostly;
 
@@ -49,7 +50,7 @@ int __init pci_swiotlb_detect_4gb(void)
 	 * buffers are allocated and used for devices that do not support
 	 * the addressing range required for the encryption mask.
 	 */
-	if (sme_active())
+	if (sme_active() || kvm_mem_protected())
 		swiotlb = 1;
 
 	return swiotlb;

21 changes: 0 additions & 21 deletions arch/x86/mm/mem_encrypt.c
@@ -369,24 +369,3 @@ void __init mem_encrypt_free_decrypted_mem(void)
 
 	free_init_pages("unused decrypted", vaddr, vaddr_end);
 }
-
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void)
-{
-	if (!sme_me_mask)
-		return;
-
-	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
-	swiotlb_update_mem_attributes();
-
-	/*
-	 * With SEV, we need to unroll the rep string I/O instructions.
-	 */
-	if (sev_active())
-		static_branch_enable(&sev_enable_key);
-
-	pr_info("AMD %s active\n",
-		sev_active() ? "Secure Encrypted Virtualization (SEV)"
-		: "Secure Memory Encryption (SME)");
-}
-

23 changes: 23 additions & 0 deletions arch/x86/mm/mem_encrypt_common.c
@@ -37,3 +37,26 @@ bool force_dma_unencrypted(struct device *dev)
 
 	return false;
 }
+
+void __init mem_encrypt_init(void)
+{
+	if (!sme_me_mask && !kvm_mem_protected())
+		return;
+
+	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+	swiotlb_update_mem_attributes();
+
+	/*
+	 * With SEV, we need to unroll the rep string I/O instructions.
+	 */
+	if (sev_active())
+		static_branch_enable(&sev_enable_key);
+
+	if (sme_me_mask) {
+		pr_info("AMD %s active\n",
+			sev_active() ? "Secure Encrypted Virtualization (SEV)"
+			: "Secure Memory Encryption (SME)");
+	} else {
+		pr_info("KVM memory protection enabled\n");
+	}
+}
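
Editorial note, not part of the commit: moving mem_encrypt_init() into mem_encrypt_common.c lets a protected KVM guest reuse the hook SEV already uses to make the SWIOTLB pool reachable by devices. In kernels of this era, swiotlb_update_mem_attributes() roughly marks the whole bounce-buffer pool as decrypted (shared) and clears it; with KVM memory protection, set_memory_decrypted() is presumably what hands those pages back to the host. The sketch below is paraphrased from memory of kernel/dma/swiotlb.c and may differ in detail from the tree this patch applies to.

/* Paraphrased sketch, not part of this diff. */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || late_alloc)
		return;

	/* Make the entire bounce-buffer pool shared with the host/devices. */
	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}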
