x86/tdx: Enable direct iomap MMIO optimizations
Using the infrastructure added in a previous patch, enable direct
TDCALLs for MMIO accesses through iomap. Add functions for each
of the read and write operations and hook them in using the iomap_mmio
call vector when TDX is active.

This improves TD virtio performance for disk workloads by about 4%.
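
For context, a rough sketch of how the iomap_mmio indirection from the
previous patch is assumed to be consumed: once a platform registers an
ops table, the generic ioread*/iowrite* accessors dispatch through it
instead of performing plain loads and stores. The struct fields below
mirror the tdx_iomap_mmio table added in this patch; the dispatch helper
is illustrative only and not taken from that patch.

/*
 * Illustrative sketch -- assumed shape of the IOMAP_IND_MMIO hook from
 * the previous patch. Field names match tdx_iomap_mmio below; the
 * example_ioread32() dispatch is not the actual generic code.
 */
struct iomap_mmio {
	unsigned char  (*ireadb)(void __iomem *addr);
	unsigned short (*ireadw)(void __iomem *addr);
	unsigned int   (*ireadl)(void __iomem *addr);
	unsigned long  (*ireadq)(void __iomem *addr);
	void (*iwriteb)(unsigned char v, void __iomem *addr);
	void (*iwritew)(unsigned short v, void __iomem *addr);
	void (*iwritel)(unsigned int v, void __iomem *addr);
	void (*iwriteq)(unsigned long v, void __iomem *addr);
};

extern const struct iomap_mmio *iomap_mmio;	/* set by the platform, e.g. in tdx_early_init() */

static inline unsigned int example_ioread32(void __iomem *addr)
{
	if (iomap_mmio)
		return iomap_mmio->ireadl(addr);	/* direct TDCALL path */
	return readl(addr);				/* regular MMIO load */
}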

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Andi Kleen authored and virtuoso committed Jul 15, 2022
1 parent bbf82dc commit 733588e
Showing 2 changed files with 100 additions and 0 deletions.
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -892,6 +892,7 @@ config INTEL_TDX_GUEST
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
select IOMAP_IND_MMIO
help
Support running as a guest under Intel TDX. Without this support,
the guest kernel can not boot or run under TDX.
99 changes: 99 additions & 0 deletions arch/x86/coco/tdx/tdx.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/io.h>
#include <linux/platform-feature.h>
#include <linux/random.h>
#include <asm/coco.h>
@@ -479,6 +480,102 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
return insn.length;
}

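/*
 * Do a single MMIO access directly via a TDCALL instead of taking a #VE:
 * walk the page tables to turn the guest virtual address into a physical
 * address, then hand the access to the mmio_read()/mmio_write() helpers.
 */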
static unsigned long tdx_virt_mmio(int size, bool write, unsigned long vaddr,
				   unsigned long *val)
{
	pte_t *pte;
	int level;

	pte = lookup_address(vaddr, &level);
	if (!pte)
		return -EIO;

	return write ?
		mmio_write(size,
			   (pte_pfn(*pte) << PAGE_SHIFT) +
			   (vaddr & ~page_level_mask(level)),
			   *val) :
		mmio_read(size,
			  (pte_pfn(*pte) << PAGE_SHIFT) +
			  (vaddr & ~page_level_mask(level)),
			  val);
}

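/*
 * iomap read helpers: return the value read via the direct TDCALL, or
 * all ones if the access fails.
 */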
static unsigned char tdx_mmio_readb(void __iomem *addr)
{
	unsigned long val;

	if (tdx_virt_mmio(1, false, (unsigned long)addr, &val))
		return 0xff;
	return val;
}

static unsigned short tdx_mmio_readw(void __iomem *addr)
{
	unsigned long val;

	if (tdx_virt_mmio(2, false, (unsigned long)addr, &val))
		return 0xffff;
	return val;
}

static unsigned int tdx_mmio_readl(void __iomem *addr)
{
	unsigned long val;

	if (tdx_virt_mmio(4, false, (unsigned long)addr, &val))
		return 0xffffffff;
	return val;
}

static unsigned long tdx_mmio_readq(void __iomem *addr)
{
	unsigned long val;

	if (tdx_virt_mmio(8, false, (unsigned long)addr, &val))
		return 0xffffffffffffffff;
	return val;
}

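/*
 * iomap write helpers: the accessors return void, so any failure from
 * tdx_virt_mmio() is silently dropped.
 */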
static void tdx_mmio_writeb(unsigned char v, void __iomem *addr)
{
	unsigned long val = v;

	tdx_virt_mmio(1, true, (unsigned long)addr, &val);
}

static void tdx_mmio_writew(unsigned short v, void __iomem *addr)
{
	unsigned long val = v;

	tdx_virt_mmio(2, true, (unsigned long)addr, &val);
}

static void tdx_mmio_writel(unsigned int v, void __iomem *addr)
{
	unsigned long val = v;

	tdx_virt_mmio(4, true, (unsigned long)addr, &val);
}

static void tdx_mmio_writeq(unsigned long v, void __iomem *addr)
{
	unsigned long val = v;

	tdx_virt_mmio(8, true, (unsigned long)addr, &val);
}

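/* MMIO accessors handed to the iomap indirection layer for TDX guests. */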
static const struct iomap_mmio tdx_iomap_mmio = {
	.ireadb = tdx_mmio_readb,
	.ireadw = tdx_mmio_readw,
	.ireadl = tdx_mmio_readl,
	.ireadq = tdx_mmio_readq,
	.iwriteb = tdx_mmio_writeb,
	.iwritew = tdx_mmio_writew,
	.iwritel = tdx_mmio_writel,
	.iwriteq = tdx_mmio_writeq,
};

static bool handle_in(struct pt_regs *regs, int size, int port)
{
struct tdx_hypercall_args args = {
@@ -816,6 +913,8 @@ void __init tdx_early_init(void)
*/
random_enable_trust_cpu();

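/* Route iomap MMIO accesses through direct TDCALLs rather than #VE emulation. */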
iomap_mmio = &tdx_iomap_mmio;

/*
* Make sure there is a panic if something goes wrong,
* just in case it's some kind of host attack.
