diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index bb41ff6e27af1..f7c4d9f865803 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -464,8 +464,33 @@ static bool tdx_read_msr(unsigned int msr, u64 *val)
 	return true;
 }
 
-static bool tdx_write_msr(unsigned int msr, unsigned int low,
-			  unsigned int high)
+/*
+ * TDX has context switched MSRs and emulated MSRs. The emulated MSRs
+ * normally trigger a #VE, but that is expensive, which can be avoided
+ * by doing a direct TDCALL. Unfortunately, this cannot be done for all
+ * because some MSRs are "context switched" and need WRMSR.
+ *
+ * The list for this is unfortunately quite long. To avoid maintaining
+ * very long switch statements just do a fast path for the few critical
+ * MSRs that need TDCALL, currently only TSC_DEADLINE.
+ *
+ * More can be added as needed.
+ *
+ * The others will be handled by the #VE handler as needed.
+ * See 18.1 "MSR virtualization" in the TDX Module EAS
+ */
+static bool tdx_fast_tdcall_path_msr(unsigned int msr)
+{
+	switch (msr) {
+	case MSR_IA32_TSC_DEADLINE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool __tdx_write_msr(unsigned int msr, unsigned int low,
+			    unsigned int high)
 {
 	u64 ret;
 
@@ -480,6 +505,14 @@ static bool tdx_write_msr(unsigned int msr, unsigned int low,
 	return ret ? false : true;
 }
 
+void notrace tdx_write_msr(unsigned int msr, u32 low, u32 high)
+{
+	if (tdx_fast_tdcall_path_msr(msr))
+		__tdx_write_msr(msr, low, high);
+	else
+		native_write_msr(msr, low, high);
+}
+
 static bool tdx_handle_cpuid(struct pt_regs *regs)
 {
 	struct tdx_hypercall_output out;
@@ -746,7 +779,7 @@ static bool tdx_virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
 		}
 		break;
 	case EXIT_REASON_MSR_WRITE:
-		ret = tdx_write_msr(regs->cx, regs->ax, regs->dx);
+		ret = __tdx_write_msr(regs->cx, regs->ax, regs->dx);
 		break;
 	case EXIT_REASON_CPUID:
 		ret = tdx_handle_cpuid(regs);
@@ -846,6 +879,8 @@ void __init tdx_early_init(void)
 
 	swiotlb_force = SWIOTLB_FORCE;
 
+	pv_ops.cpu.write_msr = tdx_write_msr;
+
 	legacy_pic = &null_legacy_pic;
 
 	/*