|
12 | 12 | #include "vmx.h" |
13 | 13 | #include "mmu/spte.h" |
14 | 14 | #include "common.h" |
| 15 | +#include <trace/events/kvm.h> |
| 16 | +#include "trace.h" |
15 | 17 |
|
16 | 18 | #pragma GCC poison to_vmx |
17 | 19 |
|
@@ -660,6 +662,66 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu) |
660 | 662 | tdx->state = VCPU_TD_STATE_UNINITIALIZED; |
661 | 663 | } |
662 | 664 |
|
| 665 | +int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu) |
| 666 | +{ |
| 667 | + if (unlikely(to_tdx(vcpu)->state != VCPU_TD_STATE_INITIALIZED || |
| 668 | + to_kvm_tdx(vcpu->kvm)->state != TD_STATE_RUNNABLE)) |
| 669 | + return -EINVAL; |
| 670 | + |
| 671 | + return 1; |
| 672 | +} |
| 673 | + |
| 674 | +static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
| 675 | +{ |
| 676 | + struct vcpu_tdx *tdx = to_tdx(vcpu); |
| 677 | + |
| 678 | + guest_state_enter_irqoff(); |
| 679 | + |
| 680 | + tdx->vp_enter_ret = tdh_vp_enter(&tdx->vp, &tdx->vp_enter_args); |
| 681 | + |
| 682 | + guest_state_exit_irqoff(); |
| 683 | +} |
| 684 | + |
| 685 | +#define TDX_REGS_AVAIL_SET (BIT_ULL(VCPU_EXREG_EXIT_INFO_1) | \ |
| 686 | + BIT_ULL(VCPU_EXREG_EXIT_INFO_2) | \ |
| 687 | + BIT_ULL(VCPU_REGS_RAX) | \ |
| 688 | + BIT_ULL(VCPU_REGS_RBX) | \ |
| 689 | + BIT_ULL(VCPU_REGS_RCX) | \ |
| 690 | + BIT_ULL(VCPU_REGS_RDX) | \ |
| 691 | + BIT_ULL(VCPU_REGS_RBP) | \ |
| 692 | + BIT_ULL(VCPU_REGS_RSI) | \ |
| 693 | + BIT_ULL(VCPU_REGS_RDI) | \ |
| 694 | + BIT_ULL(VCPU_REGS_R8) | \ |
| 695 | + BIT_ULL(VCPU_REGS_R9) | \ |
| 696 | + BIT_ULL(VCPU_REGS_R10) | \ |
| 697 | + BIT_ULL(VCPU_REGS_R11) | \ |
| 698 | + BIT_ULL(VCPU_REGS_R12) | \ |
| 699 | + BIT_ULL(VCPU_REGS_R13) | \ |
| 700 | + BIT_ULL(VCPU_REGS_R14) | \ |
| 701 | + BIT_ULL(VCPU_REGS_R15)) |
| 702 | + |
| 703 | +fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) |
| 704 | +{ |
| 705 | + /* |
| 706 | + * force_immediate_exit requires the vCPU to enter the guest for event |
| 707 | + * injection, followed by an immediate exit. But the TDX module doesn't |
| 708 | + * guarantee entry; it's possible for KVM to _think_ it completely |
| 709 | + * entered the guest without actually having done so. |
| 710 | + * Since KVM never needs to force an immediate exit for TDX, and can't |
| 711 | + * do direct injection, just warn on force_immediate_exit. |
| 712 | + */ |
| 713 | + WARN_ON_ONCE(force_immediate_exit); |
| 714 | + |
| 715 | + trace_kvm_entry(vcpu, force_immediate_exit); |
| 716 | + |
| 717 | + tdx_vcpu_enter_exit(vcpu); |
| 718 | + |
| 719 | + vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET; |
| 720 | + |
| 721 | + trace_kvm_exit(vcpu, KVM_ISA_VMX); |
| 722 | + |
| 723 | + return EXIT_FASTPATH_NONE; |
| 724 | +} |
663 | 725 |
|
664 | 726 | void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level) |
665 | 727 | { |
|
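A note on the TDX_REGS_AVAIL_SET masking above: after tdh_vp_enter() returns, the TDX module has handed back only the GPRs it shares through vp_enter_args, so vcpu->arch.regs_avail is narrowed to exactly those registers; RSP, RIP and the rest of the guest state stay unavailable because they are protected by the TDX module. Consumers honor the mask via kvm_register_is_available() (a real helper in arch/x86/kvm/kvm_cache_regs.h, reproduced below); the caller after it is purely illustrative for this sketch:

/* From arch/x86/kvm/kvm_cache_regs.h: a register may be read from the
 * cache only if its bit survived the TDX_REGS_AVAIL_SET mask above. */
static __always_inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
						      enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

/* Hypothetical caller: RAX is in TDX_REGS_AVAIL_SET, so the cached value
 * is valid here; a register outside the set (e.g. RIP) must not be read
 * back this way for a TD vCPU. */
static unsigned long example_read_rax(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_REGS_RAX))
		return 0;	/* unreachable for RAX; defensive only */
	return vcpu->arch.regs[VCPU_REGS_RAX];
}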
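For context on how these hooks are reached: KVM's generic loop in vcpu_run() invokes the vcpu_pre_run callback and bails out to userspace when it returns <= 0, which is why tdx_vcpu_pre_run() returns 1 on success. The callbacks themselves are dispatched through vt_* wrappers that multiplex between VMX and TDX. A minimal sketch of that wiring, assuming the is_td_vcpu() helper and the vmx_* fallbacks used elsewhere in the series (not the literal main.c hunk):

static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	/* TD vCPUs take the TDX path; plain VMX guests are unaffected. */
	if (is_td_vcpu(vcpu))
		return tdx_vcpu_pre_run(vcpu);

	return vmx_vcpu_pre_run(vcpu);
}

static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{
	if (is_td_vcpu(vcpu))
		return tdx_vcpu_run(vcpu, force_immediate_exit);

	return vmx_vcpu_run(vcpu, force_immediate_exit);
}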