Skip to content

Commit b836503

Browse files
committed
KVM: TDX: Fold tdx_sept_drop_private_spte() into tdx_sept_remove_private_spte()
Fold tdx_sept_drop_private_spte() into tdx_sept_remove_private_spte() as a step towards having "remove" be the one and only function that deals with removing/zapping/dropping a SPTE, e.g. to avoid having to differentiate between "zap", "drop", and "remove". Eliminating the "drop" helper also gets rid of what is effectively dead code due to redundant checks, e.g. on an HKID being assigned. No functional change intended. Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com> Reviewed-by: Kai Huang <kai.huang@intel.com> Reviewed-by: Yan Zhao <yan.y.zhao@intel.com> Tested-by: Yan Zhao <yan.y.zhao@intel.com> Tested-by: Kai Huang <kai.huang@intel.com> Link: https://patch.msgid.link/20251030200951.3402865-11-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent e6348c9 commit b836503

File tree

1 file changed

+40
-50
lines changed

1 file changed

+40
-50
lines changed

arch/x86/kvm/vmx/tdx.c

Lines changed: 40 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -1648,55 +1648,6 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
16481648
return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
16491649
}
16501650

1651-
static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
1652-
enum pg_level level, struct page *page)
1653-
{
1654-
int tdx_level = pg_level_to_tdx_sept_level(level);
1655-
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1656-
gpa_t gpa = gfn_to_gpa(gfn);
1657-
u64 err, entry, level_state;
1658-
1659-
/* TODO: handle large pages. */
1660-
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
1661-
return -EIO;
1662-
1663-
if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
1664-
return -EIO;
1665-
1666-
/*
1667-
* When zapping private page, write lock is held. So no race condition
1668-
* with other vcpu sept operation.
1669-
* Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
1670-
*/
1671-
err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1672-
&level_state);
1673-
1674-
if (unlikely(tdx_operand_busy(err))) {
1675-
/*
1676-
* The second retry is expected to succeed after kicking off all
1677-
* other vCPUs and prevent them from invoking TDH.VP.ENTER.
1678-
*/
1679-
tdx_no_vcpus_enter_start(kvm);
1680-
err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1681-
&level_state);
1682-
tdx_no_vcpus_enter_stop(kvm);
1683-
}
1684-
1685-
if (KVM_BUG_ON(err, kvm)) {
1686-
pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
1687-
return -EIO;
1688-
}
1689-
1690-
err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
1691-
1692-
if (KVM_BUG_ON(err, kvm)) {
1693-
pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
1694-
return -EIO;
1695-
}
1696-
tdx_quirk_reset_page(page);
1697-
return 0;
1698-
}
1699-
17001651
static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
17011652
enum pg_level level, void *private_spt)
17021653
{
@@ -1858,7 +1809,11 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
18581809
static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
18591810
enum pg_level level, kvm_pfn_t pfn)
18601811
{
1812+
int tdx_level = pg_level_to_tdx_sept_level(level);
1813+
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
18611814
struct page *page = pfn_to_page(pfn);
1815+
gpa_t gpa = gfn_to_gpa(gfn);
1816+
u64 err, entry, level_state;
18621817
int ret;
18631818

18641819
/*
@@ -1869,6 +1824,10 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
18691824
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
18701825
return -EIO;
18711826

1827+
/* TODO: handle large pages. */
1828+
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
1829+
return -EIO;
1830+
18721831
ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
18731832
if (ret <= 0)
18741833
return ret;
@@ -1879,7 +1838,38 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
18791838
*/
18801839
tdx_track(kvm);
18811840

1882-
return tdx_sept_drop_private_spte(kvm, gfn, level, page);
1841+
/*
1842+
* When zapping private page, write lock is held. So no race condition
1843+
* with other vcpu sept operation.
1844+
* Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
1845+
*/
1846+
err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1847+
&level_state);
1848+
1849+
if (unlikely(tdx_operand_busy(err))) {
1850+
/*
1851+
* The second retry is expected to succeed after kicking off all
1852+
* other vCPUs and prevent them from invoking TDH.VP.ENTER.
1853+
*/
1854+
tdx_no_vcpus_enter_start(kvm);
1855+
err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1856+
&level_state);
1857+
tdx_no_vcpus_enter_stop(kvm);
1858+
}
1859+
1860+
if (KVM_BUG_ON(err, kvm)) {
1861+
pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
1862+
return -EIO;
1863+
}
1864+
1865+
err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
1866+
if (KVM_BUG_ON(err, kvm)) {
1867+
pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
1868+
return -EIO;
1869+
}
1870+
1871+
tdx_quirk_reset_page(page);
1872+
return 0;
18831873
}
18841874

18851875
void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,

0 commit comments

Comments (0)