Skip to content

Commit c3c6c9f

Browse files
Lai Jiangshan authored and sean-jc committed
KVM: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c
Rename mmu->sync_page to mmu->sync_spte and move the code out of FNAME(sync_page)'s loop body into mmu.c. No functionalities change intended. Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com> Link: https://lore.kernel.org/r/20230216154115.710033-6-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 8ef228c commit c3c6c9f

File tree

3 files changed

+76
-76
lines changed

3 files changed

+76
-76
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -439,8 +439,8 @@ struct kvm_mmu {
439439
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
440440
gpa_t gva_or_gpa, u64 access,
441441
struct x86_exception *exception);
442-
int (*sync_page)(struct kvm_vcpu *vcpu,
443-
struct kvm_mmu_page *sp);
442+
int (*sync_spte)(struct kvm_vcpu *vcpu,
443+
struct kvm_mmu_page *sp, int i);
444444
void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
445445
struct kvm_mmu_root_info root;
446446
union kvm_cpu_role cpu_role;

arch/x86/kvm/mmu/mmu.c

Lines changed: 27 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1934,7 +1934,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
19341934
* differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
19351935
* reserved bits checks will be wrong, etc...
19361936
*/
1937-
if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page ||
1937+
if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
19381938
(sp->role.word ^ root_role.word) & ~sync_role_ign.word))
19391939
return false;
19401940

@@ -1943,10 +1943,30 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
19431943

19441944
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
19451945
{
1946+
int flush = 0;
1947+
int i;
1948+
19461949
if (!kvm_sync_page_check(vcpu, sp))
19471950
return -1;
19481951

1949-
return vcpu->arch.mmu->sync_page(vcpu, sp);
1952+
for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1953+
int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1954+
1955+
if (ret < 0)
1956+
return -1;
1957+
flush |= ret;
1958+
}
1959+
1960+
/*
1961+
* Note, any flush is purely for KVM's correctness, e.g. when dropping
1962+
* an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
1963+
* unmap or dirty logging event doesn't fail to flush. The guest is
1964+
* responsible for flushing the TLB to ensure any changes in protection
1965+
* bits are recognized, i.e. until the guest flushes or page faults on
1966+
* a relevant address, KVM is architecturally allowed to let vCPUs use
1967+
* cached translations with the old protection bits.
1968+
*/
1969+
return flush;
19501970
}
19511971

19521972
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -4504,7 +4524,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
45044524
{
45054525
context->page_fault = nonpaging_page_fault;
45064526
context->gva_to_gpa = nonpaging_gva_to_gpa;
4507-
context->sync_page = NULL;
4527+
context->sync_spte = NULL;
45084528
context->invlpg = NULL;
45094529
}
45104530

@@ -5095,15 +5115,15 @@ static void paging64_init_context(struct kvm_mmu *context)
50955115
{
50965116
context->page_fault = paging64_page_fault;
50975117
context->gva_to_gpa = paging64_gva_to_gpa;
5098-
context->sync_page = paging64_sync_page;
5118+
context->sync_spte = paging64_sync_spte;
50995119
context->invlpg = paging64_invlpg;
51005120
}
51015121

51025122
static void paging32_init_context(struct kvm_mmu *context)
51035123
{
51045124
context->page_fault = paging32_page_fault;
51055125
context->gva_to_gpa = paging32_gva_to_gpa;
5106-
context->sync_page = paging32_sync_page;
5126+
context->sync_spte = paging32_sync_spte;
51075127
context->invlpg = paging32_invlpg;
51085128
}
51095129

@@ -5192,7 +5212,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
51925212
context->cpu_role.as_u64 = cpu_role.as_u64;
51935213
context->root_role.word = root_role.word;
51945214
context->page_fault = kvm_tdp_page_fault;
5195-
context->sync_page = NULL;
5215+
context->sync_spte = NULL;
51965216
context->invlpg = NULL;
51975217
context->get_guest_pgd = get_cr3;
51985218
context->get_pdptr = kvm_pdptr_read;
@@ -5324,7 +5344,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
53245344

53255345
context->page_fault = ept_page_fault;
53265346
context->gva_to_gpa = ept_gva_to_gpa;
5327-
context->sync_page = ept_sync_page;
5347+
context->sync_spte = ept_sync_spte;
53285348
context->invlpg = ept_invlpg;
53295349

53305350
update_permission_bitmask(context, true);

arch/x86/kvm/mmu/paging_tmpl.h

Lines changed: 47 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
937937
* can't change unless all sptes pointing to it are nuked first.
938938
*
939939
* Returns
940-
* < 0: the sp should be zapped
941-
* 0: the sp is synced and no tlb flushing is required
942-
* > 0: the sp is synced and tlb flushing is required
940+
* < 0: failed to sync spte
941+
* 0: the spte is synced and no tlb flushing is required
942+
* > 0: the spte is synced and tlb flushing is required
943943
*/
944-
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
944+
static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
945945
{
946-
int i;
947946
bool host_writable;
948947
gpa_t first_pte_gpa;
949-
bool flush = false;
950-
951-
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
952-
953-
for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
954-
u64 *sptep, spte;
955-
struct kvm_memory_slot *slot;
956-
unsigned pte_access;
957-
pt_element_t gpte;
958-
gpa_t pte_gpa;
959-
gfn_t gfn;
960-
961-
if (!sp->spt[i])
962-
continue;
948+
u64 *sptep, spte;
949+
struct kvm_memory_slot *slot;
950+
unsigned pte_access;
951+
pt_element_t gpte;
952+
gpa_t pte_gpa;
953+
gfn_t gfn;
963954

964-
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
955+
if (!sp->spt[i])
956+
return 0;
965957

966-
if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
967-
sizeof(pt_element_t)))
968-
return -1;
958+
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
959+
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
969960

970-
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
971-
flush = true;
972-
continue;
973-
}
961+
if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
962+
sizeof(pt_element_t)))
963+
return -1;
974964

975-
gfn = gpte_to_gfn(gpte);
976-
pte_access = sp->role.access;
977-
pte_access &= FNAME(gpte_access)(gpte);
978-
FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
965+
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
966+
return 1;
979967

980-
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
981-
continue;
968+
gfn = gpte_to_gfn(gpte);
969+
pte_access = sp->role.access;
970+
pte_access &= FNAME(gpte_access)(gpte);
971+
FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
982972

983-
/*
984-
* Drop the SPTE if the new protections would result in a RWX=0
985-
* SPTE or if the gfn is changing. The RWX=0 case only affects
986-
* EPT with execute-only support, i.e. EPT without an effective
987-
* "present" bit, as all other paging modes will create a
988-
* read-only SPTE if pte_access is zero.
989-
*/
990-
if ((!pte_access && !shadow_present_mask) ||
991-
gfn != kvm_mmu_page_get_gfn(sp, i)) {
992-
drop_spte(vcpu->kvm, &sp->spt[i]);
993-
flush = true;
994-
continue;
995-
}
973+
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
974+
return 0;
996975

997-
/* Update the shadowed access bits in case they changed. */
998-
kvm_mmu_page_set_access(sp, i, pte_access);
976+
/*
977+
* Drop the SPTE if the new protections would result in a RWX=0
978+
* SPTE or if the gfn is changing. The RWX=0 case only affects
979+
* EPT with execute-only support, i.e. EPT without an effective
980+
* "present" bit, as all other paging modes will create a
981+
* read-only SPTE if pte_access is zero.
982+
*/
983+
if ((!pte_access && !shadow_present_mask) ||
984+
gfn != kvm_mmu_page_get_gfn(sp, i)) {
985+
drop_spte(vcpu->kvm, &sp->spt[i]);
986+
return 1;
987+
}
999988

1000-
sptep = &sp->spt[i];
1001-
spte = *sptep;
1002-
host_writable = spte & shadow_host_writable_mask;
1003-
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1004-
make_spte(vcpu, sp, slot, pte_access, gfn,
1005-
spte_to_pfn(spte), spte, true, false,
1006-
host_writable, &spte);
989+
/* Update the shadowed access bits in case they changed. */
990+
kvm_mmu_page_set_access(sp, i, pte_access);
1007991

1008-
flush |= mmu_spte_update(sptep, spte);
1009-
}
992+
sptep = &sp->spt[i];
993+
spte = *sptep;
994+
host_writable = spte & shadow_host_writable_mask;
995+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
996+
make_spte(vcpu, sp, slot, pte_access, gfn,
997+
spte_to_pfn(spte), spte, true, false,
998+
host_writable, &spte);
1010999

1011-
/*
1012-
* Note, any flush is purely for KVM's correctness, e.g. when dropping
1013-
* an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
1014-
* unmap or dirty logging event doesn't fail to flush. The guest is
1015-
* responsible for flushing the TLB to ensure any changes in protection
1016-
* bits are recognized, i.e. until the guest flushes or page faults on
1017-
* a relevant address, KVM is architecturally allowed to let vCPUs use
1018-
* cached translations with the old protection bits.
1019-
*/
1020-
return flush;
1000+
return mmu_spte_update(sptep, spte);
10211001
}
10221002

10231003
#undef pt_element_t

0 commit comments

Comments
 (0)