Skip to content

Commit de3cd11

Browse files
Sean Christopherson authored and bonzini (Paolo Bonzini) committed
KVM: x86: Omit caching logic for always-available GPRs
Except for RSP and RIP, which are held in VMX's VMCS, GPRs are always treated "available and dirtly" on both VMX and SVM, i.e. are unconditionally loaded/saved immediately before/after VM-Enter/VM-Exit. Eliminating the unnecessary caching code reduces the size of KVM by a non-trivial amount, much of which comes from the most common code paths. E.g. on x86_64, kvm_emulate_cpuid() is reduced from 342 to 182 bytes and kvm_emulate_hypercall() from 1362 to 1143, with the total size of KVM dropping by ~1000 bytes. With CONFIG_RETPOLINE=y, the numbers are even more pronounced, e.g.: 353->182, 1418->1172 and well over 2000 bytes. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 0c55671 commit de3cd11

File tree

6 files changed

+105
-80
lines changed

6 files changed

+105
-80
lines changed

arch/x86/kvm/cpuid.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -962,13 +962,13 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
962962
if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
963963
return 1;
964964

965-
eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
966-
ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
965+
eax = kvm_rax_read(vcpu);
966+
ecx = kvm_rcx_read(vcpu);
967967
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
968-
kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
969-
kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
970-
kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
971-
kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
968+
kvm_rax_write(vcpu, eax);
969+
kvm_rbx_write(vcpu, ebx);
970+
kvm_rcx_write(vcpu, ecx);
971+
kvm_rdx_write(vcpu, edx);
972972
return kvm_skip_emulated_instruction(vcpu);
973973
}
974974
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

arch/x86/kvm/hyperv.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1526,10 +1526,10 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
15261526

15271527
longmode = is_64_bit_mode(vcpu);
15281528
if (longmode)
1529-
kvm_register_write(vcpu, VCPU_REGS_RAX, result);
1529+
kvm_rax_write(vcpu, result);
15301530
else {
1531-
kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
1532-
kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
1531+
kvm_rdx_write(vcpu, result >> 32);
1532+
kvm_rax_write(vcpu, result & 0xffffffff);
15331533
}
15341534
}
15351535

@@ -1602,18 +1602,18 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
16021602
longmode = is_64_bit_mode(vcpu);
16031603

16041604
if (!longmode) {
1605-
param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
1606-
(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
1607-
ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
1608-
(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
1609-
outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
1610-
(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
1605+
param = ((u64)kvm_rdx_read(vcpu) << 32) |
1606+
(kvm_rax_read(vcpu) & 0xffffffff);
1607+
ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
1608+
(kvm_rcx_read(vcpu) & 0xffffffff);
1609+
outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
1610+
(kvm_rsi_read(vcpu) & 0xffffffff);
16111611
}
16121612
#ifdef CONFIG_X86_64
16131613
else {
1614-
param = kvm_register_read(vcpu, VCPU_REGS_RCX);
1615-
ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
1616-
outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
1614+
param = kvm_rcx_read(vcpu);
1615+
ingpa = kvm_rdx_read(vcpu);
1616+
outgpa = kvm_r8_read(vcpu);
16171617
}
16181618
#endif
16191619

arch/x86/kvm/kvm_cache_regs.h

Lines changed: 30 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,34 @@
99
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
1010
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
1111

12+
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
13+
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
14+
{ \
15+
return vcpu->arch.regs[VCPU_REGS_##uname]; \
16+
} \
17+
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
18+
unsigned long val) \
19+
{ \
20+
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
21+
}
22+
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
23+
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
24+
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
25+
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
26+
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
27+
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
28+
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
29+
#ifdef CONFIG_X86_64
30+
BUILD_KVM_GPR_ACCESSORS(r8, R8)
31+
BUILD_KVM_GPR_ACCESSORS(r9, R9)
32+
BUILD_KVM_GPR_ACCESSORS(r10, R10)
33+
BUILD_KVM_GPR_ACCESSORS(r11, R11)
34+
BUILD_KVM_GPR_ACCESSORS(r12, R12)
35+
BUILD_KVM_GPR_ACCESSORS(r13, R13)
36+
BUILD_KVM_GPR_ACCESSORS(r14, R14)
37+
BUILD_KVM_GPR_ACCESSORS(r15, R15)
38+
#endif
39+
1240
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
1341
enum kvm_reg reg)
1442
{
@@ -83,8 +111,8 @@ static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
83111

84112
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
85113
{
86-
return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
87-
| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
114+
return (kvm_rax_read(vcpu) & -1u)
115+
| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
88116
}
89117

90118
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)

arch/x86/kvm/svm.c

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2091,7 +2091,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
20912091
init_vmcb(svm);
20922092

20932093
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
2094-
kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
2094+
kvm_rdx_write(vcpu, eax);
20952095

20962096
if (kvm_vcpu_apicv_active(vcpu) && !init_event)
20972097
avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
@@ -3388,7 +3388,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
33883388
} else {
33893389
(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
33903390
}
3391-
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3391+
kvm_rax_write(&svm->vcpu, hsave->save.rax);
33923392
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
33933393
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
33943394
svm->vmcb->save.dr7 = 0;
@@ -3496,7 +3496,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
34963496
kvm_mmu_reset_context(&svm->vcpu);
34973497

34983498
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3499-
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3499+
kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
35003500
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
35013501
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
35023502

@@ -3787,19 +3787,19 @@ static int invlpga_interception(struct vcpu_svm *svm)
37873787
{
37883788
struct kvm_vcpu *vcpu = &svm->vcpu;
37893789

3790-
trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3791-
kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3790+
trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
3791+
kvm_rax_read(&svm->vcpu));
37923792

37933793
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3794-
kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3794+
kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
37953795

37963796
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
37973797
return kvm_skip_emulated_instruction(&svm->vcpu);
37983798
}
37993799

38003800
static int skinit_interception(struct vcpu_svm *svm)
38013801
{
3802-
trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3802+
trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
38033803

38043804
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
38053805
return 1;
@@ -3813,7 +3813,7 @@ static int wbinvd_interception(struct vcpu_svm *svm)
38133813
static int xsetbv_interception(struct vcpu_svm *svm)
38143814
{
38153815
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3816-
u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3816+
u32 index = kvm_rcx_read(&svm->vcpu);
38173817

38183818
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
38193819
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
@@ -4209,7 +4209,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
42094209

42104210
static int rdmsr_interception(struct vcpu_svm *svm)
42114211
{
4212-
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4212+
u32 ecx = kvm_rcx_read(&svm->vcpu);
42134213
struct msr_data msr_info;
42144214

42154215
msr_info.index = ecx;
@@ -4221,10 +4221,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
42214221
} else {
42224222
trace_kvm_msr_read(ecx, msr_info.data);
42234223

4224-
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
4225-
msr_info.data & 0xffffffff);
4226-
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
4227-
msr_info.data >> 32);
4224+
kvm_rax_write(&svm->vcpu, msr_info.data & 0xffffffff);
4225+
kvm_rdx_write(&svm->vcpu, msr_info.data >> 32);
42284226
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
42294227
return kvm_skip_emulated_instruction(&svm->vcpu);
42304228
}
@@ -4418,7 +4416,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
44184416
static int wrmsr_interception(struct vcpu_svm *svm)
44194417
{
44204418
struct msr_data msr;
4421-
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4419+
u32 ecx = kvm_rcx_read(&svm->vcpu);
44224420
u64 data = kvm_read_edx_eax(&svm->vcpu);
44234421

44244422
msr.data = data;

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4926,7 +4926,7 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
49264926
static int handle_xsetbv(struct kvm_vcpu *vcpu)
49274927
{
49284928
u64 new_bv = kvm_read_edx_eax(vcpu);
4929-
u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4929+
u32 index = kvm_rcx_read(vcpu);
49304930

49314931
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
49324932
return kvm_skip_emulated_instruction(vcpu);

arch/x86/kvm/x86.c

Lines changed: 44 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1096,15 +1096,15 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
10961096

10971097
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
10981098
{
1099-
u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
1099+
u32 ecx = kvm_rcx_read(vcpu);
11001100
u64 data;
11011101
int err;
11021102

11031103
err = kvm_pmu_rdpmc(vcpu, ecx, &data);
11041104
if (err)
11051105
return err;
1106-
kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
1107-
kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
1106+
kvm_rax_write(vcpu, (u32)data);
1107+
kvm_rdx_write(vcpu, data >> 32);
11081108
return err;
11091109
}
11101110
EXPORT_SYMBOL_GPL(kvm_rdpmc);
@@ -6586,7 +6586,7 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
65866586
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
65876587
unsigned short port)
65886588
{
6589-
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
6589+
unsigned long val = kvm_rax_read(vcpu);
65906590
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
65916591
size, port, &val, 1);
65926592

@@ -6610,16 +6610,15 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
66106610
}
66116611

66126612
/* For size less than 4 we merge, else we zero extend */
6613-
val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
6614-
: 0;
6613+
val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
66156614

66166615
/*
66176616
* Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
66186617
* the copy and tracing
66196618
*/
66206619
emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
66216620
vcpu->arch.pio.port, &val, 1);
6622-
kvm_register_write(vcpu, VCPU_REGS_RAX, val);
6621+
kvm_rax_write(vcpu, val);
66236622

66246623
return kvm_skip_emulated_instruction(vcpu);
66256624
}
@@ -6631,12 +6630,12 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
66316630
int ret;
66326631

66336632
/* For size less than 4 we merge, else we zero extend */
6634-
val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
6633+
val = (size < 4) ? kvm_rax_read(vcpu) : 0;
66356634

66366635
ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
66376636
&val, 1);
66386637
if (ret) {
6639-
kvm_register_write(vcpu, VCPU_REGS_RAX, val);
6638+
kvm_rax_write(vcpu, val);
66406639
return ret;
66416640
}
66426641

@@ -7151,11 +7150,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
71517150
if (kvm_hv_hypercall_enabled(vcpu->kvm))
71527151
return kvm_hv_hypercall(vcpu);
71537152

7154-
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
7155-
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
7156-
a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
7157-
a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
7158-
a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
7153+
nr = kvm_rax_read(vcpu);
7154+
a0 = kvm_rbx_read(vcpu);
7155+
a1 = kvm_rcx_read(vcpu);
7156+
a2 = kvm_rdx_read(vcpu);
7157+
a3 = kvm_rsi_read(vcpu);
71597158

71607159
trace_kvm_hypercall(nr, a0, a1, a2, a3);
71617160

@@ -7196,7 +7195,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
71967195
out:
71977196
if (!op_64_bit)
71987197
ret = (u32)ret;
7199-
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
7198+
kvm_rax_write(vcpu, ret);
72007199

72017200
++vcpu->stat.hypercalls;
72027201
return kvm_skip_emulated_instruction(vcpu);
@@ -8285,23 +8284,23 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
82858284
emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
82868285
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
82878286
}
8288-
regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
8289-
regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
8290-
regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
8291-
regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
8292-
regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
8293-
regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
8287+
regs->rax = kvm_rax_read(vcpu);
8288+
regs->rbx = kvm_rbx_read(vcpu);
8289+
regs->rcx = kvm_rcx_read(vcpu);
8290+
regs->rdx = kvm_rdx_read(vcpu);
8291+
regs->rsi = kvm_rsi_read(vcpu);
8292+
regs->rdi = kvm_rdi_read(vcpu);
82948293
regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
8295-
regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
8294+
regs->rbp = kvm_rbp_read(vcpu);
82968295
#ifdef CONFIG_X86_64
8297-
regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
8298-
regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
8299-
regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
8300-
regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
8301-
regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
8302-
regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
8303-
regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
8304-
regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
8296+
regs->r8 = kvm_r8_read(vcpu);
8297+
regs->r9 = kvm_r9_read(vcpu);
8298+
regs->r10 = kvm_r10_read(vcpu);
8299+
regs->r11 = kvm_r11_read(vcpu);
8300+
regs->r12 = kvm_r12_read(vcpu);
8301+
regs->r13 = kvm_r13_read(vcpu);
8302+
regs->r14 = kvm_r14_read(vcpu);
8303+
regs->r15 = kvm_r15_read(vcpu);
83058304
#endif
83068305

83078306
regs->rip = kvm_rip_read(vcpu);
@@ -8321,23 +8320,23 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
83218320
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
83228321
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
83238322

8324-
kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
8325-
kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
8326-
kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
8327-
kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
8328-
kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
8329-
kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
8323+
kvm_rax_write(vcpu, regs->rax);
8324+
kvm_rbx_write(vcpu, regs->rbx);
8325+
kvm_rcx_write(vcpu, regs->rcx);
8326+
kvm_rdx_write(vcpu, regs->rdx);
8327+
kvm_rsi_write(vcpu, regs->rsi);
8328+
kvm_rdi_write(vcpu, regs->rdi);
83308329
kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
8331-
kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
8330+
kvm_rbp_write(vcpu, regs->rbp);
83328331
#ifdef CONFIG_X86_64
8333-
kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
8334-
kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
8335-
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
8336-
kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
8337-
kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
8338-
kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
8339-
kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
8340-
kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
8332+
kvm_r8_write(vcpu, regs->r8);
8333+
kvm_r9_write(vcpu, regs->r9);
8334+
kvm_r10_write(vcpu, regs->r10);
8335+
kvm_r11_write(vcpu, regs->r11);
8336+
kvm_r12_write(vcpu, regs->r12);
8337+
kvm_r13_write(vcpu, regs->r13);
8338+
kvm_r14_write(vcpu, regs->r14);
8339+
kvm_r15_write(vcpu, regs->r15);
83418340
#endif
83428341

83438342
kvm_rip_write(vcpu, regs->rip);

0 commit comments

Comments
 (0)