
Commit ace4f48

Xiangyang Wu authored and lijinxia committed
HV:treewide:rename vcpu_arch data structure

The data structure type "struct vcpu_arch" shall follow the naming convention. Naming convention rule: if a data structure type is used by multiple modules, its corresponding logical resource is exposed to external components (such as SOS and UOS), and its name is overly generic (such as vcpu or vm), the name needs the prefix "acrn_". A variable name may be shortened from its data structure type name.

The following update is made:

    struct vcpu_arch arch_vcpu --> struct acrn_vcpu_arch arch

Tracked-On: #861
Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
1 parent fa26a16 commit ace4f48

File tree

16 files changed, +136 -136 lines changed
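The change itself is mechanical: only the type name and the member name inside the vCPU structure are renamed, and every access site goes from vcpu->arch_vcpu.<field> to vcpu->arch.<field>. A minimal before/after sketch of the shape of the change (the field list and field types are abridged and assumed from the accesses visible in this diff; the full definitions live in the hypervisor's vcpu header, which is not part of this page):

/* Before: generic type name, redundant member name. */
struct vcpu_arch {
	uint64_t exit_qualification;	/* fields abridged; see diff below */
	uint32_t inst_len;
	/* ... */
};

struct acrn_vcpu {
	struct vcpu_arch arch_vcpu;	/* old member name */
	/* ... */
};

/* After: "acrn_" prefix on the type, member shortened to "arch". */
struct acrn_vcpu_arch {
	uint64_t exit_qualification;
	uint32_t inst_len;
	/* ... */
};

struct acrn_vcpu {
	struct acrn_vcpu_arch arch;	/* new member name */
	/* ... */
};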


hypervisor/arch/x86/ept.c

Lines changed: 2 additions & 2 deletions
@@ -30,7 +30,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 	void *eptp;
 	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());

-	if ((vcpu != NULL) && (vcpu->arch_vcpu.cur_context == SECURE_WORLD)) {
+	if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {
 		eptp = vm->arch_vm.sworld_eptp;
 	} else {
 		eptp = vm->arch_vm.nworld_eptp;
@@ -80,7 +80,7 @@ int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;

 	/* Handle page fault from guest */
-	exit_qual = vcpu->arch_vcpu.exit_qualification;
+	exit_qual = vcpu->arch.exit_qualification;

 	io_req->type = REQ_MMIO;


hypervisor/arch/x86/guest/instr_emul.c

Lines changed: 1 addition & 1 deletion
@@ -1631,7 +1631,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
 static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
 {
 	uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
-	uint32_t inst_len = vcpu->arch_vcpu.inst_len;
+	uint32_t inst_len = vcpu->arch.inst_len;
 	uint32_t err_code;
 	uint64_t fault_addr;
 	int ret;

hypervisor/arch/x86/guest/vcpu.c

Lines changed: 43 additions & 43 deletions
@@ -13,23 +13,23 @@ vm_sw_loader_t vm_sw_loader;
 inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 {
 	const struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	return ctx->guest_cpu_regs.longs[reg];
 }

 inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	ctx->guest_cpu_regs.longs[reg] = val;
 }

 inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
@@ -39,22 +39,22 @@ inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)

 inline void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
 	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
 }

 inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	return ctx->guest_cpu_regs.regs.rsp;
 }

 inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	ctx->guest_cpu_regs.regs.rsp = val;
 	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
@@ -63,7 +63,7 @@ inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
@@ -73,15 +73,15 @@ inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)

 inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.ia32_efer
 		= val;
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
 }

 inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RFLAGS,
@@ -92,7 +92,7 @@ inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)

 inline void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
 		val;
 	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }
@@ -101,7 +101,7 @@ inline uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR0_MASK);
@@ -119,19 +119,19 @@ inline void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu)
 {
 	return vcpu->
-		arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
+		arch.contexts[vcpu->arch.cur_context].run_ctx.cr2;
 }

 inline void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2 = val;
 }

 inline uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

 	if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR4_MASK);
@@ -148,13 +148,13 @@ inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)

 inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
 {
-	return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
+	return vcpu->arch.contexts[vcpu->arch.cur_context].
 		ext_ctx.ia32_pat;
 }

 inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
+	vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx.ia32_pat
 		= val;
 }

@@ -168,13 +168,13 @@ static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia3
 {
 	if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
 		if (cs_attr & 0x2000)	/* CS.L = 1 */
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
+			vcpu->arch.cpu_mode = CPU_MODE_64BIT;
 		else
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_COMPATIBILITY;
+			vcpu->arch.cpu_mode = CPU_MODE_COMPATIBILITY;
 	} else if (cr0 & CR0_PE) {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
+		vcpu->arch.cpu_mode = CPU_MODE_PROTECTED;
 	} else {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
+		vcpu->arch.cpu_mode = CPU_MODE_REAL;
 	}
 }

@@ -186,8 +186,8 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
 	struct segment_sel *seg;
 	uint32_t limit, attr;

-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
-	ctx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
+	ctx = &(vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx);

 	/* NOTE:
 	 * This is to set the attr and limit to default value.
@@ -289,7 +289,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
 {
 	struct ext_context *ectx;

-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 	ectx->cs.selector = (uint16_t)((entry >> 4U) & 0xFFFFU);
 	ectx->cs.base = ectx->cs.selector << 4U;

@@ -353,13 +353,13 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 		vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
 		is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");

-	vcpu->arch_vcpu.vpid = allocate_vpid();
+	vcpu->arch.vpid = allocate_vpid();

 	/* Initialize exception field in VCPU context */
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;

 	/* Initialize cur context */
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;

 	/* Create per vcpu vlapic */
 	vlapic_create(vcpu);
@@ -374,7 +374,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;

@@ -392,7 +392,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	uint32_t instlen, cs_attr;
 	uint64_t rip, ia32_efer, cr0;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 	int64_t status = 0;

 	if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
@@ -409,8 +409,8 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		pr_info("VM %d Starting VCPU %hu",
 				vcpu->vm->vm_id, vcpu->vcpu_id);

-		if (vcpu->arch_vcpu.vpid)
-			exec_vmwrite16(VMX_VPID, vcpu->arch_vcpu.vpid);
+		if (vcpu->arch.vpid)
+			exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);

 		/*
 		 * A power-up or a reset invalidates all linear mappings,
@@ -447,7 +447,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		/* This VCPU was already launched, check if the last guest
 		 * instruction needs to be repeated and resume VCPU accordingly
 		 */
-		instlen = vcpu->arch_vcpu.inst_len;
+		instlen = vcpu->arch.inst_len;
 		rip = vcpu_get_rip(vcpu);
 		exec_vmwrite(VMX_GUEST_RIP, ((rip+(uint64_t)instlen) &
 			0xFFFFFFFFFFFFFFFFUL));
@@ -467,17 +467,17 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	set_vcpu_mode(vcpu, cs_attr, ia32_efer, cr0);

 	/* Obtain current VCPU instruction length */
-	vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
+	vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

 	ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);

 	/* Obtain VM exit reason */
-	vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);
+	vcpu->arch.exit_reason = exec_vmread32(VMX_EXIT_REASON);

 	if (status != 0) {
 		/* refer to 64-ia32 spec section 24.9.1 volume#3 */
-		if (vcpu->arch_vcpu.exit_reason & VMX_VMENTRY_FAIL)
-			pr_fatal("vmentry fail reason=%lx", vcpu->arch_vcpu.exit_reason);
+		if (vcpu->arch.exit_reason & VMX_VMENTRY_FAIL)
+			pr_fatal("vmentry fail reason=%lx", vcpu->arch.exit_reason);
 		else
 			pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));

@@ -525,20 +525,20 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;

-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
-	vcpu->arch_vcpu.irq_window_enabled = 0;
-	vcpu->arch_vcpu.inject_event_pending = false;
-	(void)memset(vcpu->arch_vcpu.vmcs, 0U, CPU_PAGE_SIZE);
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.cur_context = NORMAL_WORLD;
+	vcpu->arch.irq_window_enabled = 0;
+	vcpu->arch.inject_event_pending = false;
+	(void)memset(vcpu->arch.vmcs, 0U, CPU_PAGE_SIZE);

 	for (i = 0; i < NR_WORLD; i++) {
-		(void)memset(&vcpu->arch_vcpu.contexts[i], 0U,
+		(void)memset(&vcpu->arch.contexts[i], 0U,
 			sizeof(struct run_context));
 	}
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;

 	vlapic = vcpu_vlapic(vcpu);
 	vlapic_reset(vlapic);
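Nearly every accessor changed above repeats one idiom: index the per-world context array by the vCPU's current world, then pick the run context (or ext context) out of it, which is why most changed lines rename two references at once. A sketch of that pattern under the new naming; the helper name cur_run_ctx is hypothetical and introduced here only to make the idiom explicit, it is not part of this commit:

/* Hypothetical helper condensing the access pattern repeated in vcpu.c. */
static inline struct run_context *cur_run_ctx(struct acrn_vcpu *vcpu)
{
	/* contexts[] holds one saved register set per world
	 * (NORMAL_WORLD, SECURE_WORLD); cur_context selects the
	 * world the vCPU is currently executing in. */
	return &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
}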

hypervisor/arch/x86/guest/vlapic.c

Lines changed: 11 additions & 11 deletions
@@ -499,7 +499,7 @@ vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
 	 * it to vCPU in next vmentry.
 	 */
 	bitmap_set_lock(ACRN_REQUEST_EVENT,
-		&vlapic->vcpu->arch_vcpu.pending_req);
+		&vlapic->vcpu->arch.pending_req);
 	vlapic_post_intr(vlapic->vcpu->pcpu_id);
 	return 0;
 }
@@ -564,7 +564,7 @@ uint64_t apicv_get_pir_desc_paddr(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vlapic *vlapic;

-	vlapic = &vcpu->arch_vcpu.vlapic;
+	vlapic = &vcpu->arch.vlapic;
 	return hva2hpa(&(vlapic->pir_desc));
 }

@@ -1189,11 +1189,11 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
 		 * the second SIPI will be ignored as it move out of
 		 * wait-for-SIPI state.
 		 */
-		target_vcpu->arch_vcpu.nr_sipi = 1U;
+		target_vcpu->arch.nr_sipi = 1U;
 	} else if (mode == APIC_DELMODE_STARTUP) {
 		/* Ignore SIPIs in any state other than wait-for-SIPI */
 		if ((target_vcpu->state != VCPU_INIT) ||
-			(target_vcpu->arch_vcpu.nr_sipi == 0U)) {
+			(target_vcpu->arch.nr_sipi == 0U)) {
 			return;
 		}

@@ -1202,8 +1202,8 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
 			target_vcpu->vcpu_id, vcpu_id,
 			(icr_low & APIC_VECTOR_MASK));

-		target_vcpu->arch_vcpu.nr_sipi--;
-		if (target_vcpu->arch_vcpu.nr_sipi > 0U) {
+		target_vcpu->arch.nr_sipi--;
+		if (target_vcpu->arch.nr_sipi > 0U) {
 			return;
 		}

@@ -2238,8 +2238,8 @@ vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)

 int vlapic_create(struct acrn_vcpu *vcpu)
 {
-	vcpu->arch_vcpu.vlapic.vm = vcpu->vm;
-	vcpu->arch_vcpu.vlapic.vcpu = vcpu;
+	vcpu->arch.vlapic.vm = vcpu->vm;
+	vcpu->arch.vlapic.vcpu = vcpu;

 	if (is_vcpu_bsp(vcpu)) {
 		uint64_t *pml4_page =
@@ -2450,7 +2450,7 @@ int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	struct acrn_vlapic *vlapic;
 	struct mmio_request *mmio = &vcpu->req.reqs.mmio;

-	qual = vcpu->arch_vcpu.exit_qualification;
+	qual = vcpu->arch.exit_qualification;
 	access_type = apic_access_type(qual);

 	/*parse offset if linear access*/
@@ -2500,7 +2500,7 @@ int veoi_vmexit_handler(struct acrn_vcpu *vcpu)

 	vlapic = vcpu_vlapic(vcpu);
 	lapic = &(vlapic->apic_page);
-	vector = (uint32_t)(vcpu->arch_vcpu.exit_qualification & 0xFFUL);
+	vector = (uint32_t)(vcpu->arch.exit_qualification & 0xFFUL);

 	tmrptr = &lapic->tmr[0];
 	idx = vector >> 5U;
@@ -2535,7 +2535,7 @@ int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t offset;
 	struct acrn_vlapic *vlapic = NULL;

-	qual = vcpu->arch_vcpu.exit_qualification;
+	qual = vcpu->arch.exit_qualification;
 	offset = (uint32_t)(qual & 0xFFFUL);

 	handled = 1;
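The vlapic_process_init_sipi() hunks above also show what arch.nr_sipi tracks: an INIT arms the vCPU to expect exactly one SIPI, and each SIPI received in the wait-for-SIPI state decrements the count, with the vCPU actually brought up only once the count reaches zero. A condensed restatement of that control flow under the new field name (logging and the actual vCPU start are elided; this is a sketch, not a drop-in copy of the function):

if (mode == APIC_DELMODE_INIT) {
	/* Expect one SIPI; any later SIPI is ignored because the
	 * vCPU has left the wait-for-SIPI state by then. */
	target_vcpu->arch.nr_sipi = 1U;
} else if (mode == APIC_DELMODE_STARTUP) {
	/* SIPIs only matter in wait-for-SIPI state. */
	if ((target_vcpu->state != VCPU_INIT) ||
		(target_vcpu->arch.nr_sipi == 0U)) {
		return;
	}
	target_vcpu->arch.nr_sipi--;
	if (target_vcpu->arch.nr_sipi > 0U) {
		return;
	}
	/* All expected SIPIs received: start the vCPU (elided). */
}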
