@@ -13,23 +13,23 @@ vm_sw_loader_t vm_sw_loader;
 inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 {
 	const struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	return ctx->guest_cpu_regs.longs[reg];
 }
 
 inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	ctx->guest_cpu_regs.longs[reg] = val;
 }
 
 inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
@@ -39,22 +39,22 @@ inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
 	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
 }
 
 inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	return ctx->guest_cpu_regs.regs.rsp;
 }
 
 inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	ctx->guest_cpu_regs.regs.rsp = val;
 	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
@@ -63,7 +63,7 @@ inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
@@ -73,15 +73,15 @@ inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.ia32_efer
 		= val;
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
 }
 
 inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RFLAGS,
@@ -92,7 +92,7 @@ inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
 		val;
 	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }
@@ -101,7 +101,7 @@ inline uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR0_MASK);
@@ -119,19 +119,19 @@ inline void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu)
 {
 	return vcpu->
-		arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
+		arch.contexts[vcpu->arch.cur_context].run_ctx.cr2;
 }
 
 inline void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2 = val;
 }
 
 inline uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR4_MASK);
@@ -148,13 +148,13 @@ inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)
 
 inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
 {
-	return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
+	return vcpu->arch.contexts[vcpu->arch.cur_context].
 		ext_ctx.ia32_pat;
 }
 
 inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
+	vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx.ia32_pat
 		= val;
 }
 
@@ -168,13 +168,13 @@ static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia3
 {
 	if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
 		if (cs_attr & 0x2000)	/* CS.L = 1 */
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
+			vcpu->arch.cpu_mode = CPU_MODE_64BIT;
 		else
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_COMPATIBILITY;
+			vcpu->arch.cpu_mode = CPU_MODE_COMPATIBILITY;
 	} else if (cr0 & CR0_PE) {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
+		vcpu->arch.cpu_mode = CPU_MODE_PROTECTED;
 	} else {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
+		vcpu->arch.cpu_mode = CPU_MODE_REAL;
 	}
 }
 
@@ -186,8 +186,8 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
 	struct segment_sel *seg;
 	uint32_t limit, attr;
 
-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
-	ctx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
+	ctx = &(vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx);
 
 	/* NOTE:
 	 * This is to set the attr and limit to default value.
@@ -289,7 +289,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
 {
 	struct ext_context *ectx;
 
-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 	ectx->cs.selector = (uint16_t)((entry >> 4U) & 0xFFFFU);
 	ectx->cs.base = ectx->cs.selector << 4U;
 
@@ -353,13 +353,13 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 			vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
 			is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
 
-	vcpu->arch_vcpu.vpid = allocate_vpid();
+	vcpu->arch.vpid = allocate_vpid();
 
 	/* Initialize exception field in VCPU context */
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
 
 	/* Initialize cur context */
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;
 
 	/* Create per vcpu vlapic */
 	vlapic_create(vcpu);
@@ -374,7 +374,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;
 
@@ -392,7 +392,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	uint32_t instlen, cs_attr;
 	uint64_t rip, ia32_efer, cr0;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 	int64_t status = 0;
 
 	if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
@@ -409,8 +409,8 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		pr_info("VM %d Starting VCPU %hu",
 				vcpu->vm->vm_id, vcpu->vcpu_id);
 
-		if (vcpu->arch_vcpu.vpid)
-			exec_vmwrite16(VMX_VPID, vcpu->arch_vcpu.vpid);
+		if (vcpu->arch.vpid)
+			exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);
 
 		/*
 		 * A power-up or a reset invalidates all linear mappings,
@@ -447,7 +447,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		/* This VCPU was already launched, check if the last guest
 		 * instruction needs to be repeated and resume VCPU accordingly
 		 */
-		instlen = vcpu->arch_vcpu.inst_len;
+		instlen = vcpu->arch.inst_len;
 		rip = vcpu_get_rip(vcpu);
 		exec_vmwrite(VMX_GUEST_RIP, ((rip + (uint64_t)instlen) &
 				0xFFFFFFFFFFFFFFFFUL));
@@ -467,17 +467,17 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	set_vcpu_mode(vcpu, cs_attr, ia32_efer, cr0);
 
 	/* Obtain current VCPU instruction length */
-	vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
+	vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
 
 	ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
 
 	/* Obtain VM exit reason */
-	vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);
+	vcpu->arch.exit_reason = exec_vmread32(VMX_EXIT_REASON);
 
 	if (status != 0) {
 		/* refer to 64-ia32 spec section 24.9.1 volume#3 */
-		if (vcpu->arch_vcpu.exit_reason & VMX_VMENTRY_FAIL)
-			pr_fatal("vmentry fail reason=%lx", vcpu->arch_vcpu.exit_reason);
+		if (vcpu->arch.exit_reason & VMX_VMENTRY_FAIL)
+			pr_fatal("vmentry fail reason=%lx", vcpu->arch.exit_reason);
 		else
 			pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));
 
@@ -525,20 +525,20 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
-	vcpu->arch_vcpu.irq_window_enabled = 0;
-	vcpu->arch_vcpu.inject_event_pending = false;
-	(void)memset(vcpu->arch_vcpu.vmcs, 0U, CPU_PAGE_SIZE);
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.cur_context = NORMAL_WORLD;
+	vcpu->arch.irq_window_enabled = 0;
+	vcpu->arch.inject_event_pending = false;
+	(void)memset(vcpu->arch.vmcs, 0U, CPU_PAGE_SIZE);
 
 	for (i = 0; i < NR_WORLD; i++) {
-		(void)memset(&vcpu->arch_vcpu.contexts[i], 0U,
+		(void)memset(&vcpu->arch.contexts[i], 0U,
 			sizeof(struct run_context));
 	}
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;
 
 	vlapic = vcpu_vlapic(vcpu);
 	vlapic_reset(vlapic);