@@ -86,18 +86,11 @@ do { \
 #endif
 
 /* TIMER_LVT bit[18:17] == 0x10 TSC DEADLINE mode */
-#define VLAPIC_TSCDEADLINE(lvt) (((lvt) & 0x60000) == 0x40000)
+#define VLAPIC_TSCDEADLINE(lvt) (((lvt) & APIC_LVTT_TM) == APIC_LVTT_TM_TSCDLT)
 
 /*APIC-v APIC-access address */
 static void *apicv_apic_access_addr;
 
-static int
-vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
-		uint64_t data);
-static int
-vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
-		uint64_t *data);
-
 static int
 apicv_set_intr_ready(struct vlapic *vlapic, int vector, bool level);
 
@@ -110,22 +103,12 @@ apicv_set_tmr(struct vlapic *vlapic, int vector, bool level);
 static void
 apicv_batch_set_tmr(struct vlapic *vlapic);
 
-/*
- * Returns 1 if the vcpu needs to be notified of the interrupt and 0 otherwise.
- */
-static int
-vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level);
-
 /*
  * Post an interrupt to the vcpu running on 'hostcpu'. This will use a
  * hardware assist if available (e.g. Posted Interrupt) or fall back to
  * sending an 'ipinum' to interrupt the 'hostcpu'.
  */
 static void vlapic_set_error(struct vlapic *vlapic, uint32_t mask);
-static int vlapic_trigger_lvt(struct vlapic *vlapic, int vector);
-
-static uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
-static int vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
 
 static struct vlapic *
 vm_lapic_from_vcpu_id(struct vm *vm, int vcpu_id)
@@ -159,7 +142,7 @@ static int vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
 			return vcpu->vcpu_id;
 	}
 
-	pr_err("vm_apicid2vcpu_id: bad lapicid %d", lapicid);
+	pr_err("%s: bad lapicid %d", __func__, lapicid);
 
 	return phy_cpu_num;
 }
@@ -197,7 +180,7 @@ vlapic_build_id(struct vlapic *vlapic)
 	uint32_t id;
 
 	if (is_vm0(vcpu->vm)) {
-		/* Get APIC ID sequence format from cpu_storage */
+		/* Get APIC ID sequence format from cpu_storage */
 		id = per_cpu(lapic_id, vcpu->vcpu_id);
 	} else
 		id = vcpu->vcpu_id;
@@ -267,6 +250,9 @@ vlapic_esr_write_handler(struct vlapic *vlapic)
 	vlapic->esr_pending = 0;
 }
 
+/*
+ * Returns 1 if the vcpu needs to be notified of the interrupt and 0 otherwise.
+ */
 static int
 vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
 {
@@ -289,8 +275,7 @@ vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
 	if (vector < 16) {
 		vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR);
 		dev_dbg(ACRN_DBG_LAPIC,
-			"vlapic ignoring interrupt to vector %d",
-			vector);
+			"vlapic ignoring interrupt to vector %d", vector);
 		return 1;
 	}
 
@@ -368,8 +353,8 @@ lvt_off_to_idx(uint32_t offset)
 		break;
 	}
 	ASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX,
-		"lvt_off_to_idx: invalid lvt index %d for offset %#x",
-		index, offset);
+		"%s: invalid lvt index %d for offset %#x",
+		__func__, index, offset);
 
 	return index;
 }
@@ -426,7 +411,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
 		if (vlapic->vm->vpic_wire_mode == VPIC_WIRE_INTR ||
 			vlapic->vm->vpic_wire_mode == VPIC_WIRE_NULL) {
 			atomic_set_int(&vlapic->vm->vpic_wire_mode,
-				VPIC_WIRE_LAPIC);
+				VPIC_WIRE_LAPIC);
 			dev_dbg(ACRN_DBG_LAPIC,
 				"vpic wire mode -> LAPIC");
 		} else {
@@ -587,7 +572,7 @@ vlapic_update_ppr(struct vlapic *vlapic)
 		ppr = isrvec & 0xf0;
 
 	vlapic->apic_page->ppr = ppr;
-	dev_dbg(ACRN_DBG_LAPIC, "vlapic_update_ppr 0x%02x", ppr);
+	dev_dbg(ACRN_DBG_LAPIC, "%s 0x%02x", __func__, ppr);
 }
 
 static void
@@ -605,7 +590,7 @@ vlapic_process_eoi(struct vlapic *vlapic)
 		if (bitpos >= 0) {
 			if (vlapic->isrvec_stk_top <= 0) {
 				panic("invalid vlapic isrvec_stk_top %d",
-					vlapic->isrvec_stk_top);
+					vlapic->isrvec_stk_top);
 			}
 			isrptr[i].val &= ~(1 << bitpos);
 			vector = i * 32 + bitpos;
@@ -666,7 +651,7 @@ vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
 		 * When the local APIC is global/hardware disabled,
 		 * LINT[1:0] pins are configured as INTR and NMI pins,
 		 * respectively.
-		 */
+		 */
 		switch (vector) {
 		case APIC_LVT_LINT0:
 			vcpu_inject_extint(vlapic->vcpu);
@@ -774,11 +759,11 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
 			ldr = vlapic->apic_page->ldr;
 
 			if ((dfr & APIC_DFR_MODEL_MASK) ==
-				APIC_DFR_MODEL_FLAT) {
+				APIC_DFR_MODEL_FLAT) {
 				ldest = ldr >> 24;
 				mda_ldest = mda_flat_ldest;
 			} else if ((dfr & APIC_DFR_MODEL_MASK) ==
-				APIC_DFR_MODEL_CLUSTER) {
+				APIC_DFR_MODEL_CLUSTER) {
 
 				cluster = ldr >> 28;
 				ldest = (ldr >> 24) & 0xf;
@@ -1006,8 +991,8 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
 {
 	struct lapic *lapic = vlapic->apic_page;
 	int i, bitpos;
-	uint32_t vector;
-	uint32_t val;
+	uint32_t vector;
+	uint32_t val;
 	struct lapic_reg *irrptr;
 
 	if (vlapic->ops.apicv_pending_intr)
@@ -1231,10 +1216,10 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
 	int retval;
 
 	ASSERT((offset & 0xf) == 0 && offset < CPU_PAGE_SIZE,
-		"vlapic_write: invalid offset %#lx", offset);
+		"%s: invalid offset %#lx", __func__, offset);
 
 	dev_dbg(ACRN_DBG_LAPIC, "vlapic write offset %#lx, data %#lx",
-		offset, data);
+		offset, data);
 
 	if (offset > sizeof(*lapic))
 		return 0;
@@ -1351,12 +1336,12 @@ vlapic_reset(struct vlapic *vlapic)
 void
 vlapic_init(struct vlapic *vlapic)
 {
-	ASSERT(vlapic->vm != NULL, "vlapic_init: vm is not initialized");
+	ASSERT(vlapic->vm != NULL, "%s: vm is not initialized", __func__);
 	ASSERT(vlapic->vcpu->vcpu_id >= 0 &&
 		vlapic->vcpu->vcpu_id < phy_cpu_num,
-		"vlapic_init: vcpu_id is not initialized");
+		"%s: vcpu_id is not initialized", __func__);
 	ASSERT(vlapic->apic_page != NULL,
-		"vlapic_init: apic_page is not initialized");
+		"%s: apic_page is not initialized", __func__);
 
 	/*
 	 * If the vlapic is configured in x2apic mode then it will be
@@ -1426,10 +1411,10 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
 	struct vcpu *target_vcpu;
 
 	if (delmode != IOAPIC_RTE_DELFIXED &&
-		delmode != IOAPIC_RTE_DELLOPRI &&
-		delmode != IOAPIC_RTE_DELEXINT) {
+		delmode != IOAPIC_RTE_DELLOPRI &&
+		delmode != IOAPIC_RTE_DELEXINT) {
 		dev_dbg(ACRN_DBG_LAPIC,
-			"vlapic intr invalid delmode %#x", delmode);
+			"vlapic intr invalid delmode %#x", delmode);
 		return;
 	}
 	lowprio = (delmode == IOAPIC_RTE_DELLOPRI);
@@ -1462,8 +1447,8 @@ vlapic_enabled(struct vlapic *vlapic)
 {
 	struct lapic *lapic = vlapic->apic_page;
 
-	if ((vlapic->msr_apicbase & APICBASE_ENABLED) != 0 &&
-		(lapic->svr & APIC_SVR_ENABLE) != 0)
+	if ((vlapic->msr_apicbase & APICBASE_ENABLED) &&
+		(lapic->svr & APIC_SVR_ENABLE))
 		return true;
 	else
 		return false;
@@ -1625,12 +1610,12 @@ vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
 	 */
 	dest = (addr >> 12) & 0xff;
 	phys = ((addr & (MSI_ADDR_RH | MSI_ADDR_LOG)) !=
-		(MSI_ADDR_RH | MSI_ADDR_LOG));
+		(MSI_ADDR_RH | MSI_ADDR_LOG));
 	delmode = msg & APIC_DELMODE_MASK;
 	vec = msg & 0xff;
 
 	dev_dbg(ACRN_DBG_LAPIC, "lapic MSI %s dest %#x, vec %d",
-		phys ? "physical" : "logical", dest, vec);
+		phys ? "physical" : "logical", dest, vec);
 
 	vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
 	return 0;
@@ -1674,7 +1659,7 @@ static int tsc_periodic_time(void *data)
 
 	/* inject vcpu timer interrupt if existing */
 	if (VLAPIC_TSCDEADLINE(lapic->lvt_timer))
-		vlapic_intr_edge(vcpu, lapic->lvt_timer & 0xFF);
+		vlapic_intr_edge(vcpu, lapic->lvt_timer & APIC_LVTT_VECTOR);
 
 	return 0;
 }
@@ -1786,7 +1771,7 @@ vlapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
 
 	/*
 	 * Memory mapped local apic accesses should be aligned on a
-	 * 16-byte boundary.  They are also suggested to be 4 bytes
+	 * 16-byte boundary. They are also suggested to be 4 bytes
 	 * wide, alas not all OSes follow suggestions.
 	 */
 	off &= ~3;
@@ -2102,7 +2087,7 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
 	uint64_t qual;
 	struct vlapic *vlapic;
 
-	qual = vcpu->arch_vcpu.exit_qualification;
+	qual = vcpu->arch_vcpu.exit_qualification;
 	access_type = APIC_ACCESS_TYPE(qual);
 
 	/*parse offset if linear access*/
@@ -2135,7 +2120,7 @@ int veoi_vmexit_handler(struct vcpu *vcpu)
 
 	VCPU_RETAIN_RIP(vcpu);
 
-	vlapic = vcpu->arch_vcpu.vlapic;
+	vlapic = vcpu->arch_vcpu.vlapic;
 	lapic = vlapic->apic_page;
 	vector = (vcpu->arch_vcpu.exit_qualification) & 0xFF;
 
@@ -2164,7 +2149,7 @@ int apic_write_vmexit_handler(struct vcpu *vcpu)
 
 	handled = 1;
 	VCPU_RETAIN_RIP(vcpu);
-	vlapic = vcpu->arch_vcpu.vlapic;
+	vlapic = vcpu->arch_vcpu.vlapic;
 
 	switch (offset) {
 	case APIC_OFFSET_ID:
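
The main functional change above is the reworked VLAPIC_TSCDEADLINE macro, which tests the LVT timer-mode field against named constants instead of raw literals. Below is a minimal, self-contained sketch of that check, assuming APIC_LVTT_TM and APIC_LVTT_TM_TSCDLT keep the values 0x60000 and 0x40000 that the old macro hard-coded, and that APIC_LVTT_VECTOR is the low 8-bit vector field (0xFF) masked in tsc_periodic_time(); the sample LVT value and the program scaffolding are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Assumed values, taken from the literals the old macro and the 0xFF mask used. */
#define APIC_LVTT_TM		0x60000U	/* LVT timer mode field, bits 18:17 */
#define APIC_LVTT_TM_TSCDLT	0x40000U	/* mode 10b: TSC-deadline */
#define APIC_LVTT_VECTOR	0xFFU		/* interrupt vector field, bits 7:0 */

/* Same shape as the macro introduced in the diff. */
#define VLAPIC_TSCDEADLINE(lvt)	(((lvt) & APIC_LVTT_TM) == APIC_LVTT_TM_TSCDLT)

int main(void)
{
	uint32_t lvt_timer = 0x40030U;	/* hypothetical LVT: TSC-deadline mode, vector 0x30 */

	if (VLAPIC_TSCDEADLINE(lvt_timer))
		printf("TSC-deadline timer armed, vector %u\n",
			(unsigned)(lvt_timer & APIC_LVTT_VECTOR));
	return 0;
}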