@@ -35,6 +35,7 @@
 #include <linux/trace_events.h>
 #include <linux/slab.h>
 
+#include <asm/apic.h>
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
@@ -183,6 +184,7 @@ struct vcpu_svm {
 	u32 ldr_reg;
 	struct page *avic_backing_page;
 	u64 *avic_physical_id_cache;
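+	/* Cached is_running state, applied to the is_running bit of this
+	 * vCPU's physical APIC ID table entry whenever the vCPU is loaded. */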
+	bool avic_is_running;
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
@@ -1316,6 +1318,72 @@ static int avic_vm_init(struct kvm *kvm)
 	return err;
 }
 
+/**
+ * This function is called during VCPU halt/unhalt.
+ */
+static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+{
+	u64 entry;
+	int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	svm->avic_is_running = is_run;
+
+	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
+	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+		return;
+
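+	/* Toggle only the is_running bit; the host physical APIC ID half
+	 * of the cached table entry is left untouched here. */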
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	WARN_ON(is_run == !!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
+
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	if (is_run)
+		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
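+/*
+ * Called on vcpu_load: publish the new host CPU's APIC ID in this vCPU's
+ * physical APIC ID table entry and reapply the cached is_running state.
+ */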
1348
+static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	u64 entry;
+	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
+	int h_physical_id = __default_cpu_present_to_apicid(cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+		return;
+
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
+
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	if (svm->avic_is_running)
+		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
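+/*
+ * Called on vcpu_put: clear is_running so the table entry no longer marks
+ * this vCPU as scheduled in on a physical CPU.
+ */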
1374
+static void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	u64 entry;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1379,6 +1447,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto free_page4;
 	}
 
+	/* We initialize this flag to true to make sure that the is_running
+	 * bit is set the first time the vcpu is loaded.
+	 */
+	svm->avic_is_running = true;
+
 	svm->nested.hsave = page_address(hsave_page);
 
 	svm->msrpm = page_address(msrpm_pages);
@@ -1455,13 +1528,17 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	/* This assumes that the kernel never uses MSR_TSC_AUX */
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+
+	avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int i;
 
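+	/* Mark the vCPU as not running in its AVIC table entry first. */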
+	avic_vcpu_put(vcpu);
+
 	++vcpu->stat.host_state_reload;
 	kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
@@ -1477,6 +1554,16 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
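+/*
+ * kvm_x86_ops callbacks invoked when a vCPU blocks (e.g. on halt) and when
+ * it is unblocked: drop and restore the AVIC is_running state, respectively.
+ */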
+static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, false);
+}
+
+static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, true);
+}
+
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -4884,6 +4971,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
+	.vcpu_blocking = svm_vcpu_blocking,
+	.vcpu_unblocking = svm_vcpu_unblocking,
 
 	.update_bp_intercept = update_bp_intercept,
 	.get_msr = svm_get_msr,