Commit 8221c13

ssuthiku-amd authored and bonzini committed
svm: Manage vcpu load/unload when enable AVIC
When a vcpu is loaded/unloaded to a physical core, we need to update the host physical APIC ID information in the Physical APIC-ID table accordingly.

Also, when a vCPU is blocking/un-blocking (due to a halt instruction), we need to make sure that the is-running bit is set accordingly in the Physical APIC-ID table.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
[Return void from new functions, add WARN_ON when they returned negative errno; split load and put into separate functions as they have almost nothing in common. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
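Editor's note: each vCPU owns one 64-bit entry in the AVIC Physical APIC-ID table (cached via svm->avic_physical_id_cache), and the new functions in this commit do a read-modify-write of that entry. Below is a minimal user-space sketch of the bit manipulation, assuming the field layout implied by the masks in svm.c (host physical APIC ID in the low bits, is-running in a high bit). The two #defines are illustrative stand-ins, not values taken from this diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real masks are #defined in arch/x86/kvm/svm.c. */
#define ENTRY_HOST_PHYSICAL_ID_MASK  0xFFULL
#define ENTRY_IS_RUNNING_MASK        (1ULL << 62)

/* Mirrors the avic_vcpu_load() bit manipulation: record the host core
 * the vCPU landed on and (re)assert is-running unless it is halted. */
static uint64_t entry_on_load(uint64_t entry, int h_physical_id, bool is_running)
{
        entry &= ~ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= ((uint64_t)h_physical_id & ENTRY_HOST_PHYSICAL_ID_MASK);

        entry &= ~ENTRY_IS_RUNNING_MASK;
        if (is_running)
                entry |= ENTRY_IS_RUNNING_MASK;
        return entry;
}

int main(void)
{
        uint64_t entry = 0;

        entry = entry_on_load(entry, 3, true);   /* vCPU scheduled on core 3 */
        printf("entry after load = %#llx\n", (unsigned long long)entry);

        entry &= ~ENTRY_IS_RUNNING_MASK;         /* what avic_vcpu_put() does */
        printf("entry after put  = %#llx\n", (unsigned long long)entry);
        return 0;
}

In these terms, avic_vcpu_load() corresponds to entry_on_load(), and avic_vcpu_put() is the final masking step in main().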
1 parent 3bbf356 commit 8221c13

File tree

1 file changed (+89, -0 lines)


arch/x86/kvm/svm.c

Lines changed: 89 additions & 0 deletions
@@ -35,6 +35,7 @@
 #include <linux/trace_events.h>
 #include <linux/slab.h>
 
+#include <asm/apic.h>
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
@@ -183,6 +184,7 @@ struct vcpu_svm {
         u32 ldr_reg;
         struct page *avic_backing_page;
         u64 *avic_physical_id_cache;
+        bool avic_is_running;
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
@@ -1316,6 +1318,72 @@ static int avic_vm_init(struct kvm *kvm)
         return err;
 }
 
+/**
+ * This function is called during VCPU halt/unhalt.
+ */
+static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+{
+        u64 entry;
+        int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        if (!kvm_vcpu_apicv_active(vcpu))
+                return;
+
+        svm->avic_is_running = is_run;
+
+        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
+        if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+                return;
+
+        entry = READ_ONCE(*(svm->avic_physical_id_cache));
+        WARN_ON(is_run == !!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
+
+        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+        if (is_run)
+                entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
+static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+        u64 entry;
+        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
+        int h_physical_id = __default_cpu_present_to_apicid(cpu);
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        if (!kvm_vcpu_apicv_active(vcpu))
+                return;
+
+        if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+                return;
+
+        entry = READ_ONCE(*(svm->avic_physical_id_cache));
+        WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+
+        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
+
+        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+        if (svm->avic_is_running)
+                entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+
+        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
+static void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+        u64 entry;
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        if (!kvm_vcpu_apicv_active(vcpu))
+                return;
+
+        entry = READ_ONCE(*(svm->avic_physical_id_cache));
+        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
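Editor's note: the table entry above is shared with hardware (the AVIC machinery reads it to decide whether an interrupt can be delivered directly to the running vCPU), so the single load and single store go through READ_ONCE()/WRITE_ONCE() while all bit math happens on a local copy. A hedged stand-alone illustration of that pattern; the macro definitions below are simplified approximations of the kernel's, for demonstration only.

#include <stdint.h>

/* Simplified stand-ins for the kernel's READ_ONCE/WRITE_ONCE: force a
 * single, untorn access through a volatile-qualified pointer so the
 * compiler cannot tear, fuse, or repeat the load/store of a location
 * that hardware may read concurrently. */
#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

#define IS_RUNNING_MASK (1ULL << 62)   /* illustrative bit position */

static uint64_t table_entry;           /* stand-in for *avic_physical_id_cache */

void set_running(int is_run)
{
        uint64_t entry = READ_ONCE(table_entry);  /* exactly one real load */

        entry &= ~IS_RUNNING_MASK;                /* scratch math on a local copy */
        if (is_run)
                entry |= IS_RUNNING_MASK;
        WRITE_ONCE(table_entry, entry);           /* exactly one real store */
}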
@@ -1379,6 +1447,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
                 goto free_page4;
         }
 
+        /* We initialize this flag to true to make sure that the is_running
+         * bit would be set the first time the vcpu is loaded.
+         */
+        svm->avic_is_running = true;
+
         svm->nested.hsave = page_address(hsave_page);
 
         svm->msrpm = page_address(msrpm_pages);
@@ -1455,13 +1528,17 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         /* This assumes that the kernel never uses MSR_TSC_AUX */
         if (static_cpu_has(X86_FEATURE_RDTSCP))
                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+
+        avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
         int i;
 
+        avic_vcpu_put(vcpu);
+
         ++vcpu->stat.host_state_reload;
         kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
@@ -1477,6 +1554,16 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
+static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+        avic_set_running(vcpu, false);
+}
+
+static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+        avic_set_running(vcpu, true);
+}
+
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
         return to_svm(vcpu)->vmcb->save.rflags;
@@ -4884,6 +4971,8 @@ static struct kvm_x86_ops svm_x86_ops = {
         .prepare_guest_switch = svm_prepare_guest_switch,
         .vcpu_load = svm_vcpu_load,
         .vcpu_put = svm_vcpu_put,
+        .vcpu_blocking = svm_vcpu_blocking,
+        .vcpu_unblocking = svm_vcpu_unblocking,
 
         .update_bp_intercept = update_bp_intercept,
         .get_msr = svm_get_msr,
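Editor's note: the two new kvm_x86_ops callbacks are reached from KVM's generic halt path rather than called directly by SVM code. A sketch of the dispatch follows; the kvm_arch_vcpu_blocking()/kvm_arch_vcpu_unblocking() wrappers shown here are an assumption about the surrounding tree of this era, not part of this commit.

/* Assumed shape of the x86 arch hooks that the generic kvm_vcpu_block()
 * path invokes around a HLT-induced wait; with this commit, SVM fills
 * in the two function pointers. */
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_blocking)
                kvm_x86_ops->vcpu_blocking(vcpu);   /* -> avic_set_running(vcpu, false) */
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_unblocking)
                kvm_x86_ops->vcpu_unblocking(vcpu); /* -> avic_set_running(vcpu, true) */
}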
