Skip to content

Commit 340d3bc

Browse files
ssuthiku-amd and bonzini
authored and committed
svm: Add interrupt injection via AVIC
This patch introduces a new mechanism to inject interrupts using AVIC. Since VINTR is not supported when AVIC is enabled, interrupts must be injected via the APIC backing page instead. This patch also adds support for the AVIC doorbell, which is used by KVM to signal a running vcpu to check its IRR for injected interrupts. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 44a95da commit 340d3bc

File tree

1 file changed

+35
-4
lines changed

1 file changed

+35
-4
lines changed

arch/x86/kvm/svm.c

Lines changed: 35 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
7171
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
7272
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
7373

74+
#define SVM_AVIC_DOORBELL 0xc001011b
75+
7476
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
7577
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
7678
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
@@ -293,6 +295,17 @@ static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
293295
mark_dirty(svm->vmcb, VMCB_AVIC);
294296
}
295297

298+
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
299+
{
300+
struct vcpu_svm *svm = to_svm(vcpu);
301+
u64 *entry = svm->avic_physical_id_cache;
302+
303+
if (!entry)
304+
return false;
305+
306+
return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
307+
}
308+
296309
static void recalc_intercepts(struct vcpu_svm *svm)
297310
{
298311
struct vmcb_control_area *c, *h;
@@ -2866,10 +2879,11 @@ static int clgi_interception(struct vcpu_svm *svm)
28662879
disable_gif(svm);
28672880

28682881
/* After a CLGI no interrupts should come */
2869-
svm_clear_vintr(svm);
2870-
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2871-
2872-
mark_dirty(svm->vmcb, VMCB_INTR);
2882+
if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
2883+
svm_clear_vintr(svm);
2884+
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2885+
mark_dirty(svm->vmcb, VMCB_INTR);
2886+
}
28732887

28742888
return 1;
28752889
}
@@ -3763,6 +3777,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
37633777
{
37643778
struct vmcb_control_area *control;
37653779

3780+
/* The following fields are ignored when AVIC is enabled */
37663781
control = &svm->vmcb->control;
37673782
control->int_vector = irq;
37683783
control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -3841,6 +3856,18 @@ static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
38413856
return;
38423857
}
38433858

3859+
static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
3860+
{
3861+
kvm_lapic_set_irr(vec, vcpu->arch.apic);
3862+
smp_mb__after_atomic();
3863+
3864+
if (avic_vcpu_is_running(vcpu))
3865+
wrmsrl(SVM_AVIC_DOORBELL,
3866+
__default_cpu_present_to_apicid(vcpu->cpu));
3867+
else
3868+
kvm_vcpu_wake_up(vcpu);
3869+
}
3870+
38443871
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
38453872
{
38463873
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3895,6 +3922,9 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
38953922
{
38963923
struct vcpu_svm *svm = to_svm(vcpu);
38973924

3925+
if (kvm_vcpu_apicv_active(vcpu))
3926+
return;
3927+
38983928
/*
38993929
* In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
39003930
* 1, because that's a separate STGI/VMRUN intercept. The next time we
@@ -4638,6 +4668,7 @@ static struct kvm_x86_ops svm_x86_ops = {
46384668
.sched_in = svm_sched_in,
46394669

46404670
.pmu_ops = &amd_pmu_ops,
4671+
.deliver_posted_interrupt = svm_deliver_avic_intr,
46414672
};
46424673

46434674
static int __init svm_init(void)

0 commit comments

Comments
 (0)