Skip to content

Commit fd6fa73

Browse files
agraf authored and bonzini committed
KVM: x86: SVM: Prevent MSR passthrough when MSR access is denied
We will introduce the concept of MSRs that may not be handled in kernel space soon. Some MSRs are directly passed through to the guest, effectively making them handled by KVM from user space's point of view. This patch introduces all logic required to ensure that MSRs that user space wants trapped are not marked as direct access for guests. Signed-off-by: Alexander Graf <graf@amazon.com> Message-Id: <20200925143422.21718-6-graf@amazon.com> [Make terminology a bit more similar to VMX. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 476c9bd commit fd6fa73

File tree

2 files changed

+76
-8
lines changed

2 files changed

+76
-8
lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 69 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
9191
static const struct svm_direct_access_msrs {
9292
u32 index; /* Index of the MSR */
9393
bool always; /* True if intercept is always on */
94-
} direct_access_msrs[] = {
94+
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
9595
{ .index = MSR_STAR, .always = true },
9696
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
9797
#ifdef CONFIG_X86_64
@@ -553,15 +553,41 @@ static int svm_cpu_init(int cpu)
553553

554554
}
555555

556-
static bool valid_msr_intercept(u32 index)
556+
static int direct_access_msr_slot(u32 msr)
557557
{
558-
int i;
558+
u32 i;
559559

560560
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
561-
if (direct_access_msrs[i].index == index)
562-
return true;
561+
if (direct_access_msrs[i].index == msr)
562+
return i;
563563

564-
return false;
564+
return -ENOENT;
565+
}
566+
567+
static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
568+
int write)
569+
{
570+
struct vcpu_svm *svm = to_svm(vcpu);
571+
int slot = direct_access_msr_slot(msr);
572+
573+
if (slot == -ENOENT)
574+
return;
575+
576+
/* Set the shadow bitmaps to the desired intercept states */
577+
if (read)
578+
set_bit(slot, svm->shadow_msr_intercept.read);
579+
else
580+
clear_bit(slot, svm->shadow_msr_intercept.read);
581+
582+
if (write)
583+
set_bit(slot, svm->shadow_msr_intercept.write);
584+
else
585+
clear_bit(slot, svm->shadow_msr_intercept.write);
586+
}
587+
588+
static bool valid_msr_intercept(u32 index)
589+
{
590+
return direct_access_msr_slot(index) != -ENOENT;
565591
}
566592

567593
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
@@ -583,8 +609,8 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
583609
return !!test_bit(bit_write, &tmp);
584610
}
585611

586-
static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
587-
int read, int write)
612+
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
613+
u32 msr, int read, int write)
588614
{
589615
u8 bit_read, bit_write;
590616
unsigned long tmp;
@@ -596,6 +622,13 @@ static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
596622
*/
597623
WARN_ON(!valid_msr_intercept(msr));
598624

625+
/* Enforce non allowed MSRs to trap */
626+
if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
627+
read = 0;
628+
629+
if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
630+
write = 0;
631+
599632
offset = svm_msrpm_offset(msr);
600633
bit_read = 2 * (msr & 0x0f);
601634
bit_write = 2 * (msr & 0x0f) + 1;
@@ -609,6 +642,13 @@ static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
609642
msrpm[offset] = tmp;
610643
}
611644

645+
static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
646+
int read, int write)
647+
{
648+
set_shadow_msr_intercept(vcpu, msr, read, write);
649+
set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
650+
}
651+
612652
static u32 *svm_vcpu_alloc_msrpm(void)
613653
{
614654
struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
@@ -639,6 +679,25 @@ static void svm_vcpu_free_msrpm(u32 *msrpm)
639679
__free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
640680
}
641681

682+
static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
683+
{
684+
struct vcpu_svm *svm = to_svm(vcpu);
685+
u32 i;
686+
687+
/*
688+
* Set intercept permissions for all direct access MSRs again. They
689+
* will automatically get filtered through the MSR filter, so we are
690+
* back in sync after this.
691+
*/
692+
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
693+
u32 msr = direct_access_msrs[i].index;
694+
u32 read = test_bit(i, svm->shadow_msr_intercept.read);
695+
u32 write = test_bit(i, svm->shadow_msr_intercept.write);
696+
697+
set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
698+
}
699+
}
700+
642701
static void add_msr_offset(u32 offset)
643702
{
644703
int i;
@@ -4222,6 +4281,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
42224281
.can_emulate_instruction = svm_can_emulate_instruction,
42234282

42244283
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
4284+
4285+
.msr_filter_changed = svm_msr_filter_changed,
42254286
};
42264287

42274288
static struct kvm_x86_init_ops svm_init_ops __initdata = {

arch/x86/kvm/svm/svm.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ static const u32 host_save_user_msrs[] = {
3131

3232
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
3333

34+
#define MAX_DIRECT_ACCESS_MSRS 15
3435
#define MSRPM_OFFSETS 16
3536
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
3637
extern bool npt_enabled;
@@ -157,6 +158,12 @@ struct vcpu_svm {
157158
*/
158159
struct list_head ir_list;
159160
spinlock_t ir_list_lock;
161+
162+
/* Save desired MSR intercept (read: pass-through) state */
163+
struct {
164+
DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
165+
DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
166+
} shadow_msr_intercept;
160167
};
161168

162169
struct svm_cpu_data {

0 commit comments

Comments
 (0)