
Commit b23c83a

x86/reboot: VMCLEAR active VMCSes before emergency reboot
VMCLEAR active VMCSes before any emergency reboot, not just if the kernel
may kexec into a new kernel after a crash.  Per Intel's SDM, the VMX
architecture doesn't require the CPU to flush the VMCS cache on INIT.  If
an emergency reboot doesn't RESET CPUs, cached VMCSes could theoretically
be kept and only be written back to memory after the new kernel is booted,
i.e. could effectively corrupt memory after reboot.

Opportunistically remove the setting of the global pointer to NULL to make
checkpatch happy.

Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 41e90a6
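For quick orientation, the emergency path after this change reads as follows. This is a condensed view assembled from the reboot.c hunks below, not new code; the comment is added here for explanation:

void cpu_emergency_disable_virtualization(void)
{
	/* Write cached VMCSes back to memory before tearing down VMX/SVM. */
	cpu_crash_vmclear_loaded_vmcss();

	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();
}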

File tree: 5 files changed, 27 insertions(+), 40 deletions(-)

arch/x86/include/asm/kexec.h
Lines changed: 0 additions & 2 deletions

@@ -205,8 +205,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #endif
 #endif
 
-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 extern void kdump_nmi_shootdown_cpus(void);
 
 #endif /* __ASSEMBLY__ */

arch/x86/include/asm/reboot.h
Lines changed: 2 additions & 0 deletions

@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1
 
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 void cpu_emergency_disable_virtualization(void);
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
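With the declaration now in <asm/reboot.h>, a VMX-aware module registers its callback the same way regardless of CONFIG_KEXEC_CORE. Below is a minimal sketch of that registration/teardown pattern; the module and function names are hypothetical and not part of this commit, but the RCU calls mirror the vmx.c hunks further down:

#include <linux/module.h>
#include <asm/reboot.h>

/*
 * Hypothetical per-CPU VMCLEAR callback; a real implementation would walk
 * its list of VMCSes loaded on this CPU and VMCLEAR each one.  It must be
 * safe to run from the emergency/crash (NMI) path.
 */
static void example_crash_vmclear(void)
{
}

static int __init example_init(void)
{
	/* Publish the callback; the emergency path dereferences it under RCU. */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss, example_crash_vmclear);
	return 0;
}

static void __exit example_exit(void)
{
	/* Unpublish and wait for any in-flight invocation to finish. */
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");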

arch/x86/kernel/crash.c
Lines changed: 0 additions & 31 deletions

@@ -48,38 +48,12 @@ struct crash_memmap_data {
 	unsigned int type;
 };
 
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
-
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-	crash_vmclear_fn *do_vmclear_operation = NULL;
-
-	rcu_read_lock();
-	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
-	if (do_vmclear_operation)
-		do_vmclear_operation();
-	rcu_read_unlock();
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
 	crash_save_cpu(regs, cpu);
 
-	/*
-	 * VMCLEAR VMCSs loaded on all cpus if needed.
-	 */
-	cpu_crash_vmclear_loaded_vmcss();
-
 	/*
 	 * Disable Intel PT to stop its logging
 	 */

@@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 
 	crash_smp_send_stop();
 
-	/*
-	 * VMCLEAR VMCSs loaded on this cpu if needed.
-	 */
-	cpu_crash_vmclear_loaded_vmcss();
-
 	cpu_emergency_disable_virtualization();
 
 	/*

arch/x86/kernel/reboot.c
Lines changed: 22 additions & 0 deletions

@@ -787,6 +787,26 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif
 
+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+	crash_vmclear_fn *do_vmclear_operation = NULL;
+
+	rcu_read_lock();
+	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+	if (do_vmclear_operation)
+		do_vmclear_operation();
+	rcu_read_unlock();
+}
 
 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;

@@ -798,6 +818,8 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
+	cpu_crash_vmclear_loaded_vmcss();
+
 	cpu_emergency_vmxoff();
 	cpu_emergency_svm_disable();
 }

arch/x86/kvm/vmx/vmx.c
Lines changed: 3 additions & 7 deletions

@@ -41,7 +41,7 @@
 #include <asm/idtentry.h>
 #include <asm/io.h>
 #include <asm/irq_remapping.h>
-#include <asm/kexec.h>
+#include <asm/reboot.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
 #include <asm/mshyperv.h>

@@ -725,7 +725,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 	return ret;
 }
 
-#ifdef CONFIG_KEXEC_CORE
 static void crash_vmclear_local_loaded_vmcss(void)
 {
 	int cpu = raw_smp_processor_id();

@@ -735,7 +734,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
 			    loaded_vmcss_on_cpu_link)
 		vmcs_clear(v->vmcs);
 }
-#endif /* CONFIG_KEXEC_CORE */
 
 static void __loaded_vmcs_clear(void *arg)
 {

@@ -8573,10 +8571,9 @@
 {
 	allow_smaller_maxphyaddr = false;
 
-#ifdef CONFIG_KEXEC_CORE
 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 	synchronize_rcu();
-#endif
+
 	vmx_cleanup_l1d_flush();
 }
 

@@ -8623,10 +8620,9 @@
 		pi_init_cpu(cpu);
 	}
 
-#ifdef CONFIG_KEXEC_CORE
 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 			   crash_vmclear_local_loaded_vmcss);
-#endif
+
 	vmx_check_vmcs12_offsets();
 
 	/*
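A short note on the teardown ordering visible in the module-exit hunk above, as a hedged reading rather than anything stated in the commit: clearing the pointer and then waiting for a grace period ensures that an emergency/crash path which already picked up the old callback under rcu_read_lock() finishes running before kvm_intel's code can be unloaded.

	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);	/* stop new readers from seeing the callback */
	synchronize_rcu();					/* wait out readers already inside rcu_read_lock() */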
