Commit 24e4f60

Merge tag 'v5.15.60' into 5.15

This is the 5.15.60 stable release

xanmod committed Aug 11, 2022
2 parents bd45248 + 7217df8
Showing 44 changed files with 514 additions and 122 deletions.
8 changes: 8 additions & 0 deletions Documentation/admin-guide/hw-vuln/spectre.rst
@@ -422,6 +422,14 @@ The possible values in this file are:
'RSB filling' Protection of RSB on context switch enabled
============= ===========================================

- EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:

=========================== =======================================================
'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
=========================== =======================================================

Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
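
To check which of these strings a given system reports, the spectre_v2 entry in the standard sysfs vulnerabilities directory can be read directly. A minimal user-space sketch (the sysfs path is the kernel's documented interface; the program itself is illustrative):

	/* pbrsb_check.c - print the PBRSB-eIBRS portion of the spectre_v2 status */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f) {
			perror("spectre_v2");
			return 1;
		}
		if (!fgets(line, sizeof(line), f)) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/* The PBRSB-eIBRS field is one comma-separated component of the line. */
		const char *p = strstr(line, "PBRSB-eIBRS:");
		fputs(p ? p : "no PBRSB-eIBRS field reported\n", stdout);
		return 0;
	}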
1 change: 1 addition & 0 deletions Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml
@@ -23,6 +23,7 @@ properties:
- brcm,bcm4345c5
- brcm,bcm43540-bt
- brcm,bcm4335a0
- brcm,bcm4349-bt

shutdown-gpios:
maxItems: 1
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 59
SUBLEVEL = 60
EXTRAVERSION =
NAME = Trick or Treat

2 changes: 1 addition & 1 deletion arch/arm64/crypto/poly1305-glue.c
@@ -52,7 +52,7 @@ static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
{
if (unlikely(!dctx->sset)) {
if (!dctx->rset) {
poly1305_init_arch(dctx, src);
poly1305_init_arm64(&dctx->h, src);
src += POLY1305_BLOCK_SIZE;
len -= POLY1305_BLOCK_SIZE;
dctx->rset = 1;
4 changes: 2 additions & 2 deletions arch/arm64/include/asm/kernel-pgtable.h
@@ -103,8 +103,8 @@
/*
* Initial memory map attributes.
*/
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_UXN)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | PMD_SECT_UXN)

#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
2 changes: 1 addition & 1 deletion arch/arm64/kernel/head.S
@@ -285,7 +285,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
subs x1, x1, #64
b.ne 1b

mov x7, SWAPPER_MM_MMUFLAGS
mov_q x7, SWAPPER_MM_MMUFLAGS

/*
* Create the identity mapping.
8 changes: 4 additions & 4 deletions arch/x86/Kconfig
@@ -2428,7 +2428,7 @@ config RETPOLINE
config RETHUNK
bool "Enable return-thunks"
depends on RETPOLINE && CC_HAS_RETURN_THUNK
default y
default y if X86_64
help
Compile the kernel with the return-thunks compiler option to guard
against kernel-to-user data leaks by avoiding return speculation.
@@ -2437,21 +2437,21 @@ config RETHUNK

config CPU_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
depends on CPU_SUP_AMD && RETHUNK
depends on CPU_SUP_AMD && RETHUNK && X86_64
default y
help
Compile the kernel with support for the retbleed=unret mitigation.

config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
depends on CPU_SUP_AMD
depends on CPU_SUP_AMD && X86_64
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.

config CPU_IBRS_ENTRY
bool "Enable IBRS on kernel entry"
depends on CPU_SUP_INTEL
depends on CPU_SUP_INTEL && X86_64
default y
help
Compile the kernel with support for the spectre_v2=ibrs mitigation.
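
These new X86_64-only dependencies are consulted at boot when a mitigation is chosen on the kernel command line. A hedged sketch of the usual pattern (IS_ENABLED() and pr_err() are real kernel APIs; the function and message here are illustrative, not the exact bugs.c code):

	/* Sketch: fall back when a selected mitigation was not compiled in. */
	static enum spectre_v2_mitigation_cmd
	check_ibrs_cmd(enum spectre_v2_mitigation_cmd cmd)
	{
		if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
			pr_err("spectre_v2=ibrs selected but CONFIG_CPU_IBRS_ENTRY not set; using AUTO\n");
			return SPECTRE_V2_CMD_AUTO;
		}
		return cmd;
	}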
2 changes: 2 additions & 0 deletions arch/x86/include/asm/cpufeatures.h
@@ -301,6 +301,7 @@
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */

/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -447,5 +448,6 @@
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */

#endif /* _ASM_X86_CPUFEATURES_H */
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -642,6 +642,7 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr;
u64 smbase;
u64 smi_count;
bool at_instruction_boundary;
bool tpr_access_reporting;
bool xsaves_enabled;
u64 ia32_xss;
@@ -1271,6 +1272,8 @@ struct kvm_vcpu_stat {
u64 nested_run;
u64 directed_yield_attempted;
u64 directed_yield_successful;
u64 preemption_reported;
u64 preemption_other;
u64 guest_mode;
};

4 changes: 4 additions & 0 deletions arch/x86/include/asm/msr-index.h
@@ -148,6 +148,10 @@
* are restricted to targets in
* kernel.
*/
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
*/

#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
21 changes: 19 additions & 2 deletions arch/x86/include/asm/nospec-branch.h
@@ -60,7 +60,9 @@
774: \
add $(BITS_PER_LONG/8) * 2, sp; \
dec reg; \
jnz 771b;
jnz 771b; \
/* barrier for jnz misprediction */ \
lfence;

#ifdef __ASSEMBLY__

@@ -118,13 +120,28 @@
#endif
.endm

.macro ISSUE_UNBALANCED_RET_GUARD
ANNOTATE_INTRA_FUNCTION_CALL
call .Lunbalanced_ret_guard_\@
int3
.Lunbalanced_ret_guard_\@:
add $(BITS_PER_LONG/8), %_ASM_SP
lfence
.endm

/*
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
* monstrosity above, manually.
*/
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
.ifb \ftr2
ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
.else
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
.endif
__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lunbalanced_\@:
ISSUE_UNBALANCED_RET_GUARD
.Lskip_rsb_\@:
.endm
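
The guard works because the CALL pushes one benign entry into the RSB (its return address points at the int3 speculation trap), the ADD rebalances the software stack without executing a RET, and the LFENCE closes the speculation window; the next real RET then consumes the benign entry instead of a potentially poisoned one. A user-space illustration with GNU inline asm (x86-64; a sketch, not kernel code — note that the CALL briefly writes below the stack pointer, so the SysV red-zone caveat applies in leaf functions):

	static inline void unbalanced_ret_guard(void)
	{
		asm volatile("call 1f\n\t"
			     "int3\n\t"		/* speculation trap for a mispredicted RET */
			     "1:\n\t"
			     "add $8, %%rsp\n\t"	/* drop the return address pushed by CALL */
			     "lfence"			/* fence before any dependent speculation */
			     ::: "memory");
	}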

86 changes: 63 additions & 23 deletions arch/x86/kernel/cpu/bugs.c
@@ -1328,6 +1328,53 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
}
}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
/*
* Similar to context switches, there are two types of RSB attacks
* after VM exit:
*
* 1) RSB underflow
*
* 2) Poisoned RSB entry
*
* When retpoline is enabled, both are mitigated by filling/clearing
* the RSB.
*
* When IBRS is enabled, while #1 would be mitigated by the IBRS branch
* prediction isolation protections, RSB still needs to be cleared
* because of #2. Note that SMEP provides no protection here, unlike
* user-space-poisoned RSB entries.
*
* eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
* bug is present then a LITE version of RSB protection is required;
* just a single CALL needs to retire before a RET is executed.
*/
switch (mode) {
case SPECTRE_V2_NONE:
return;

case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;

case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}

pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
dump_stack();
}

static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1478,28 +1525,7 @@ static void __init spectre_v2_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

/*
* Similar to context switches, there are two types of RSB attacks
* after vmexit:
*
* 1) RSB underflow
*
* 2) Poisoned RSB entry
*
* When retpoline is enabled, both are mitigated by filling/clearing
* the RSB.
*
* When IBRS is enabled, while #1 would be mitigated by the IBRS branch
* prediction isolation protections, RSB still needs to be cleared
* because of #2. Note that SMEP provides no protection here, unlike
* user-space-poisoned RSB entries.
*
* eIBRS, on the other hand, has RSB-poisoning protections, so it
* doesn't need RSB clearing after vmexit.
*/
if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
spectre_v2_determine_rsb_fill_type_at_vmexit(mode);

/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2285,6 +2311,19 @@ static char *ibpb_state(void)
return "";
}

static char *pbrsb_eibrs_state(void)
{
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
return ", PBRSB-eIBRS: SW sequence";
else
return ", PBRSB-eIBRS: Vulnerable";
} else {
return ", PBRSB-eIBRS: Not affected";
}
}

static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -2297,12 +2336,13 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

return sprintf(buf, "%s%s%s%s%s%s\n",
return sprintf(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
pbrsb_eibrs_state(),
spectre_v2_module_string());
}
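
Putting the pieces together, on an eIBRS system using the LITE VMEXIT sequence the composed sysfs line would read roughly as below. This is a hypothetical reconstruction of the format string above — the component strings are taken from the helpers in this hunk, not from captured output:

	#include <stdio.h>

	int main(void)
	{
		/* Mirrors the sprintf() composition in spectre_v2_show_state(). */
		printf("%s%s%s%s%s%s%s\n",
		       "Mitigation: Enhanced IBRS",	/* spectre_v2_strings[] */
		       ", IBPB: conditional",		/* ibpb_state() */
		       ", IBRS_FW",			/* X86_FEATURE_USE_IBRS_FW */
		       ", STIBP: conditional",		/* stibp_state() */
		       ", RSB filling",			/* X86_FEATURE_RSB_CTXSW */
		       ", PBRSB-eIBRS: SW sequence",	/* pbrsb_eibrs_state() */
		       "");				/* spectre_v2_module_string() */
		return 0;
	}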

12 changes: 10 additions & 2 deletions arch/x86/kernel/cpu/common.c
@@ -1027,6 +1027,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_SPECTRE_V2 BIT(8)
#define NO_EIBRS_PBRSB BIT(9)

#define VULNWL(vendor, family, model, whitelist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1067,7 +1068,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {

VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1077,7 +1078,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/

VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

/* AMD Family 0xf - 0x12 */
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
@@ -1255,6 +1258,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_RETBLEED);
}

if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);

if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
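
The new bug bit is thus set by a three-part predicate: eIBRS present, model not whitelisted, and the PBRSB_NO capability bit clear. As a standalone sketch (user-space style; the helper name and parameters are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	#define ARCH_CAP_PBRSB_NO (1ULL << 24)	/* bit 24, per the msr-index.h hunk */

	static bool cpu_has_eibrs_pbrsb(bool has_eibrs, bool whitelisted_no_pbrsb,
					uint64_t ia32_arch_capabilities)
	{
		/* Affected only if eIBRS exists, the model is not on the
		 * NO_EIBRS_PBRSB whitelist, and the CPU does not enumerate
		 * PBRSB_NO in MSR_IA32_ARCH_CAPABILITIES. */
		return has_eibrs &&
		       !whitelisted_no_pbrsb &&
		       !(ia32_arch_capabilities & ARCH_CAP_PBRSB_NO);
	}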

4 changes: 2 additions & 2 deletions arch/x86/kvm/svm/sev.c
@@ -832,7 +832,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,

/* If source buffer is not aligned then use an intermediate buffer */
if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
src_tpage = alloc_page(GFP_KERNEL);
src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
if (!src_tpage)
return -ENOMEM;

@@ -853,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
int dst_offset;

dst_tpage = alloc_page(GFP_KERNEL);
dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
if (!dst_tpage) {
ret = -ENOMEM;
goto e_free;
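
The only change in both hunks is the GFP flag: GFP_KERNEL_ACCOUNT charges the bounce page to the calling task's memory cgroup, which matters because userspace can drive these allocations repeatedly through the SEV debug ioctl. A kernel-style sketch of the pattern (the wrapper itself is hypothetical):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int with_bounce_page(int (*fn)(struct page *page, void *arg), void *arg)
	{
		/* Userspace-triggerable allocation: account it to the caller. */
		struct page *tpage = alloc_page(GFP_KERNEL_ACCOUNT);
		int ret;

		if (!tpage)
			return -ENOMEM;

		ret = fn(tpage, arg);	/* work on page_address(tpage) inside fn */
		__free_page(tpage);
		return ret;
	}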
2 changes: 2 additions & 0 deletions arch/x86/kvm/svm/svm.c
@@ -4263,6 +4263,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
8 changes: 5 additions & 3 deletions arch/x86/kvm/vmx/vmenter.S
@@ -197,11 +197,13 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
* entries and (in some cases) RSB underflow.
*
* eIBRS has its own protection against poisoned RSB, so it doesn't
* need the RSB filling sequence. But it does need to be enabled
* before the first unbalanced RET.
* need the RSB filling sequence. But it does need to be enabled, and a
* single call to retire, before the first unbalanced RET.
*/

FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
X86_FEATURE_RSB_VMEXIT_LITE


pop %_ASM_ARG2 /* @flags */
pop %_ASM_ARG1 /* @vmx */
1 change: 1 addition & 0 deletions arch/x86/kvm/vmx/vmx.c
@@ -6471,6 +6471,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
return;

handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
vcpu->arch.at_instruction_boundary = true;
}
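
The SVM and VMX hunks only set the flag. Its consumer lives elsewhere in this series (not among the hunks shown): it gates whether KVM reports the vCPU as preempted to the guest, and feeds the new preemption_reported/preemption_other counters added to kvm_vcpu_stat above. Roughly, as a hedged sketch of that consumer:

	static void record_preemption(struct kvm_vcpu *vcpu)
	{
		/*
		 * Only advertise preemption to the guest if the VM exit landed
		 * on an instruction boundary; otherwise the steal-time
		 * "preempted" hint could be observed mid-instruction.
		 */
		if (!vcpu->arch.at_instruction_boundary) {
			++vcpu->stat.preemption_other;
			return;
		}
		++vcpu->stat.preemption_reported;
		/* ... then mark the vCPU preempted in the steal-time record ... */
	}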

static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
