
Merge tag 'v6.6.29' into 6.6
This is the 6.6.29 stable release
xanmod committed Apr 28, 2024
2 parents ac361ff + a3463f0 commit 8922cd0
Showing 204 changed files with 2,530 additions and 1,211 deletions.
3 changes: 3 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -6864,6 +6864,9 @@
pause after every control message);
o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
delay after resetting its port);
p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
(Reduce timeout of the SET_ADDRESS
request from 5000 ms to 500 ms);
Example: quirks=0781:5580:bk,0a5c:5834:gij
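		A hedged usage sketch: the new 'p' flag combines with the
		existing ones in the same VID:PID:flags triplet format on
		the kernel command line. The device IDs below are
		placeholders chosen for illustration, not devices known to
		need this quirk:

			usbcore.quirks=1234:abcd:p,0781:5580:bk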

usbhid.mousepoll=
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -8142,7 +8142,7 @@ M: Geoffrey D. Bennett <g@b4.vu>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
F: sound/usb/mixer_scarlett_gen2.c
F: sound/usb/mixer_scarlett2.c

FORCEDETH GIGABIT ETHERNET DRIVER
M: Rain River <rain.1986.08.12@gmail.com>
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 28
SUBLEVEL = 29
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

10 changes: 10 additions & 0 deletions arch/arm/mach-omap2/pdata-quirks.c
@@ -275,9 +275,19 @@ static struct platform_device pandora_backlight = {
.id = -1,
};

static struct gpiod_lookup_table pandora_soc_audio_gpios = {
.dev_id = "soc-audio",
.table = {
GPIO_LOOKUP("gpio-112-127", 6, "dac", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-0-15", 14, "amp", GPIO_ACTIVE_HIGH),
{ }
},
};

static void __init omap3_pandora_legacy_init(void)
{
platform_device_register(&pandora_backlight);
gpiod_add_lookup_table(&pandora_soc_audio_gpios);
}
#endif /* CONFIG_ARCH_OMAP3 */
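A hedged consumer-side sketch, assuming the driver bound to the "soc-audio" device requests these descriptors by the "dac" and "amp" connection IDs declared in the lookup table above; the probe function name and flags are illustrative, not a quote of the Pandora ASoC driver:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int pandora_soc_audio_probe(struct platform_device *pdev)
{
	struct gpio_desc *dac, *amp;

	/* Resolved through the "dac" entry of pandora_soc_audio_gpios */
	dac = devm_gpiod_get(&pdev->dev, "dac", GPIOD_OUT_LOW);
	if (IS_ERR(dac))
		return PTR_ERR(dac);

	/* Resolved through the "amp" entry of pandora_soc_audio_gpios */
	amp = devm_gpiod_get(&pdev->dev, "amp", GPIOD_OUT_LOW);
	if (IS_ERR(amp))
		return PTR_ERR(amp);

	gpiod_set_value(dac, 1);	/* power up the DAC */
	gpiod_set_value(amp, 1);	/* unmute the amplifier */
	return 0;
}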

40 changes: 21 additions & 19 deletions arch/arm64/include/asm/tlbflush.h
@@ -152,12 +152,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)

/*
* Generate 'num' values from -1 to 30 with -1 rejected by the
* __flush_tlb_range() loop below.
* Generate 'num' values from -1 to 31 with -1 rejected by the
* __flush_tlb_range() loop below. Its return value is only
* significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
* 'pages' is more than that, you must iterate over the overall
* range.
*/
#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale) \
((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
#define __TLBI_RANGE_NUM(pages, scale) \
({ \
int __pages = min((pages), \
__TLBI_RANGE_PAGES(31, (scale))); \
(__pages >> (5 * (scale) + 1)) - 1; \
})

/*
* TLB Invalidation
@@ -351,29 +357,25 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* entries one by one at the granularity of 'stride'. If the TLB
* range ops are supported, then:
*
* 1. If 'pages' is odd, flush the first page through non-range
* operations;
*
* 2. For remaining pages: the minimum range granularity is decided
* by 'scale', so multiple range TLBI operations may be required.
* Start from scale = 0, flush the corresponding number of pages
* ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
* until no pages left.
* 1. The minimum range granularity is decided by 'scale', so multiple range
* TLBI operations may be required. Start from scale = 3, flush the largest
* possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
* requested range, then decrement scale and continue until one or zero pages
* are left.
*
* Note that certain ranges can be represented by either num = 31 and
* scale or num = 0 and scale + 1. The loop below favours the latter
* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
* 2. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user) \
do { \
int num = 0; \
int scale = 0; \
int scale = 3; \
unsigned long addr; \
\
while (pages > 0) { \
if (!system_supports_tlb_range() || \
pages % 2 == 1) { \
pages == 1) { \
addr = __TLBI_VADDR(start, asid); \
__tlbi_level(op, addr, tlb_level); \
if (tlbi_user) \
@@ -393,7 +395,7 @@ do { \
start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
pages -= __TLBI_RANGE_PAGES(num, scale); \
} \
scale++; \
scale--; \
} \
} while (0)
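
For intuition, a hedged userspace sketch (not kernel code) of how the reworked loop decomposes a request of at most MAX_TLBI_RANGE_PAGES pages: start at scale = 3, emit the largest range operation that fits, decrement scale, and finish any single remaining page with a non-range flush. The page count is an arbitrary example and the stride is assumed to be one page:

#include <stdio.h>

/* (num + 1) * 2^(5*scale + 1) pages, mirroring __TLBI_RANGE_PAGES() */
#define RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))

/* Clamped 'num' for one operation, mirroring the new __TLBI_RANGE_NUM() */
static long range_num(unsigned long pages, int scale)
{
	unsigned long max = RANGE_PAGES(31, scale);
	unsigned long clamped = pages < max ? pages : max;

	return (long)(clamped >> (5 * scale + 1)) - 1;
}

int main(void)
{
	unsigned long pages = 131071;	/* arbitrary example, <= MAX_TLBI_RANGE_PAGES */
	int scale = 3;

	while (pages > 0) {
		if (pages == 1) {
			printf("single-page TLBI\n");
			pages--;
			continue;
		}

		long num = range_num(pages, scale);

		if (num >= 0) {
			printf("range TLBI: scale=%d num=%ld -> %lu pages\n",
			       scale, num, RANGE_PAGES(num, scale));
			pages -= RANGE_PAGES(num, scale);
		}
		scale--;
	}
	return 0;
}

For 131071 pages this emits four range operations (65536, 63488, 1984 and 62 pages) followed by one single-page flush, which is the behaviour the updated comment describes.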

5 changes: 5 additions & 0 deletions arch/arm64/kernel/head.S
@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2

mov_q x0, INIT_SCTLR_EL2_MMU_OFF
pre_disable_mmu_workaround
msr sctlr_el2, x0
isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
3 changes: 0 additions & 3 deletions arch/arm64/mm/pageattr.c
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);

if (!can_set_direct_map())
return true;

pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
10 changes: 2 additions & 8 deletions arch/powerpc/include/asm/ftrace.h
@@ -20,14 +20,6 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
addr += MCOUNT_INSN_SIZE;

return addr;
}

unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp);

@@ -142,8 +134,10 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
void ftrace_free_init_tramp(void);
unsigned long ftrace_call_adjust(unsigned long addr);
#else
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#endif
#endif /* !__ASSEMBLY__ */

1 change: 1 addition & 0 deletions arch/powerpc/include/asm/sections.h
@@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;

extern char __head_end[];
extern char __srwx_boundary[];
extern char __exittext_begin[], __exittext_end[];

/* Patch sites */
extern s32 patch__call_flush_branch_caches1;
12 changes: 12 additions & 0 deletions arch/powerpc/kernel/trace/ftrace.c
@@ -27,10 +27,22 @@
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
#include <asm/sections.h>

#define NUM_FTRACE_TRAMPS 2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

unsigned long ftrace_call_adjust(unsigned long addr)
{
if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
return 0;

if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
addr += MCOUNT_INSN_SIZE;

return addr;
}

static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
ppc_inst_t op;
5 changes: 5 additions & 0 deletions arch/powerpc/kernel/trace/ftrace_64_pg.c
@@ -37,6 +37,11 @@
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}

static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
2 changes: 2 additions & 0 deletions arch/powerpc/kernel/vmlinux.lds.S
@@ -281,7 +281,9 @@ SECTIONS
* to deal with references from __bug_table
*/
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
__exittext_begin = .;
EXIT_TEXT
__exittext_end = .;
}

. = ALIGN(PAGE_SIZE);
3 changes: 3 additions & 0 deletions arch/x86/include/asm/barrier.h
@@ -79,6 +79,9 @@ do { \
#define __smp_mb__before_atomic() do { } while (0)
#define __smp_mb__after_atomic() do { } while (0)

/* Writing to CR3 provides a full memory barrier in switch_mm(). */
#define smp_mb__after_switch_mm() do { } while (0)

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */
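Presumably the no-op above pairs with a generic fallback of roughly this shape in include/asm-generic/barrier.h, so architectures whose switch_mm() does not already imply a full barrier get a real smp_mb(); this is a hedged sketch of the pattern, not a quote of the header:

#ifndef smp_mb__after_switch_mm
/* Fall back to a full barrier when the architecture gives no cheaper guarantee. */
#define smp_mb__after_switch_mm()	smp_mb()
#endif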
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -828,6 +828,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
bool is_amd_compatible;

/*
* FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
11 changes: 7 additions & 4 deletions arch/x86/kernel/cpu/bugs.c
@@ -1651,7 +1651,8 @@ static void __init bhi_select_mitigation(void)
return;

/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
spec_ctrl_disable_kernel_rrsba();
if (rrsba_disabled)
return;
@@ -2803,11 +2804,13 @@ static const char *spectre_bhi_state(void)
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
return "; BHI: BHI_DIS_S";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
rrsba_disabled)
return "; BHI: Retpoline";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";
6 changes: 3 additions & 3 deletions arch/x86/kernel/cpu/cpuid-deps.c
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
1 change: 1 addition & 0 deletions arch/x86/kvm/cpuid.c
@@ -362,6 +362,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)

kvm_update_pv_runtime(vcpu);

vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

10 changes: 10 additions & 0 deletions arch/x86/kvm/cpuid.h
@@ -125,6 +125,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
3 changes: 2 additions & 1 deletion arch/x86/kvm/lapic.c
@@ -2772,7 +2772,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
if (r && lvt_type == APIC_LVTPC)
if (r && lvt_type == APIC_LVTPC &&
guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}
2 changes: 1 addition & 1 deletion arch/x86/kvm/mmu/mmu.c
@@ -4788,7 +4788,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
guest_cpuid_is_amd_compatible(vcpu));
}

static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
21 changes: 16 additions & 5 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -1506,6 +1516,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}

static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
{
/*
* All TDP MMU shadow pages share the same role as their root, aside
* from level, so it is valid to key off any shadow page to determine if
* write protection is needed for an entire tree.
*/
return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
}

/*
* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
@@ -1516,7 +1526,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;

@@ -1530,7 +1541,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
if (!is_shadow_present_pte(iter.old_spte))
continue;

KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (!(iter.old_spte & dbit))
@@ -1578,8 +1589,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;

lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1591,7 +1602,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;

KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (iter.level > PG_LEVEL_4K ||
