Merge tag 'v6.8.8' into 6.8
This is the 6.8.8 stable release
xanmod committed Apr 28, 2024
2 parents 206b680 + 9fbb5fc commit d497acb
Showing 178 changed files with 1,902 additions and 827 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 8
-SUBLEVEL = 7
+SUBLEVEL = 8
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

5 changes: 5 additions & 0 deletions arch/arm64/kernel/head.S
@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2
+
+mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+pre_disable_mmu_workaround
+msr sctlr_el2, x0
+isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
3 changes: 0 additions & 3 deletions arch/arm64/mm/pageattr.c
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);

-if (!can_set_direct_map())
-    return true;
-
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
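
Background on this deletion: with rodata_full, DEBUG_PAGEALLOC and KFENCE all disabled, can_set_direct_map() is false, so kernel_page_present() answered "present" for every page without ever consulting the page tables. Callers need an honest answer before touching a page. Roughly how hibernation consumes it (a paraphrase of kernel/power/snapshot.c, not part of this diff):

    static void safe_copy_page(void *dst, struct page *s_page)
    {
        if (kernel_page_present(s_page)) {
            do_copy_page(dst, page_address(s_page));
        } else {
            /* map the page temporarily rather than faulting on it */
            hibernate_map_page(s_page);
            do_copy_page(dst, page_address(s_page));
            hibernate_unmap_page(s_page);
        }
    }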
3 changes: 3 additions & 0 deletions arch/x86/include/asm/barrier.h
@@ -79,6 +79,9 @@ do { \
#define __smp_mb__before_atomic() do { } while (0)
#define __smp_mb__after_atomic() do { } while (0)

+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm() do { } while (0)
+
#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */
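
Why a no-op is correct here: switch_mm() on x86 always writes CR3, a serializing operation, so the membarrier path gets its full barrier for free. Other architectures inherit the generic fallback, which is (roughly, from include/asm-generic/barrier.h) a real barrier:

    #ifndef smp_mb__after_switch_mm
    #define smp_mb__after_switch_mm()    smp_mb()
    #endif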
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -854,6 +854,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
+bool is_amd_compatible;

/*
* FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
11 changes: 7 additions & 4 deletions arch/x86/kernel/cpu/bugs.c
@@ -1651,7 +1651,8 @@ static void __init bhi_select_mitigation(void)
return;

/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
-if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+    !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
spec_ctrl_disable_kernel_rrsba();
if (rrsba_disabled)
return;
@@ -2803,11 +2804,13 @@
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
return "; BHI: BHI_DIS_S";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
-else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+    !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+    rrsba_disabled)
return "; BHI: Retpoline";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";
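
The added !X86_FEATURE_RETPOLINE_LFENCE checks matter because X86_FEATURE_RETPOLINE remains set when the LFENCE flavor is chosen (e.g. spectre_v2=retpoline,lfence), yet only the full call/ret thunk avoids the history-steered indirect branch that BHI attacks. A simplified comparison of the two thunks, paraphrasing arch/x86/lib/retpoline.S:

    /*
     * Full retpoline: the indirect target is reached through a ret, so the
     * indirect branch predictor (and the branch history it keys on) is
     * never consulted:
     *
     *         call 1f
     *     2:  pause; lfence; jmp 2b
     *     1:  mov %rax, (%rsp)
     *         ret
     *
     * LFENCE "retpoline" (the AMD flavor): still a genuine indirect branch,
     * merely preceded by a speculation fence, so it is not a BHI mitigation:
     *
     *         lfence
     *         jmp *%rax
     */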
6 changes: 3 additions & 3 deletions arch/x86/kernel/cpu/cpuid-deps.c
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
+{ X86_FEATURE_VAES, X86_FEATURE_AVX },
+{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
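
For context: cpuid_deps[] drives transitive clearing, i.e. disabling a feature also disables everything that lists it as a dependency, so each entry should name the weakest ISA the feature truly requires. GFNI has SSE-encoded forms and VAES/VPCLMULQDQ have VEX forms, so hanging them off AVX512VL cleared them on parts where only AVX512 is unavailable. A simplified sketch of how the table is consumed (modeled on do_clear_cpu_cap() in this file; the helper name is mine):

    static void clear_feature_and_dependents(unsigned long *caps,
                                             unsigned int feature)
    {
        const struct cpuid_dep *d;
        bool changed;

        clear_bit(feature, caps);
        do {    /* sweep until a pass clears nothing new */
            changed = false;
            for (d = cpuid_deps; d->feature; d++) {
                if (test_bit(d->feature, caps) &&
                    !test_bit(d->depends, caps)) {
                    clear_bit(d->feature, caps);
                    changed = true;
                }
            }
        } while (changed);
    }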
1 change: 1 addition & 0 deletions arch/x86/kvm/cpuid.c
@@ -366,6 +366,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)

kvm_update_pv_runtime(vcpu);

+vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

10 changes: 10 additions & 0 deletions arch/x86/kvm/cpuid.h
@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+    return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
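
These helpers read the bool snapshotted in kvm_vcpu_after_set_cpuid() (the cpuid.c hunk above) instead of re-deriving the vendor from guest CPUID on every call; the derivation walks the CPUID entry array, which is needlessly slow in hot paths and unsafe where the entries may be changing underfoot. The uncached check being replaced looks roughly like this (paraphrasing the existing helper in this header):

    static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
    {
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0);
        return best &&
               (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
                is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
    }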
3 changes: 2 additions & 1 deletion arch/x86/kvm/lapic.c
@@ -2771,7 +2771,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
-if (r && lvt_type == APIC_LVTPC)
+if (r && lvt_type == APIC_LVTPC &&
+    guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}
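
Rationale, as I read the upstream fix: Intel APICs auto-mask LVTPC when delivering a PMI and the guest's PMI handler is expected to unmask it; AMD never sets the mask bit, so an AMD guest would not unmask LVTPC and would lose every PMI after the first under the old unconditional masking. The Intel-guest half of that handshake is roughly one line (see intel_pmu_handle_irq() in arch/x86/events/intel/core.c):

    /* re-arm the LVT entry; writing APIC_DM_NMI leaves the mask bit clear */
    apic_write(APIC_LVTPC, APIC_DM_NMI);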
5 changes: 3 additions & 2 deletions arch/x86/kvm/mmu/mmu.c
@@ -4922,7 +4922,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
-guest_cpuid_is_amd_or_hygon(vcpu));
+guest_cpuid_is_amd_compatible(vcpu));
}

static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -7388,7 +7388,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* by the memslot, KVM can't use a hugepage due to the
* misaligned address regardless of memory attributes.
*/
-if (gfn >= slot->base_gfn) {
+if (gfn >= slot->base_gfn &&
+    gfn + nr_pages <= slot->base_gfn + slot->npages) {
if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
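
For the second hunk, the new upper bound matters for memslots that are not hugepage aligned. A worked example with made-up numbers:

    /*
     * Slot covers gfns [0x800, 0xa00); candidate 2MiB range starts at
     * gfn 0x900 with nr_pages = 0x200:
     *
     *     gfn >= slot->base_gfn                  0x900 >= 0x800  -> true
     *     gfn + nr_pages <= base_gfn + npages    0xb00 <= 0xa00  -> false
     *
     * The old one-sided check accepted this range, letting
     * hugepage_has_attrs() read attributes past the end of the slot.
     */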
21 changes: 16 additions & 5 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -1498,6 +1498,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}

+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+    /*
+     * All TDP MMU shadow pages share the same role as their root, aside
+     * from level, so it is valid to key off any shadow page to determine if
+     * write protection is needed for an entire tree.
+     */
+    return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
+
/*
* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
@@ -1508,7 +1518,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
-u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+                                                    shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;

@@ -1523,7 +1534,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;

-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (!(iter.old_spte & dbit))
@@ -1570,8 +1581,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
-u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
-           shadow_dirty_mask;
+const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
+                                                                shadow_dirty_mask;
struct tdp_iter iter;

lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1583,7 +1594,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;

-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (iter.level > PG_LEVEL_4K ||
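
The reason for keying off the root: with PML-based dirty logging, SPTEs belonging to an L2 (nested) tree must be write-protected instead of having their dirty bits cleared, even though A/D bits are enabled globally, and that is a property of the tree's role rather than of the host. The helper consulted above is roughly (from arch/x86/kvm/mmu/mmu_internal.h):

    static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
    {
        /* PML would log L2 GPAs for guest_mode pages; write-protect instead */
        return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
    }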
24 changes: 22 additions & 2 deletions arch/x86/kvm/vmx/vmx.c
@@ -7857,8 +7857,28 @@ static u64 vmx_get_perf_capabilities(void)

if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
-if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
-    perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+
+/*
+ * Disallow adaptive PEBS as it is functionally broken, can be
+ * used by the guest to read *host* LBRs, and can be used to
+ * bypass userspace event filters. To correctly and safely
+ * support adaptive PEBS, KVM needs to:
+ *
+ * 1. Account for the ADAPTIVE flag when (re)programming fixed
+ *    counters.
+ *
+ * 2. Gain support from perf (or take direct control of counter
+ *    programming) to support events without adaptive PEBS
+ *    enabled for the hardware counter.
+ *
+ * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+ *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+ *
+ * 4. Document which PMU events are effectively exposed to the
+ *    guest via adaptive PEBS, and make adaptive PEBS mutually
+ *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+ */
+perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}

return perf_cap;
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -3422,7 +3422,7 @@ static bool is_mci_status_msr(u32 msr)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
-if (guest_cpuid_is_amd_or_hygon(vcpu))
+if (guest_cpuid_is_amd_compatible(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));

return false;
29 changes: 19 additions & 10 deletions block/bdev.c
@@ -639,6 +639,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
bdev_write_inode(bdev);
}

+static void blkdev_put_whole(struct block_device *bdev)
+{
+    if (atomic_dec_and_test(&bdev->bd_openers))
+        blkdev_flush_mapping(bdev);
+    if (bdev->bd_disk->fops->release)
+        bdev->bd_disk->fops->release(bdev->bd_disk);
+}
+
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@@ -657,20 +665,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)

if (!atomic_read(&bdev->bd_openers))
set_init_blocksize(bdev);
-if (test_bit(GD_NEED_PART_SCAN, &disk->state))
-    bdev_disk_changed(disk, false);
atomic_inc(&bdev->bd_openers);
+if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
+    /*
+     * Only return scanning errors if we are called from contexts
+     * that explicitly want them, e.g. the BLKRRPART ioctl.
+     */
+    ret = bdev_disk_changed(disk, false);
+    if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
+        blkdev_put_whole(bdev);
+        return ret;
+    }
+}
return 0;
}

-static void blkdev_put_whole(struct block_device *bdev)
-{
-    if (atomic_dec_and_test(&bdev->bd_openers))
-        blkdev_flush_mapping(bdev);
-    if (bdev->bd_disk->fops->release)
-        bdev->bd_disk->fops->release(bdev->bd_disk);
-}
-
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
struct gendisk *disk = part->bd_disk;
3 changes: 2 additions & 1 deletion block/ioctl.c
@@ -556,7 +556,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
-return disk_scan_partitions(bdev->bd_disk, mode);
+return disk_scan_partitions(bdev->bd_disk,
+                            mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
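
Paired with the block/bdev.c change above, only the BLKRRPART path opts in to strict scanning, so a failed partition rescan now reaches userspace while ordinary opens keep ignoring scan errors. A minimal userspace sketch (the device path is hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    int main(void)
    {
        int fd = open("/dev/sda", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* with BLK_OPEN_STRICT_SCAN set kernel-side, a failed rescan
         * surfaces here instead of silently "succeeding" */
        if (ioctl(fd, BLKRRPART) < 0)
            perror("BLKRRPART");
        close(fd);
        return 0;
    }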
2 changes: 1 addition & 1 deletion drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
-while (tmpx < vc->vc_cols - 1) {
+while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
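
get_word() copies characters from the console into a fixed-size buffer, and the loop was bounded only by the console width, so a console wider than the buffer could overflow it. The fix relies on buf being a true array in scope, where sizeof yields the array size. The general pattern (the helper names here are hypothetical):

    char buf[128];
    size_t cnt = 0;

    /* reserve the final byte for the terminator */
    while (have_more_chars() && cnt < sizeof(buf) - 1)
        buf[cnt++] = next_char();
    buf[cnt] = '\0';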
4 changes: 3 additions & 1 deletion drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;

read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-if (offset > buffer->data_size || read_size < sizeof(*hdr))
+if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+    !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
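
Objects in a binder buffer are located through caller-supplied offsets, and the driver elsewhere assumes object headers sit at u32-aligned offsets, so misaligned offsets are now rejected up front. IS_ALIGNED is the usual power-of-two alignment test, roughly (from include/linux/align.h):

    #define IS_ALIGNED(x, a)    (((x) & ((typeof(x))(a) - 1)) == 0)

    /* e.g. IS_ALIGNED(0x10, sizeof(u32)) is true; IS_ALIGNED(0x12, 4) is false */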
10 changes: 5 additions & 5 deletions drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)

static void __cold _credit_init_bits(size_t bits)
{
-static struct execute_work set_ready;
+static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;

@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)

if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-if (static_key_initialized)
-    execute_in_process_context(crng_set_ready, &set_ready);
+if (static_key_initialized && system_unbound_wq)
+    queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)

/*
* If we were initialized by the cpu or bootloader before jump labels
-* are initialized, then we should enable the static branch here, where
-* it's guaranteed that jump labels have been initialized.
+* or workqueues are initialized, then we should enable the static
+* branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
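
The bug class being fixed: _credit_init_bits() can run in atomic context, and execute_in_process_context() invokes its callback synchronously whenever the caller is not in interrupt context, so crng_set_ready(), which enables a static branch and may sleep, could run where sleeping is forbidden. A statically initialized work item queued with queue_work() always defers. The pattern as a standalone sketch (function names are mine, not from this diff):

    #include <linux/workqueue.h>

    static void set_ready_fn(struct work_struct *work)
    {
        /* runs later in process context; sleeping, e.g. in
         * static_branch_enable(), is fine here */
    }
    static DECLARE_WORK(set_ready, set_ready_fn);    /* no runtime init needed */

    static void notify_ready_from_atomic(void)
    {
        /* safe from hard IRQ or under spinlocks; the NULL test covers
         * calls made before workqueues come up during early boot */
        if (system_unbound_wq)
            queue_work(system_unbound_wq, &set_ready);
    }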
