Merge tag 'v6.4.11' into 6.4
This is the 6.4.11 stable release
xanmod committed Aug 16, 2023
2 parents 87668d4 + eb3cdb5 commit c1d593d
Showing 238 changed files with 2,515 additions and 1,033 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 4
-SUBLEVEL = 10
+SUBLEVEL = 11
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth

3 changes: 1 addition & 2 deletions arch/alpha/kernel/setup.c
@@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
 {
         struct memclust_struct * cluster;
         struct memdesc_struct * memdesc;
15 changes: 4 additions & 11 deletions arch/arm64/kvm/arm.c
@@ -1800,8 +1800,6 @@ static void _kvm_arch_hardware_enable(void *discard)
 
 int kvm_arch_hardware_enable(void)
 {
-        int was_enabled;
-
         /*
          * Most calls to this function are made with migration
          * disabled, but not with preemption disabled. The former is
@@ -1810,13 +1808,10 @@ int kvm_arch_hardware_enable(void)
          */
         preempt_disable();
 
-        was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
         _kvm_arch_hardware_enable(NULL);
 
-        if (!was_enabled) {
-                kvm_vgic_cpu_up();
-                kvm_timer_cpu_up();
-        }
+        kvm_vgic_cpu_up();
+        kvm_timer_cpu_up();
 
         preempt_enable();
 
@@ -1833,10 +1828,8 @@ static void _kvm_arch_hardware_disable(void *discard)
 
 void kvm_arch_hardware_disable(void)
 {
-        if (__this_cpu_read(kvm_arm_hardware_enabled)) {
-                kvm_timer_cpu_down();
-                kvm_vgic_cpu_down();
-        }
+        kvm_timer_cpu_down();
+        kvm_vgic_cpu_down();
 
         if (!is_protected_kvm_enabled())
                 _kvm_arch_hardware_disable(NULL);
2 changes: 1 addition & 1 deletion arch/parisc/Kconfig.debug
@@ -2,7 +2,7 @@
 #
 config LIGHTWEIGHT_SPINLOCK_CHECK
         bool "Enable lightweight spinlock checks"
-        depends on SMP && !DEBUG_SPINLOCK
+        depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK
         default y
         help
           Add checks with low performance impact to the spinlock functions
2 changes: 0 additions & 2 deletions arch/parisc/include/asm/spinlock.h
@@ -7,8 +7,6 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-#define SPINLOCK_BREAK_INSN     0x0000c006      /* break 6,6 */
-
 static inline void arch_spin_val_check(int lock_val)
 {
         if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
6 changes: 6 additions & 0 deletions arch/parisc/include/asm/spinlock_types.h
@@ -4,6 +4,10 @@
 
 #define __ARCH_SPIN_LOCK_UNLOCKED_VAL   0x1a46
 
+#define SPINLOCK_BREAK_INSN     0x0000c006      /* break 6,6 */
+
+#ifndef __ASSEMBLY__
+
 typedef struct {
 #ifdef CONFIG_PA20
         volatile unsigned int slock;
@@ -27,6 +31,8 @@ typedef struct {
         volatile unsigned int counter;
 } arch_rwlock_t;
 
+#endif /* __ASSEMBLY__ */
+
 #define __ARCH_RW_LOCK_UNLOCKED__       0x01000000
 #define __ARCH_RW_LOCK_UNLOCKED        { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
                                         .counter = __ARCH_RW_LOCK_UNLOCKED__ }
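
Moving SPINLOCK_BREAK_INSN above the new #ifndef __ASSEMBLY__ guard lets assembly sources (syscall.S, below) include this header for the constant while the assembler never sees the C-only typedefs. A minimal sketch of the pattern, with hypothetical names:

    /* shared_header.h -- usable from both .c and .S files */
    #define MY_MAGIC_VAL    0x1a46          /* plain constant: fine for asm */

    #ifndef __ASSEMBLY__                    /* defined by kbuild for .S files */
    typedef struct {
            volatile unsigned int slock;    /* C-only: hidden from asm */
    } my_lock_t;
    #endif /* __ASSEMBLY__ */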
15 changes: 5 additions & 10 deletions arch/parisc/kernel/sys_parisc.c
@@ -26,17 +26,12 @@
 #include <linux/compat.h>
 
 /*
- * Construct an artificial page offset for the mapping based on the virtual
+ * Construct an artificial page offset for the mapping based on the physical
  * address of the kernel file mapping variable.
- * If filp is zero the calculated pgoff value aliases the memory of the given
- * address. This is useful for io_uring where the mapping shall alias a kernel
- * address and a userspace adress where both the kernel and the userspace
- * access the same memory region.
  */
-#define GET_FILP_PGOFF(filp, addr)              \
-        ((filp ? (((unsigned long) filp->f_mapping) >> 8)       \
-         & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)        \
-        + (addr >> PAGE_SHIFT))
+#define GET_FILP_PGOFF(filp)            \
+        (filp ? (((unsigned long) filp->f_mapping) >> 8)        \
+         & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
 
 static unsigned long shared_align_offset(unsigned long filp_pgoff,
                                          unsigned long pgoff)
@@ -116,7 +111,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
                 do_color_align = 0;
         if (filp || (flags & MAP_SHARED))
                 do_color_align = 1;
-        filp_pgoff = GET_FILP_PGOFF(filp, addr);
+        filp_pgoff = GET_FILP_PGOFF(filp);
 
         if (flags & MAP_FIXED) {
                 /* Even MAP_FIXED mappings must reside within TASK_SIZE */
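
For context, GET_FILP_PGOFF() feeds the VIPT cache-color calculation: every shared mapping of the same file must land on the same virtual color or the mappings alias in the cache. A sketch of the consumer, assuming its body matches the mainline helper whose signature appears in the hunk above:

    static unsigned long shared_align_offset(unsigned long filp_pgoff,
                                             unsigned long pgoff)
    {
            /* same (filp_pgoff + pgoff) modulo SHM_COLOUR pages
             * => same cache color for every mapping of the file */
            return (filp_pgoff + pgoff) << PAGE_SHIFT;
    }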
23 changes: 20 additions & 3 deletions arch/parisc/kernel/syscall.S
@@ -39,6 +39,7 @@ registers).
 #include <asm/assembly.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
+#include <asm/spinlock_types.h>
 
 #include <linux/linkage.h>
 
@@ -66,6 +67,16 @@ registers).
         stw     \reg1, 0(%sr2,\reg2)
         .endm
 
+        /* raise exception if spinlock content is not zero or
+         * __ARCH_SPIN_LOCK_UNLOCKED_VAL */
+        .macro  spinlock_check spin_val,tmpreg
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+        ldi     __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
+        andcm,= \spin_val, \tmpreg, %r0
+        .word   SPINLOCK_BREAK_INSN
+#endif
+        .endm
+
         .text
 
         .import syscall_exit,code
@@ -508,7 +519,8 @@ lws_start:
 
 lws_exit_noerror:
         lws_pagefault_enable    %r1,%r21
-        stw,ma  %r20, 0(%sr2,%r20)
+        ldi     __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+        stw,ma  %r21, 0(%sr2,%r20)
         ssm     PSW_SM_I, %r0
         b       lws_exit
         copy    %r0, %r21
@@ -521,7 +533,8 @@ lws_wouldblock:
 
 lws_pagefault:
         lws_pagefault_enable    %r1,%r21
-        stw,ma  %r20, 0(%sr2,%r20)
+        ldi     __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+        stw,ma  %r21, 0(%sr2,%r20)
         ssm     PSW_SM_I, %r0
         ldo     3(%r0),%r28
         b       lws_exit
@@ -619,6 +632,7 @@ lws_compare_and_swap:
 
         /* Try to acquire the lock */
         LDCW    0(%sr2,%r20), %r28
+        spinlock_check  %r28, %r21
         comclr,<>       %r0, %r28, %r0
         b,n     lws_wouldblock
 
@@ -772,6 +786,7 @@ cas2_lock_start:
 
         /* Try to acquire the lock */
         LDCW    0(%sr2,%r20), %r28
+        spinlock_check  %r28, %r21
         comclr,<>       %r0, %r28, %r0
         b,n     lws_wouldblock
 
@@ -1001,6 +1016,7 @@ atomic_xchg_start:
 
         /* Try to acquire the lock */
         LDCW    0(%sr2,%r20), %r28
+        spinlock_check  %r28, %r21
         comclr,<>       %r0, %r28, %r0
         b,n     lws_wouldblock
 
@@ -1199,6 +1215,7 @@ atomic_store_start:
 
         /* Try to acquire the lock */
         LDCW    0(%sr2,%r20), %r28
+        spinlock_check  %r28, %r21
         comclr,<>       %r0, %r28, %r0
         b,n     lws_wouldblock
 
@@ -1330,7 +1347,7 @@ ENTRY(lws_lock_start)
         /* lws locks */
         .rept 256
         /* Keep locks aligned at 16-bytes */
-        .word 1
+        .word __ARCH_SPIN_LOCK_UNLOCKED_VAL
         .word 0
         .word 0
         .word 0
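
Taken together, the LWS paths now initialize, check, and release lock words with __ARCH_SPIN_LOCK_UNLOCKED_VAL instead of a bare 1, so a corrupted lock word can be trapped. A rough C analogue of the spinlock_check macro, a sketch only (the trap helper is hypothetical; the real macro raises a "break 6,6" exception):

    static inline void spinlock_check(unsigned int spin_val)
    {
            /* after LDCW, the value read back must be 0 (lock was held) or
             * __ARCH_SPIN_LOCK_UNLOCKED_VAL (lock was free); any other bit
             * pattern means the lock word was corrupted */
            if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) &&
                (spin_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) != 0)
                    report_corrupted_lock();    /* hypothetical trap helper */
    }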
16 changes: 8 additions & 8 deletions arch/riscv/include/asm/mmio.h
@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * Relaxed I/O memory access primitives. These follow the Device memory
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
  */
 /* FIXME: These are now the same as asm-generic */
 #define __io_rbr()              do {} while (0)
@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()       do {} while (0)
-#define __io_ar(v)      __asm__ __volatile__ ("fence i,r" : : : "memory")
-#define __io_bw()       __asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_ar(v)      ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw()       ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
 #define __io_aw()       mmiowb_set_pending()
 
 #define readb(c)        ({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
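
Widening the successor set from "r" to "ir" keeps a completed MMIO read ordered before subsequent device input as well as subsequent Normal reads, which is what lets the updated comment promise readX()-before-delay() ordering. A hypothetical driver fragment showing the pattern this protects (assumes <linux/io.h> and <linux/delay.h>; the register offsets are made up):

    static void widget_restart(void __iomem *base)
    {
            writel(1, base + 0x0);          /* CTRL: issue reset */
            (void)readl(base + 0x4);        /* STATUS: __io_ar()'s fence keeps
                                             * this read from drifting past
                                             * the delay below */
            udelay(10);                     /* datasheet-mandated settle time */
            writel(2, base + 0x0);          /* CTRL: resume */
    }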
2 changes: 2 additions & 0 deletions arch/riscv/include/asm/pgtable.h
@@ -188,6 +188,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
 #define PAGE_KERNEL_IO          __pgprot(_PAGE_IOREMAP)
 
 extern pgd_t swapper_pg_dir[];
+extern pgd_t trampoline_pg_dir[];
+extern pgd_t early_pg_dir[];
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_present(pmd_t pmd)
3 changes: 2 additions & 1 deletion arch/riscv/kernel/elf_kexec.c
@@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
                 kbuf.buffer = initrd;
                 kbuf.bufsz = kbuf.memsz = initrd_len;
                 kbuf.buf_align = PAGE_SIZE;
-                kbuf.top_down = false;
+                kbuf.top_down = true;
                 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
                 ret = kexec_add_buffer(&kbuf);
                 if (ret)
@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                  * sym, instead of searching the whole relsec.
                  */
                 case R_RISCV_PCREL_HI20:
+                case R_RISCV_CALL_PLT:
                 case R_RISCV_CALL:
                         *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
                                 ENCODE_UJTYPE_IMM(val - addr);
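
Background, stated as an assumption from the RISC-V psABI rather than from this commit: R_RISCV_CALL and R_RISCV_CALL_PLT both relocate an auipc+jalr pair, and current toolchains emit the _PLT variant even for direct calls, so the purgatory relocator must patch both identically:

    1:  auipc   ra, %pcrel_hi(sym)      # upper 20 bits of (sym - pc)
        jalr    ra, %pcrel_lo(1b)(ra)   # lower 12 bits complete the offset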
16 changes: 11 additions & 5 deletions arch/riscv/mm/init.c
@@ -26,12 +26,13 @@
 #include <linux/kfence.h>
 
 #include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm/soc.h>
 #include <asm/io.h>
-#include <asm/ptdump.h>
 #include <asm/numa.h>
+#include <asm/pgtable.h>
+#include <asm/ptdump.h>
+#include <asm/sections.h>
+#include <asm/soc.h>
+#include <asm/tlbflush.h>
 
 #include "../kernel/head.h"
 
@@ -214,8 +215,13 @@ static void __init setup_bootmem(void)
         memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
         phys_ram_end = memblock_end_of_DRAM();
+
+        /*
+         * Make sure we align the start of the memory on a PMD boundary so that
+         * at worst, we map the linear mapping with PMD mappings.
+         */
         if (!IS_ENABLED(CONFIG_XIP_KERNEL))
-                phys_ram_base = memblock_start_of_DRAM();
+                phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
 
         /*
          * In 64-bit, any use of __va/__pa before this point is wrong as we
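
A worked example of the masking, assuming 4 KiB base pages so PMD_SIZE is 2 MiB (a standalone sketch, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)            /* 2 MiB */
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
            uint64_t dram_start = 0x80100000;   /* hypothetical, only 1 MiB aligned */
            /* prints 0x80000000: rounded down so the whole linear map can be
             * covered by 2 MiB PMD leaf mappings at worst */
            printf("phys_ram_base = %#llx\n",
                   (unsigned long long)(dram_start & PMD_MASK));
            return 0;
    }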
1 change: 0 additions & 1 deletion arch/riscv/mm/kasan_init.c
@@ -22,7 +22,6 @@
  * region is not and then we have to go down to the PUD level.
  */
 
-extern pgd_t early_pg_dir[PTRS_PER_PGD];
 pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
 pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
9 changes: 8 additions & 1 deletion arch/x86/boot/compressed/idt_64.c
@@ -63,7 +63,14 @@ void load_stage2_idt(void)
         set_idt_entry(X86_TRAP_PF, boot_page_fault);
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-        set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+        /*
+         * Clear the second stage #VC handler in case guest types
+         * needing #VC have not been detected.
+         */
+        if (sev_status & BIT(1))
+                set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+        else
+                set_idt_entry(X86_TRAP_VC, NULL);
 #endif
 
         load_boot_idt(&boot_idt_desc);
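
BIT(1) here tests the SEV-ES-enabled flag in the SEV status MSR: guest types from SEV-ES upward route certain instruction exits through the #VC handler, while plain SEV (bit 0 only) and bare metal never should. For reference, the relevant definitions, assumed to match arch/x86/include/asm/msr-index.h:

    #define MSR_AMD64_SEV                   0xc0010131
    #define MSR_AMD64_SEV_ENABLED           BIT_ULL(0)  /* SEV guest */
    #define MSR_AMD64_SEV_ES_ENABLED        BIT_ULL(1)  /* #VC handling required */
    #define MSR_AMD64_SEV_SNP_ENABLED       BIT_ULL(2)  /* SEV-SNP guest */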
37 changes: 35 additions & 2 deletions arch/x86/boot/compressed/sev.c
@@ -352,21 +352,54 @@ void sev_enable(struct boot_params *bp)
         if (bp)
                 bp->cc_blob_address = 0;
 
+        /*
+         * Do an initial SEV capability check before snp_init() which
+         * loads the CPUID page and the same checks afterwards are done
+         * without the hypervisor and are trustworthy.
+         *
+         * If the HV fakes SEV support, the guest will crash'n'burn
+         * which is good enough.
+         */
+
+        /* Check for the SME/SEV support leaf */
+        eax = 0x80000000;
+        ecx = 0;
+        native_cpuid(&eax, &ebx, &ecx, &edx);
+        if (eax < 0x8000001f)
+                return;
+
+        /*
+         * Check for the SME/SEV feature:
+         *   CPUID Fn8000_001F[EAX]
+         *   - Bit 0 - Secure Memory Encryption support
+         *   - Bit 1 - Secure Encrypted Virtualization support
+         *   CPUID Fn8000_001F[EBX]
+         *   - Bits 5:0 - Pagetable bit position used to indicate encryption
+         */
+        eax = 0x8000001f;
+        ecx = 0;
+        native_cpuid(&eax, &ebx, &ecx, &edx);
+        /* Check whether SEV is supported */
+        if (!(eax & BIT(1)))
+                return;
+
         /*
          * Setup/preliminary detection of SNP. This will be sanity-checked
          * against CPUID/MSR values later.
          */
         snp = snp_init(bp);
 
-        /* Check for the SME/SEV support leaf */
+        /* Now repeat the checks with the SNP CPUID table. */
+
+        /* Recheck the SME/SEV support leaf */
         eax = 0x80000000;
         ecx = 0;
         native_cpuid(&eax, &ebx, &ecx, &edx);
         if (eax < 0x8000001f)
                 return;
 
         /*
-         * Check for the SME/SEV feature:
+         * Recheck for the SME/SEV feature:
          *   CPUID Fn8000_001F[EAX]
          *   - Bit 0 - Secure Memory Encryption support
          *   - Bit 1 - Secure Encrypted Virtualization support
4 changes: 2 additions & 2 deletions arch/x86/entry/vdso/vma.c
@@ -299,8 +299,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
         /* Round the lowest possible end address up to a PMD boundary. */
         end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-        if (end >= TASK_SIZE_MAX)
-                end = TASK_SIZE_MAX;
+        if (end >= DEFAULT_MAP_WINDOW)
+                end = DEFAULT_MAP_WINDOW;
         end -= len;
 
         if (end > start) {
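
The distinction matters on 5-level-paging machines, where the two limits diverge. Reference values, stated as assumptions about the x86-64 definitions rather than taken from this diff:

    /*
     * 4-level paging:
     *   TASK_SIZE_MAX = DEFAULT_MAP_WINDOW = (1UL << 47) - PAGE_SIZE
     * 5-level paging (LA57, chosen at boot):
     *   TASK_SIZE_MAX      = (1UL << 56) - PAGE_SIZE
     *   DEFAULT_MAP_WINDOW = (1UL << 47) - PAGE_SIZE   (unchanged)
     */

Userspace only receives addresses above DEFAULT_MAP_WINDOW when it asks for them with an explicit high mmap() hint, so clamping the vDSO to the default window keeps it within the range legacy applications expect pointers to occupy.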
