Merge tag 'v6.1.46' into 6.1
This is the 6.1.46 stable release
xanmod committed Aug 16, 2023
2 parents fc7f1e4 + 6c44e13 commit 7705b75
Showing 189 changed files with 1,975 additions and 734 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 45
+SUBLEVEL = 46
 EXTRAVERSION =
 NAME = Curry Ramen

3 changes: 1 addition & 2 deletions arch/alpha/kernel/setup.c
@@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
 {
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
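
Background for the change above: __init functions are placed in .init.text and freed once boot completes, so a symbol that stays reachable afterwards, as the exported page_is_ram() does, must not carry the annotation. A minimal sketch of the failure mode, names hypothetical:

	/* Hypothetical: an exported symbol wrongly marked __init. */
	int __init probe_boot_table(void)	/* lives in .init.text */
	{
		return 1;
	}
	EXPORT_SYMBOL(probe_boot_table);	/* a module calling this after
						 * free_initmem() jumps into
						 * freed memory and faults */
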
1 change: 0 additions & 1 deletion arch/loongarch/Kconfig
@@ -10,7 +10,6 @@ config LOONGARCH
 	select ARCH_ENABLE_MEMORY_HOTPLUG
 	select ARCH_ENABLE_MEMORY_HOTREMOVE
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-	select ARCH_HAS_CPU_FINALIZE_INIT
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
6 changes: 0 additions & 6 deletions arch/loongarch/kernel/setup.c
@@ -12,7 +12,6 @@
  */
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <linux/cpu.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
@@ -81,11 +80,6 @@ const char *get_system_type(void)
 	return "generic-loongson-machine";
 }
 
-void __init arch_cpu_finalize_init(void)
-{
-	alternative_instructions();
-}
-
 static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
 {
 	const u8 *bp = ((u8 *) dm) + dm->length;
16 changes: 8 additions & 8 deletions arch/riscv/include/asm/mmio.h
@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * Relaxed I/O memory access primitives. These follow the Device memory
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
  */
 /* FIXME: These are now the same as asm-generic */
 #define __io_rbr()	do {} while (0)
@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
-#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
 #define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
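
Context for the second hunk: the kernel's I/O ordering rules (Documentation/memory-barriers.txt) require that a readX() complete before a subsequent delay() expires, which is what the strengthened fence provides. A hedged driver-style sketch of the pattern, register names hypothetical:

	/* Start a command, flush the posted write with a read, then honor a
	 * hypothetical 5us settle time from the datasheet. The udelay() is
	 * only meaningful if the read is ordered before the delay loop. */
	writel(CMD_START, base + REG_CTRL);
	(void)readl(base + REG_STATUS);		/* must complete first */
	udelay(5);
	writel(CMD_NEXT, base + REG_CTRL);
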
3 changes: 2 additions & 1 deletion arch/riscv/kernel/elf_kexec.c
@@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
 		kbuf.buffer = initrd;
 		kbuf.bufsz = kbuf.memsz = initrd_len;
 		kbuf.buf_align = PAGE_SIZE;
-		kbuf.top_down = false;
+		kbuf.top_down = true;
 		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 		ret = kexec_add_buffer(&kbuf);
 		if (ret)
@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 		 * sym, instead of searching the whole relsec.
 		 */
 	case R_RISCV_PCREL_HI20:
+	case R_RISCV_CALL_PLT:
 	case R_RISCV_CALL:
 		*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
			      ENCODE_UJTYPE_IMM(val - addr);
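
For orientation, the kexec_buf fields in the first hunk tell kexec_add_buffer() how to place the initrd segment; an annotated restatement (field semantics as in include/linux/kexec.h, comments added here):

	kbuf.buffer    = initrd;		/* source bytes to copy */
	kbuf.bufsz     = initrd_len;		/* size of the source */
	kbuf.memsz     = initrd_len;		/* size reserved at the target */
	kbuf.buf_align = PAGE_SIZE;		/* placement alignment */
	kbuf.top_down  = true;			/* scan free memory from the top,
						 * so the initrd lands high, clear
						 * of the region the new kernel
						 * unpacks into */
	kbuf.mem       = KEXEC_BUF_MEM_UNKNOWN;	/* let kexec pick the slot */
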
9 changes: 8 additions & 1 deletion arch/x86/boot/compressed/idt_64.c
@@ -63,7 +63,14 @@ void load_stage2_idt(void)
 	set_idt_entry(X86_TRAP_PF, boot_page_fault);
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+	/*
+	 * Clear the second stage #VC handler in case guest types
+	 * needing #VC have not been detected.
+	 */
+	if (sev_status & BIT(1))
+		set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+	else
+		set_idt_entry(X86_TRAP_VC, NULL);
 #endif
 
 	load_boot_idt(&boot_idt_desc);
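
The sev_status bit tested above corresponds, per the upstream MSR layout, to SEV-ES being active, the only guest type that raises #VC here; spelled out as a reading aid:

	/* Bits of the SEV status MSR (MSR_AMD64_SEV, 0xc0010131), as named
	 * in arch/x86/include/asm/msr-index.h: */
	#define MSR_AMD64_SEV_ENABLED		BIT_ULL(0)	/* plain SEV */
	#define MSR_AMD64_SEV_ES_ENABLED	BIT_ULL(1)	/* SEV-ES: takes #VC */

	/* So the hunk keeps the #VC handler only when the guest can raise it: */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
		set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
	else
		set_idt_entry(X86_TRAP_VC, NULL);
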
37 changes: 35 additions & 2 deletions arch/x86/boot/compressed/sev.c
@@ -354,21 +354,54 @@ void sev_enable(struct boot_params *bp)
 	if (bp)
 		bp->cc_blob_address = 0;
 
+	/*
+	 * Do an initial SEV capability check before snp_init() which
+	 * loads the CPUID page and the same checks afterwards are done
+	 * without the hypervisor and are trustworthy.
+	 *
+	 * If the HV fakes SEV support, the guest will crash'n'burn
+	 * which is good enough.
+	 */
+
+	/* Check for the SME/SEV support leaf */
+	eax = 0x80000000;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	if (eax < 0x8000001f)
+		return;
+
+	/*
+	 * Check for the SME/SEV feature:
+	 * CPUID Fn8000_001F[EAX]
+	 * - Bit 0 - Secure Memory Encryption support
+	 * - Bit 1 - Secure Encrypted Virtualization support
+	 * CPUID Fn8000_001F[EBX]
+	 * - Bits 5:0 - Pagetable bit position used to indicate encryption
+	 */
+	eax = 0x8000001f;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	/* Check whether SEV is supported */
+	if (!(eax & BIT(1)))
+		return;
+
 	/*
 	 * Setup/preliminary detection of SNP. This will be sanity-checked
 	 * against CPUID/MSR values later.
 	 */
 	snp = snp_init(bp);
 
-	/* Check for the SME/SEV support leaf */
+	/* Now repeat the checks with the SNP CPUID table. */
+
+	/* Recheck the SME/SEV support leaf */
 	eax = 0x80000000;
 	ecx = 0;
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 	if (eax < 0x8000001f)
 		return;
 
 	/*
-	 * Check for the SME/SEV feature:
+	 * Recheck for the SME/SEV feature:
 	 * CPUID Fn8000_001F[EAX]
 	 * - Bit 0 - Secure Memory Encryption support
 	 * - Bit 1 - Secure Encrypted Virtualization support
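
The same two-leaf probe is reproducible from user space with GCC's cpuid.h, handy for checking what a (possibly untrusted) hypervisor advertises; a self-contained sketch:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* The maximum extended leaf must reach 0x8000001f. */
		__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
		if (eax < 0x8000001f) {
			puts("no SME/SEV leaf");
			return 1;
		}

		/* Fn8000_001F: EAX bit 0 = SME, bit 1 = SEV, EBX[5:0] = C-bit. */
		__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
		printf("SME=%u SEV=%u C-bit=%u\n",
		       eax & 1, (eax >> 1) & 1, ebx & 0x3f);
		return 0;
	}
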
4 changes: 2 additions & 2 deletions arch/x86/entry/vdso/vma.c
@@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 	/* Round the lowest possible end address up to a PMD boundary. */
 	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE_MAX)
-		end = TASK_SIZE_MAX;
+	if (end >= DEFAULT_MAP_WINDOW)
+		end = DEFAULT_MAP_WINDOW;
 	end -= len;
 
 	if (end > start) {
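
The clamp swaps the absolute user-VA ceiling for the legacy 47-bit window, so vDSO randomization cannot land above what a process gets without explicitly opting in to 5-level (LA57) addresses. The bounds involved, assuming x86-64 with 4 KiB pages (a hedged reading aid; actual values are config-dependent):

	#include <stdio.h>

	#define PAGE_SIZE		4096UL
	#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)	/* 4-level ceiling */
	#define TASK_SIZE_MAX_LA57	((1UL << 56) - PAGE_SIZE)	/* 5-level ceiling */

	int main(void)
	{
		printf("47-bit window end: %#lx\n", DEFAULT_MAP_WINDOW);
		printf("LA57 ceiling:      %#lx\n", TASK_SIZE_MAX_LA57);
		return 0;
	}
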
2 changes: 2 additions & 0 deletions arch/x86/include/asm/processor.h
@@ -867,4 +867,6 @@ bool arch_is_platform_page(u64 paddr);
 #define arch_is_platform_page arch_is_platform_page
 #endif
 
+extern bool gds_ucode_mitigated(void);
+
 #endif /* _ASM_X86_PROCESSOR_H */
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/amd.c
@@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
 static const int amd_zenbleed[] =
 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
 
 static const int amd_div0[] =
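
The added line extends Zenbleed coverage to family 0x17 models 0x90-0x91. The packed range format can be decoded by hand; a small self-check that assumes the AMD_MODEL_RANGE layout from arch/x86/kernel/cpu/amd.c:

	#include <assert.h>
	#include <stdio.h>

	/* Assumed layout: family<<24 | model_start<<16 | stepping_start<<12
	 *                 | model_end<<4 | stepping_end */
	#define RANGE(f, ms, ss, me, se) \
		(((f) << 24) | ((ms) << 16) | ((ss) << 12) | ((me) << 4) | (se))

	int main(void)
	{
		unsigned int r = RANGE(0x17, 0x90, 0x0, 0x91, 0xf);

		assert(r == 0x1790091f);
		printf("family %#x, models %#x-%#x, steppings %#x-%#x\n",
		       r >> 24, (r >> 16) & 0xff, (r >> 4) & 0xff,
		       (r >> 12) & 0xf, r & 0xf);
		return 0;
	}
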
12 changes: 9 additions & 3 deletions arch/x86/kernel/vmlinux.lds.S
@@ -514,11 +514,17 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_CPU_SRSO
 /*
- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
-	   (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+	   (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 	   "SRSO function pair won't alias");
 #endif
 
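
The workaround in the last hunk leans on the identity (A | B) - (A & B) == A ^ B, which holds because A | B splits into the disjoint bit sets A ^ B and A & B. A quick self-check, with stand-in addresses chosen to differ in exactly bits 2, 8, 14 and 20 as the assertion expects:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Stand-ins for the two SRSO thunk addresses. */
		uint64_t a = 0xffffffff81a00000ULL;
		uint64_t b = 0xffffffff81b04104ULL;

		/* OR minus AND equals XOR, for any operands... */
		assert(((a | b) - (a & b)) == (a ^ b));
		/* ...and these two differ in bits 2, 8, 14 and 20. */
		assert((a ^ b) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)));
		return 0;
	}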