Merge tag 'v5.8.13' into 5.8
This is the 5.8.13 stable release
xanmod committed Oct 2, 2020
2 parents ed0ec0f + cdcec68 commit 074e83e
Showing 120 changed files with 865 additions and 409 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 8
-SUBLEVEL = 12
+SUBLEVEL = 13
EXTRAVERSION =
NAME = Kleptomaniac Octopus

12 changes: 10 additions & 2 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -319,15 +319,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -356,6 +356,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
@@ -393,6 +398,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
+if (kvm_vcpu_abt_iss1tw(vcpu))
+return true;

if (kvm_vcpu_trap_is_iabt(vcpu))
return false;

2 changes: 1 addition & 1 deletion arch/arm64/kvm/hyp/switch.c
@@ -599,7 +599,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_dabt_isextabt(vcpu) &&
-!kvm_vcpu_dabt_iss1tw(vcpu);
+!kvm_vcpu_abt_iss1tw(vcpu);

if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
2 changes: 1 addition & 1 deletion arch/arm64/kvm/mmio.c
@@ -146,7 +146,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
}

/* Page table accesses IO mem: tell guest to fix its TTBR */
-if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+if (kvm_vcpu_abt_iss1tw(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
2 changes: 1 addition & 1 deletion arch/arm64/kvm/mmu.c
@@ -1845,7 +1845,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long vma_pagesize, flags = 0;

write_fault = kvm_is_write_fault(vcpu);
-exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);

if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
6 changes: 3 additions & 3 deletions arch/ia64/mm/init.c
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
-MEMMAP_EARLY, NULL);
+MEMINIT_EARLY, NULL);
return 0;
}

@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
-memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-NULL);
+memmap_init_zone(size, nid, zone, start_pfn,
+MEMINIT_EARLY, NULL);
} else {
struct page *start;
struct memmap_init_callback_data args;
1 change: 1 addition & 0 deletions arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+case CPU_1074K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
4 changes: 4 additions & 0 deletions arch/mips/loongson2ef/Platform
@@ -22,6 +22,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
endif

+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)

#
# Loongson Machines' Support
#
24 changes: 8 additions & 16 deletions arch/mips/loongson64/cop2-ex.c
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rt, value);
-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rq, value_next);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
compute_return_epc(regs);
own_fpu(1);
}
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;

lose_fpu(1);
-value_next = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rq);
+value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);

StoreDW(addr + 8, value_next, res);
if (res)
goto fault;

-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lswc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);

StoreDW(addr, value, res);
if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);

@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
if (res)
goto fault;

-set_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt, value);
+set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
compute_return_epc(regs);
own_fpu(1);
break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;

lose_fpu(1);
-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

StoreW(addr, value, res);
if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
goto sigbus;

lose_fpu(1);
-value = get_fpr64(current->thread.fpu.fpr,
-insn.loongson3_lsdc2_format.rt);
+value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

StoreDW(addr, value, res);
if (res)
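Note on the accessor change in this file: judging from the call sites, set_fpr64()/get_fpr64() take an element index within a single (possibly MSA-widened) register, not a register number, so a whole register is selected by indexing the fpr array and touching element 0. A minimal sketch of that layout, with stand-in types and a little-endian element order assumed (not the kernel's actual definitions):

/* Each FP register is an array of 64-bit elements so MSA can widen it. */
union fpureg {
	unsigned long long val64[4];	/* element 0 holds the scalar FP value */
};

static inline void set_fpr64(union fpureg *fpr, unsigned int idx,
			     unsigned long long val)
{
	fpr->val64[idx] = val;	/* idx selects an element, not a register */
}

/* Whole register rt is therefore addressed as: */
/* set_fpr64(&fpu.fpr[rt], 0, value); */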
6 changes: 4 additions & 2 deletions arch/riscv/boot/dts/kendryte/k210.dtsi
@@ -95,10 +95,12 @@
#clock-cells = <1>;
};

-clint0: interrupt-controller@2000000 {
+clint0: clint@2000000 {
+#interrupt-cells = <1>;
compatible = "riscv,clint0";
reg = <0x2000000 0xC000>;
-interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+&cpu1_intc 3 &cpu1_intc 7>;
clocks = <&sysctl K210_CLK_ACLK>;
};

7 changes: 7 additions & 0 deletions arch/riscv/include/asm/ftrace.h
@@ -66,6 +66,13 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8

+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif

#endif

#endif /* _ASM_RISCV_FTRACE_H */
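The "#define ftrace_init_nop ftrace_init_nop" line is the kernel's usual override-detection idiom: defining a macro with the function's own name lets the generic header see that the architecture supplies its own implementation and skip the fallback. A compressed sketch of the idiom, with hypothetical names rather than the real ftrace headers:

/* arch header: declare the override and mark it as present */
int my_hook(int x);
#define my_hook my_hook

/* generic header: supply a default only if no override was declared */
#ifndef my_hook
static inline int my_hook(int x)
{
	return 0;	/* default no-op */
}
#endif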
19 changes: 19 additions & 0 deletions arch/riscv/kernel/ftrace.c
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}


+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+int out;
+
+ftrace_arch_code_modify_prepare();
+out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ftrace_arch_code_modify_post_process();
+
+return out;
+}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
42 changes: 30 additions & 12 deletions arch/s390/include/asm/pgtable.h
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
-if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-return (p4d_t *) pgd;
+if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+return (p4d_t *) pgdp;
}
+#define p4d_offset_lockless p4d_offset_lockless

-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
-if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-return (pud_t *) p4d;
+return p4d_offset_lockless(pgdp, *pgdp, address);
}

+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
+{
+if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+return (pud_t *) p4d_deref(p4d) + pud_index(address);
+return (pud_t *) p4dp;
+}
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+return pud_offset_lockless(p4dp, *p4dp, address);
+}
+#define pud_offset pud_offset

-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+return (pmd_t *) pud_deref(pud) + pmd_index(address);
+return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
-if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-return (pmd_t *) pud;
+return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset
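Note on the _lockless variants added here: each takes both a pointer into the upper-level table and a copy of the entry read from it. The apparent point, inferred from the code shown: with s390's dynamic page-table folding a folded level returns the original pointer, and a lockless walker in the GUP-fast style must not dereference that pointer a second time, since the entry may change concurrently; handing down the already-read value keeps the whole walk on a single snapshot. A sketch of the calling pattern, with stand-in context rather than the real walker:

p4d_t *step_lockless(pgd_t *pgdp, unsigned long addr)
{
	pgd_t pgd = READ_ONCE(*pgdp);	/* read the entry exactly once */

	/* The helper tests only the copied value; for a folded level it
	 * returns pgdp itself and never re-reads *pgdp. */
	return p4d_offset_lockless(pgdp, pgd, addr);
}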

6 changes: 3 additions & 3 deletions arch/s390/kernel/setup.c
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
{
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
2 changes: 1 addition & 1 deletion arch/x86/entry/common.c
@@ -814,7 +814,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
old_regs = set_irq_regs(regs);

instrumentation_begin();
-run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
instrumentation_begin();

set_irq_regs(old_regs);
2 changes: 2 additions & 0 deletions arch/x86/entry/entry_64.S
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
* rdx: Function argument (can be NULL if none)
*/
SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
/*
* Save the frame pointer unconditionally. This allows the ORC
* unwinder to handle the stack switch.
2 changes: 1 addition & 1 deletion arch/x86/include/asm/idtentry.h
@@ -246,7 +246,7 @@ __visible noinstr void func(struct pt_regs *regs) \
instrumentation_begin(); \
irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
-run_on_irqstack_cond(__##func, regs, regs); \
+run_sysvec_on_irqstack_cond(__##func, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
idtentry_exit_cond_rcu(regs, rcu_exit); \
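Taken together, the three x86 hunks split the single irq-stack helper into typed variants: the removed run_on_irqstack_cond(func, NULL, regs) form pushed every handler through one generic pointer type, so handlers with different prototypes had to be cast to match (the presumed motivation; the cast sites are outside the shown context). A sketch of the typed-split idea, with illustrative names only:

struct pt_regs;

typedef void (*sysvec_handler_t)(struct pt_regs *regs);
typedef void (*irq_stub_t)(void);

static void run_sysvec(sysvec_handler_t fn, struct pt_regs *regs)
{
	fn(regs);	/* invoked through its real prototype, no cast */
}

static void run_irq_stub(irq_stub_t fn)
{
	fn();
}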
