treewide: Linux 4.14.197
source kernel.org
https://cdn.kernel.org/pub/linux/kernel/v4.x/incr/patch-4.14.196-197.xz

Signed-off-by: Stefan Rücker <s.ruecker@gmx.de>
stefanhh0 authored and jerpelea committed Oct 28, 2020
1 parent 4dd37c5 commit 38acf57
Showing 86 changed files with 801 additions and 287 deletions.
16 changes: 10 additions & 6 deletions Documentation/filesystems/affs.txt
@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:

- R maps to r for user, group and others. On directories, R implies x.

-- If both W and D are allowed, w will be set.
+- W maps to w.

- E maps to x.

-- H and P are always retained and ignored under Linux.
+- D is ignored.

-- A is always reset when a file is written to.
+- H, S and P are always retained and ignored under Linux.
+
+- A is cleared when a file is written to.

User id and group id will be used unless set[gu]id are given as mount
options. Since most of the Amiga file systems are single user systems
@@ -111,11 +113,13 @@ Linux -> Amiga:

The Linux rwxrwxrwx file mode is handled as follows:

-- r permission will set R for user, group and others.
+- r permission will allow R for user, group and others.

-- w permission will set W and D for user, group and others.
+- w permission will allow W for user, group and others.

-- x permission of the user will set E for plain files.
+- x permission of the user will allow E for plain files.
+
+- D will be allowed for user, group and others.

- All other flags (suid, sgid, ...) are ignored and will
not be retained.
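
As a worked illustration of the Amiga -> Linux direction documented above: the sketch below is not kernel code; the helper name and bit constants are assumptions (real AFFS also stores the RWED bits inverted on disk), but the mapping logic follows the amended rules.

#include <sys/types.h>
#include <sys/stat.h>

/* Assumed bit positions, for illustration only. */
#define PROT_D (1u << 0)	/* delete: ignored under Linux */
#define PROT_E (1u << 1)	/* execute: maps to x */
#define PROT_W (1u << 2)	/* write: maps to w */
#define PROT_R (1u << 3)	/* read: maps to r, plus x on dirs */

static mode_t affs_prot_to_mode(unsigned int prot, int is_dir)
{
	mode_t mode = 0;

	if (prot & PROT_R) {
		mode |= S_IRUSR | S_IRGRP | S_IROTH;
		if (is_dir)	/* "On directories, R implies x" */
			mode |= S_IXUSR | S_IXGRP | S_IXOTH;
	}
	if (prot & PROT_W)	/* W alone now suffices for w */
		mode |= S_IWUSR | S_IWGRP | S_IWOTH;
	if (prot & PROT_E)
		mode |= S_IXUSR | S_IXGRP | S_IXOTH;

	/* D is ignored; H, S and P are retained on disk but have no
	 * mode equivalent; A is cleared by the write path instead. */
	return mode;
}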
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 196
+SUBLEVEL = 197
EXTRAVERSION =
NAME = Petit Gorille

3 changes: 2 additions & 1 deletion arch/arm64/include/asm/kvm_arm.h
@@ -78,10 +78,11 @@
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
-HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
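
For context: PTW is bit 2 of HCR_EL2. With it set, a guest stage-1 table walk that steps into device memory is turned into a stage-2 permission fault the hypervisor can handle, rather than a device access that could end in an uncontainable external abort. The define itself sits outside the shown hunk; it presumably follows the style of its neighbours in kvm_arm.h, along these lines:

#define HCR_PTW		(UL(1) << 2)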
43 changes: 43 additions & 0 deletions arch/arm64/include/asm/kvm_asm.h
@@ -83,6 +83,34 @@ extern u32 __init_stage2_translation(void);
*__hyp_this_cpu_ptr(sym); \
})

+#define __KVM_EXTABLE(from, to) \
+	"	.pushsection	__kvm_ex_table, \"a\"\n" \
+	"	.align		3\n" \
+	"	.long		(" #from " - .), (" #to " - .)\n" \
+	"	.popsection\n"
+
+
+#define __kvm_at(at_op, addr) \
+( { \
+	int __kvm_at_err = 0; \
+	u64 spsr, elr; \
+	asm volatile( \
+	"	mrs	%1, spsr_el2\n" \
+	"	mrs	%2, elr_el2\n" \
+	"1:	at	"at_op", %3\n" \
+	"	isb\n" \
+	"	b	9f\n" \
+	"2:	msr	spsr_el2, %1\n" \
+	"	msr	elr_el2, %2\n" \
+	"	mov	%w0, %4\n" \
+	"9:\n" \
+	__KVM_EXTABLE(1b, 2b) \
+	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+	: "r" (addr), "i" (-EFAULT)); \
+	__kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
@@ -107,6 +135,21 @@ extern u32 __init_stage2_translation(void);
kern_hyp_va \vcpu
.endm

+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long		(\from - .), (\to - .)
+	.popsection
+.endm
+
#endif

#endif /* __ARM_KVM_ASM_H__ */
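
Note how both __KVM_EXTABLE and _kvm_extable emit ".long (from - .), (to - .)" pairs: each table field stores the distance to its target rather than an absolute address, so the table remains valid wherever it is mapped at EL2. A stand-alone C sketch of this encoding, and of the recovery arithmetic used later by kvm_unexpected_el2_exception (all names here are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same layout idea as struct exception_table_entry. */
struct rel_entry {
	int32_t insn;	/* offset from this field to the faulting insn */
	int32_t fixup;	/* offset from this field to the fixup code */
};

static uintptr_t rel_to_addr(const int32_t *field)
{
	/* mirrors: addr = (unsigned long)&entry->insn + entry->insn */
	return (uintptr_t)field + *field;
}

int main(void)
{
	static char insn_site, fixup_site;	/* stand-ins for code labels */
	static struct rel_entry e;

	e.insn = (int32_t)((intptr_t)&insn_site - (intptr_t)&e.insn);
	e.fixup = (int32_t)((intptr_t)&fixup_site - (intptr_t)&e.fixup);

	/* Both checks print 1: the absolute addresses are recovered
	 * purely from the field addresses and their stored offsets. */
	printf("insn ok: %d, fixup ok: %d\n",
	       rel_to_addr(&e.insn) == (uintptr_t)&insn_site,
	       rel_to_addr(&e.fixup) == (uintptr_t)&fixup_site);
	return 0;
}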
8 changes: 8 additions & 0 deletions arch/arm64/kernel/vmlinux.lds.S
@@ -24,6 +24,13 @@ ENTRY(_text)

jiffies = jiffies_64;


+#define HYPERVISOR_EXTABLE \
+	. = ALIGN(SZ_8); \
+	VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \
+	*(__kvm_ex_table) \
+	VMLINUX_SYMBOL(__stop___kvm_ex_table) = .;

#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
@@ -39,6 +46,7 @@ jiffies = jiffies_64;
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
VMLINUX_SYMBOL(__hyp_text_start) = .; \
*(.hyp.text) \
+	HYPERVISOR_EXTABLE \
VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT \
26 changes: 20 additions & 6 deletions arch/arm64/kvm/hyp/entry.S
@@ -17,6 +17,7 @@

#include <linux/linkage.h>

+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
@@ -64,6 +65,15 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1

+	// Now the host state is stored if we have a pending RAS SError it must
+	// affect the host. If any asynchronous exception is pending we defer
+	// the guest entry.
+	mrs	x1, isr_el1
+	cbz	x1, 1f
+	mov	x0, #ARM_EXCEPTION_IRQ
+	ret
+
+1:
add x18, x0, #VCPU_CONTEXT

// Restore guest regs x0-x17
@@ -133,18 +143,22 @@ ENTRY(__guest_exit)
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
.global abort_guest_exit_start
abort_guest_exit_start:

isb

.global abort_guest_exit_end
abort_guest_exit_end:
+	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
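
The ISR_EL1 test added to __guest_enter above encodes a simple rule: once the host registers are saved, any already-pending asynchronous exception (SError, IRQ or FIQ) aborts the entry with ARM_EXCEPTION_IRQ so that it is taken while still in host context. A rough C rendering of those four instructions, as a kernel-style sketch only (read_sysreg() as in asm/sysreg.h; the helper name is invented):

/* Not from the patch: C equivalent of the mrs/cbz/mov/ret sequence. */
static inline int guest_enter_should_defer(void)
{
	/* ISR_EL1 reads non-zero while an SError, IRQ or FIQ is pending. */
	if (read_sysreg(isr_el1))
		return ARM_EXCEPTION_IRQ;	/* unwind; host handles it */
	return 0;				/* clear to enter the guest */
}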
63 changes: 42 additions & 21 deletions arch/arm64/kvm/hyp/hyp-entry.S
@@ -25,6 +25,30 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

+.macro save_caller_saved_regs_vect
+	stp	x0, x1, [sp, #-16]!
+	stp	x2, x3, [sp, #-16]!
+	stp	x4, x5, [sp, #-16]!
+	stp	x6, x7, [sp, #-16]!
+	stp	x8, x9, [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9, [sp], #16
+	ldp	x6, x7, [sp], #16
+	ldp	x4, x5, [sp], #16
+	ldp	x2, x3, [sp], #16
+	ldp	x0, x1, [sp], #16
+.endm

.text
.pushsection .hyp.text, "ax"

@@ -183,26 +207,24 @@ el1_error:
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit

+el2_sync:
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
+
+	eret

el2_error:
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
+
eret

ENTRY(__hyp_do_panic)
@@ -231,7 +253,6 @@ ENDPROC(\label)
invalid_vector el2t_irq_invalid
invalid_vector el2t_fiq_invalid
invalid_vector el2t_error_invalid
-invalid_vector el2h_sync_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
invalid_vector el1_sync_invalid
@@ -248,7 +269,7 @@ ENTRY(__kvm_hyp_vector)
ventry el2t_fiq_invalid // FIQ EL2t
ventry el2t_error_invalid // Error EL2t

-ventry el2h_sync_invalid // Synchronous EL2h
+ventry el2_sync // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h
ventry el2_error // Error EL2h
39 changes: 35 additions & 4 deletions arch/arm64/kvm/hyp/switch.c
@@ -22,11 +22,15 @@

#include <kvm/arm_psci.h>

+#include <asm/extable.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>

+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
@@ -216,10 +220,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = 1; /* back to the guest */
write_sysreg(par, par_el1);

if (unlikely(tmp & 1))
@@ -486,3 +490,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)

unreachable();
}

+asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
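
One detail worth spelling out in the __translate_far_to_hpfar() change above: a non-zero return from __kvm_at() now means the AT instruction itself took an exception at EL2 (caught through the new extable), while an AT that executed but failed its walk still reports through PAR_EL1, whose bit 0 (the F flag) marks an aborted translation. That is the existing "tmp & 1" test, and why the error path forces tmp to 1. A small hedged helper making the bit test explicit (name invented):

#include <stdint.h>
#include <stdbool.h>

/* PAR_EL1.F is bit 0: set when the translation aborted, clear when
 * the register instead holds the output physical address bits. */
static inline bool par_walk_failed(uint64_t par)
{
	return par & 1;		/* same predicate as "tmp & 1" above */
}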
2 changes: 2 additions & 0 deletions arch/mips/kernel/smp-bmips.c
@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+	bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
4 changes: 4 additions & 0 deletions arch/mips/mm/c-r4k.c
@@ -1781,7 +1781,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);

+	if (current_cpu_type() == CPU_BMIPS5000)
+		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
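
The effect of setting MIPS_CPU_INCLUSIVE_CACHES for BMIPS5000 is that cpu_has_inclusive_pcaches becomes true, and the r4k DMA maintenance paths can then satisfy cache operations from the (inclusive) secondary cache alone. A simplified sketch of the consuming side; the shape follows the r4k DMA write-back path but is abridged, not the exact kernel code:

/* Abridged from the r4k DMA maintenance logic (illustrative only). */
static void dma_cache_wback_inv_sketch(unsigned long addr, unsigned long size)
{
	if (cpu_has_inclusive_pcaches) {
		/* L2 includes L1: hitting the scache is sufficient. */
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}
	/* Otherwise the primary dcache must be maintained as well. */
}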