Commit: Merge tag 'v5.12.7' into 5.12-cacule

This is the 5.12.7 stable release

xanmod committed May 26, 2021
2 parents 625d991 + 55c17a6 commit bb2739e
Showing 132 changed files with 1,363 additions and 738 deletions.
10 changes: 10 additions & 0 deletions Documentation/powerpc/syscall64-abi.rst
@@ -109,6 +109,16 @@ auxiliary vector.

scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.

ptrace
------
When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
the system call type, which can be used to distinguish between sc and scv 0
system calls so that the different register conventions can be accounted for.

If the value of (pt_regs.trap & 0xfff0) is 0xc00, the system call was
performed with the sc instruction; if it is 0x3000, the system call was
performed with the scv 0 instruction.

vsyscall
========

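As an aside, the trap-value convention documented above can be consumed directly by a tracer. The following is a minimal hedged sketch, not part of this commit; the report_syscall_style() helper and its PTRACE_GETREGS usage are illustrative assumptions for powerpc64:

/* Hypothetical tracer-side sketch: classify a stopped tracee's syscall
 * entry as sc vs scv 0 via the pt_regs.trap convention described above.
 * Assumes powerpc64 and the uapi <asm/ptrace.h> pt_regs layout. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_regs: gpr[], trap, ... */

static void report_syscall_style(pid_t child)
{
	struct pt_regs regs;

	if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == -1)
		return;		/* tracee not stopped or not traced */

	if ((regs.trap & 0xfff0) == 0xc00)
		printf("syscall %lu via sc (error: CR0.SO set, positive errno in r3)\n",
		       regs.gpr[0]);
	else if ((regs.trap & 0xfff0) == 0x3000)
		printf("syscall %lu via scv 0 (error: negative errno in r3)\n",
		       regs.gpr[0]);
}

A tracer would call this from its PTRACE_SYSCALL stop handler, after waitpid() reports a syscall-entry stop.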
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 12
SUBLEVEL = 6
SUBLEVEL = 7
EXTRAVERSION =
NAME = Frozen Wasteland

2 changes: 2 additions & 0 deletions arch/openrisc/kernel/setup.c
@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);

of_node_put(cpu);
}

void __init setup_arch(char **cmdline_p)
3 changes: 1 addition & 2 deletions arch/openrisc/mm/init.c
@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
struct memblock_region *region;

v = PAGE_OFFSET;

@@ -121,7 +120,7 @@ static void __init map_ram(void)
}

printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
region->base, region->base + region->size);
start, end);
}
}

3 changes: 3 additions & 0 deletions arch/powerpc/include/asm/hvcall.h
@@ -446,6 +446,9 @@
*/
long plpar_hcall_norets(unsigned long opcode, ...);

/* Variant which does not do hcall tracing */
long plpar_hcall_norets_notrace(unsigned long opcode, ...);

/**
* plpar_hcall: - Make a pseries hypervisor call
* @opcode: The hypervisor call to make.
22 changes: 19 additions & 3 deletions arch/powerpc/include/asm/paravirt.h
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
return be32_to_cpu(yield_count);
}

/*
* Spinlock code confers and prods, so don't trace the hcalls because the
* tracing code takes spinlocks which can cause recursion deadlocks.
*
* These calls are made while the lock is not held: the lock slowpath yields if
* it cannot acquire the lock, and the unlock slow path might prod if a waiter
* has yielded. So this may not be a problem for simple spin locks because the
* tracing does not technically recurse on the lock, but we avoid it anyway.
*
* However the queued spin lock contended path is more strictly ordered: the
* H_CONFER hcall is made after the task has queued itself on the lock, so then
* recursing on that lock will cause the task to then queue up again behind the
* first instance (or worse: queued spinlocks use tricks that assume a context
* never waits on more than one spinlock, so such recursion may cause random
* corruption in the lock code).
*/
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
plpar_hcall_norets(H_CONFER, -1, 0);
plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
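To make the new comment concrete, here is a hedged sketch of how a paravirt wait loop pairs these primitives; pv_wait_for_lock() is an invented name, and the loop is simplified well beyond the real queued-spinlock slowpath:

/* Illustration only: spin until the lock is free, conferring our
 * timeslice whenever the owner vCPU is preempted. */
static inline void pv_wait_for_lock(int owner_cpu, volatile int *locked)
{
	while (*locked) {
		u32 yield_count = yield_count_of(owner_cpu);

		/* An odd yield count means the owning vCPU is currently
		 * preempted by the hypervisor, so confer our timeslice
		 * to it via H_CONFER instead of spinning. */
		if (yield_count & 1)
			yield_to_preempted(owner_cpu, yield_count);
	}
}

The unlock side may then prod_cpu() a waiter known to have yielded, which is why both paths have to stay off the traced hcall wrappers.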
45 changes: 26 additions & 19 deletions arch/powerpc/include/asm/ptrace.h
@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H

#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>

@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}

#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@@ -252,6 +234,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x10;
}

#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
else
return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return regs->gpr[3];

if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}

#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
42 changes: 26 additions & 16 deletions arch/powerpc/include/asm/syscall.h
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
if (trap_is_scv(regs)) {
unsigned long error = regs->gpr[3];

return IS_ERR_VALUE(error) ? error : 0;
} else {
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
}
}

static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
/*
* In the general case it's not obvious that we must deal with CCR
* here, as the syscall exit path will also do that for us. However
* there are some places, eg. the signal code, which check ccr to
* decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
if (trap_is_scv(regs)) {
regs->gpr[3] = (long) error ?: val;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
/*
* In the general case it's not obvious that we must deal with
* CCR here, as the syscall exit path will also do that for us.
* However there are some places, eg. the signal code, which
* check ccr to decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
}
}
}

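Stepping back from the hunks, the two error conventions they reconcile can be summarized in one hedged helper; decode_return() is an invented name, and the logic simply restates what the kernel code above does:

/* Illustration only: decode a powerpc syscall return under both ABIs. */
#include <stdbool.h>

static long decode_return(bool is_scv, unsigned long ccr, long r3)
{
	if (is_scv)
		return r3;	/* scv 0: failure is already a negative errno */

	/* sc: CR0.SO (0x10000000) flags failure, r3 holds a positive errno */
	return (ccr & 0x10000000UL) ? -r3 : r3;
}

For scv 0, a failure is any value in the range [-4095, -1], which is exactly the IS_ERR_VALUE() test the new code applies.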
4 changes: 2 additions & 2 deletions arch/powerpc/kernel/setup_64.c
@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();

early_ioremap_setup();

/* Initialize the hash table or TLB handling */
early_init_mmu();

early_ioremap_setup();

/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
10 changes: 10 additions & 0 deletions arch/powerpc/platforms/pseries/hvCall.S
@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1); \
#define HCALL_BRANCH(LABEL)
#endif

_GLOBAL_TOC(plpar_hcall_norets_notrace)
HMT_MEDIUM

mfcr r0
stw r0,8(r1)
HVSC /* invoke the hypervisor */
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */

_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM

3 changes: 1 addition & 2 deletions arch/powerpc/platforms/pseries/lpar.c
@@ -1830,8 +1830,7 @@ void hcall_tracepoint_unregfunc(void)

/*
* Since the tracing code might execute hcalls we need to guard against
* recursion. One example of this are spinlocks calling H_YIELD on
* shared processor partitions.
* recursion.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

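For context, the guard that comment refers to works roughly as in the following sketch; this is a simplified model, with a __thread variable standing in for the per-CPU hcall_trace_depth counter:

/* Simplified sketch of the recursion guard: the tracepoint body may
 * itself issue hcalls, which re-enter here and bail out immediately. */
static __thread unsigned int trace_depth;	/* stand-in for per-CPU data */

static void hcall_trace_entry(unsigned long opcode)
{
	if (trace_depth)	/* already tracing an hcall on this CPU */
		return;

	trace_depth++;
	/* ... emit the hcall_entry tracepoint for opcode here ... */
	trace_depth--;
}

The notrace hcall variants added elsewhere in this series exist because this guard cannot help when the recursion happens inside a queued spinlock rather than inside the tracer itself.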
12 changes: 6 additions & 6 deletions arch/x86/Makefile
@@ -207,11 +207,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif

ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif

# Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -231,7 +226,12 @@ ifdef CONFIG_RETPOLINE
endif
endif

KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)

ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif

ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
