Merge remote-tracking branch 'stable/linux-4.14.y' into rpi-4.14.y
popcornmix committed Jun 5, 2018
2 parents 4fca48b + 2c6025e commit 58eb131
Showing 560 changed files with 5,730 additions and 2,341 deletions.
4 changes: 4 additions & 0 deletions Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -20,6 +20,7 @@ Required properties :
 		- "allwinner,sun50i-a64-ccu"
 		- "allwinner,sun50i-a64-r-ccu"
 		- "allwinner,sun50i-h5-ccu"
+		- "allwinner,sun50i-h6-ccu"
 		- "nextthing,gr8-ccu"
 
 - reg: Must contain the registers base address and length
@@ -31,6 +32,9 @@ Required properties :
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1
 
+For the main CCU on H6, one more clock is needed:
+- "iosc": the SoC's internal frequency oscillator
+
 For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
 - "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator
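For orientation, a minimal H6 CCU node under the new compatible might look like the sketch below. The unit address, register size, and oscillator phandles are illustrative assumptions, not taken from this patch, and the "hosc"/"losc" inputs follow the binding's usual clock requirements:

	ccu: clock@3001000 {
		compatible = "allwinner,sun50i-h6-ccu";
		reg = <0x03001000 0x1000>;		/* assumed base/size */
		clocks = <&osc24M>, <&osc32k>, <&iosc>;	/* assumed phandles */
		clock-names = "hosc", "losc", "iosc";	/* "iosc" is the extra H6 clock */
		#clock-cells = <1>;
		#reset-cells = <1>;
	};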
6 changes: 5 additions & 1 deletion Documentation/devicetree/bindings/dma/mv-xor-v2.txt
@@ -11,7 +11,11 @@ Required properties:
   interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+  name must be "core" for the first clock and "reg" for the second
+  one
 
 Example:
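The file's own "Example:" block is elided in this view; a hedged sketch of an XOR engine node carrying both named clocks could read as follows (the node name, register ranges, MSI parent, and clock phandles are assumptions):

	xor0: dma-controller@400000 {
		compatible = "marvell,xor-v2";
		reg = <0x400000 0x1000>,
		      <0x410000 0x1000>;		/* assumed ranges */
		msi-parent = <&gic_v2m0>;		/* assumed MSI parent */
		clocks = <&core_clk>, <&reg_clk>;	/* assumed phandles */
		clock-names = "core", "reg";		/* names mandated once a second clock exists */
	};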
@@ -20,7 +20,8 @@ Required subnode-properties:
 	gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0,
 	      i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0,
 	      spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0,
-	      uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0
+	      uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0,
+	      uart5nocts
 	cpuclkout: cpuclkoutgrp0
 	udlclkout: udlclkoutgrp0
 	i2c1: i2c1grp0
@@ -37,7 +38,7 @@ Required subnode-properties:
 	uart2: uart2grp0, uart2grp1
 	uart3: uart3grp0
 	uart4: uart4grp0
-	uart5: uart5grp0
+	uart5: uart5grp0, uart5nocts
 	nand: nandgrp0
 	sdio0: sdio0grp0
 	sdio1: sdio1grp0
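A pin-controller subnode selecting the new group might look like the sketch below (the subnode name and the "uart5" function name are assumptions inferred from the group naming, not taken from this patch):

	uart5_pins: uart5-pins {
		function = "uart5";	/* assumed function name */
		groups = "uart5nocts";	/* uart5 without CTS/RTS */
	};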
10 changes: 2 additions & 8 deletions Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 44
+SUBLEVEL = 48
 EXTRAVERSION =
 NAME = Petit Gorille
 
@@ -369,11 +369,6 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
 HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS)
 HOST_LOADLIBES := $(HOST_LFS_LIBS)
 
-ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
-HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
-		-Wno-missing-field-initializers -fno-delete-null-pointer-checks
-endif
-
 # Make variables (CC, etc...)
 AS		= $(CROSS_COMPILE)as
 LD		= $(CROSS_COMPILE)ld
@@ -711,7 +706,6 @@ KBUILD_CFLAGS += $(stackp-flag)
 
 ifeq ($(cc-name),clang)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
@@ -729,9 +723,9 @@ else
 # These warnings generated too much noise in a regular build.
 # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 endif
 
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
30 changes: 22 additions & 8 deletions arch/alpha/include/asm/xchg.h
@@ -12,13 +12,18 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
 ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -128,17 +136,20 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
 ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -150,8 +161,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-	__ASM__MB
 	"2:\n"
+	__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -177,8 +189,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-	__ASM__MB
 	"2:\n"
+	__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
@@ -193,15 +205,16 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
 	"	beq %1,2f\n"
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-	__ASM__MB
 	"2:\n"
+	__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
@@ -216,15 +229,16 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
 	"	beq %1,2f\n"
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-	__ASM__MB
 	"2:\n"
+	__ASM__MB
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
1 change: 0 additions & 1 deletion arch/arc/Kconfig
@@ -487,7 +487,6 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
 	bool "Emulate unaligned memory access (userspace only)"
-	default N
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	depends on ISA_ARCOMPACT
3 changes: 2 additions & 1 deletion arch/arc/include/asm/bug.h
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 
 #define BUG()	do {								\
 	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-	dump_stack();								\
+	barrier_before_unreachable();						\
+	__builtin_trap();							\
 } while (0)
 
 #define HAVE_ARCH_BUG
74 changes: 69 additions & 5 deletions arch/arc/kernel/mcip.c
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 
 static char smp_cpuinfo_buf[128];
 
+/*
+ * Set mask to halt GFRC if any online core in SMP cluster is halted.
+ * Only works for ARC HS v3.0+, on earlier versions has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+	struct bcr_generic gfrc;
+	unsigned long flags;
+	u32 gfrc_halt_mask;
+
+	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+	/*
+	 * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+	 * GFRC 0x3 version.
+	 */
+	if (gfrc.ver < 0x3)
+		return;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
+	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+	gfrc_halt_mask |= BIT(cpu);
+	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+	u32 mcip_mask = 0;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	/*
+	 * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+	 * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+	 * and CMD_DEBUG_READ_SELECT.
+	 */
+	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	mcip_mask |= BIT(cpu);
+
+	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+	/*
+	 * Parameter specified halt cause:
+	 * STATUS32[H]/actionpoint/breakpoint/self-halt
+	 * We choose all of them (0xF).
+	 */
+	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static void mcip_setup_per_cpu(int cpu)
 {
+	struct mcip_bcr mp;
+
+	READ_BCR(ARC_REG_MCIP_BCR, mp);
+
 	smp_ipi_irq_setup(cpu, IPI_IRQ);
 	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+	/* Update GFRC halt mask as new CPU came online */
+	if (mp.gfrc)
+		mcip_update_gfrc_halt_mask(cpu);
+
+	/* Update MCIP debug mask as new CPU came online */
+	if (mp.dbg)
+		mcip_update_debug_halt_mask(cpu);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
 	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
-	if (mp.dbg) {
-		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
-		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
-	}
 }
 
 struct plat_smp_ops plat_smp_ops = {
