diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt index 19df842c694..8163d565f69 100644 --- a/Documentation/devicetree/bindings/clock/imx31-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt @@ -77,7 +77,7 @@ Examples: clks: ccm@53f80000{ compatible = "fsl,imx31-ccm"; reg = <0x53f80000 0x4000>; - interrupts = <0 31 0x04 0 53 0x04>; + interrupts = <31>, <53>; #clock-cells = <1>; }; diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 3559dfe2874..2a4ecfe405f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -959,6 +959,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. When zero, profiling data is discarded and associated debugfs files are removed at module unload time. + goldfish [X86] Enable the goldfish android emulator platform. + Don't use this when you are not running on the + android emulator + gpt [EFI] Forces disk with valid GPT signature but invalid Protective MBR to be treated as GPT. @@ -2912,6 +2916,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. spia_pedr= spia_peddr= + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. + stacktrace [FTRACE] Enabled the stack tracer on boot up. diff --git a/Makefile b/Makefile index 9f0d252c4bd..936fc581867 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 10 -SUBLEVEL = 105 +SUBLEVEL = 107 EXTRAVERSION = NAME = TOSSUG Baby Fish diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 116d3e09b5b..e6b365d9e0a 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -228,8 +228,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, if (state.fault) goto fault; + /* clear any remanants of delay slot */ if (delay_mode(regs)) { - regs->ret = regs->bta; + regs->ret = regs->bta & ~1U; regs->status32 &= ~STATUS_DE_MASK; } else { regs->ret += state.instr_len; diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 2e06d56e987..cf4ae695824 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index c914357c0d8..d3c206e7887 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -59,6 +59,7 @@ #size-cells = <1>; compatible = "m25p64"; spi-max-frequency = <30000000>; + m25p,fast-read; reg = <0>; partition@0 { label = "U-Boot-SPL"; diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi index c5449257ad9..b73190d08ba 100644 --- a/arch/arm/boot/dts/imx31.dtsi +++ b/arch/arm/boot/dts/imx31.dtsi @@ -20,11 +20,11 @@ serial4 = &uart5; }; - avic: avic-interrupt-controller@60000000 { + avic: interrupt-controller@68000000 { compatible = "fsl,imx31-avic", "fsl,avic"; interrupt-controller; #interrupt-cells = <1>; - reg = <0x60000000 0x100000>; + reg = <0x68000000 0x100000>; }; soc { @@ -93,13 +93,6 @@ clock-names = "ipg", "per"; status = "disabled"; }; - - 
clks: ccm@53f80000{ - compatible = "fsl,imx31-ccm"; - reg = <0x53f80000 0x4000>; - interrupts = <0 31 0x04 0 53 0x04>; - #clock-cells = <1>; - }; }; aips@53f00000 { /* AIPS2 */ @@ -109,6 +102,13 @@ reg = <0x53f00000 0x100000>; ranges; + clks: ccm@53f80000{ + compatible = "fsl,imx31-ccm"; + reg = <0x53f80000 0x4000>; + interrupts = <31>, <53>; + #clock-cells = <1>; + }; + gpt: timer@53f90000 { compatible = "fsl,imx31-gpt"; reg = <0x53f90000 0x4000>; diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 98168cba3f2..e1c1f229d50 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -59,6 +59,9 @@ #define ARM_CPU_XSCALE_ARCH_V2 0x4000 #define ARM_CPU_XSCALE_ARCH_V3 0x6000 +/* Qualcomm implemented cores */ +#define ARM_CPU_PART_SCORPION 0x510002d0 + extern unsigned int processor_id; #ifdef CONFIG_CPU_CP15 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 9ba31233678..1122ba9dce3 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1088,6 +1088,22 @@ static int __init arch_hw_breakpoint_init(void) return 0; } + /* + * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD + * whenever a WFI is issued, even if the core is not powered down, in + * violation of the architecture. When DBGPRSR.SPD is set, accesses to + * breakpoint and watchpoint registers are treated as undefined, so + * this results in boot time and runtime failures when these are + * accessed and we unexpectedly take a trap. + * + * It's not clear if/how this can be worked around, so we blacklist + * Scorpion CPUs to avoid these issues. + */ + if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) { + pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); + return 0; + } + has_ossr = core_has_os_save_restore(); /* Determine how many BRPs/WRPs are available. */ diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index b91eb6a17af..f91ac5b4500 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { int ret; - struct pt_regs newregs; + struct pt_regs newregs = *task_pt_regs(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c index 1a468f0fd22..9d532568b8b 100644 --- a/arch/arm/mach-ux500/pm.c +++ b/arch/arm/mach-ux500/pm.c @@ -128,8 +128,8 @@ bool prcmu_pending_irq(void) */ bool prcmu_is_cpu_in_wfi(int cpu) { - return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : - PRCM_ARM_WFI_STANDBY_WFI0; + return readl(PRCM_ARM_WFI_STANDBY) & + (cpu ? 
PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); } /* diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2d689d1f88f..dae47dfc8da 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 77e80d3bbe4..b18b7c1ddbb 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -258,8 +258,7 @@ static int __init xen_guest_init(void) * for secondary CPUs as they are brought up. * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. */ - xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), - sizeof(struct vcpu_info)); + xen_vcpu_info = alloc_percpu(struct vcpu_info); if (xen_vcpu_info == NULL) return -ENOMEM; diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 6913643bbe5..c136fd53c84 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -75,6 +75,7 @@ struct user_fpsimd_state { __uint128_t vregs[32]; __u32 fpsr; __u32 fpcr; + __u32 __reserved[2]; }; struct user_hwdebug_state { diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index e04659b48e9..dbb52c0dad5 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -447,6 +447,8 @@ static int hw_break_set(struct task_struct *target, /* (address, ctrl) registers */ limit = regset->n * regset->size; while (count && offset < limit) { + if (count < PTRACE_HBP_ADDR_SZ) + return -EINVAL; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, offset, offset + PTRACE_HBP_ADDR_SZ); if (ret) @@ -456,6 +458,8 @@ static int hw_break_set(struct task_struct *target, return ret; offset += PTRACE_HBP_ADDR_SZ; + if (!count) + break; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, offset, offset + PTRACE_HBP_CTRL_SZ); if (ret) @@ -492,7 +496,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - struct user_pt_regs newregs; + struct user_pt_regs newregs = task_pt_regs(target)->user_regs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); if (ret) @@ -522,7 +526,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - struct user_fpsimd_state newstate; + struct user_fpsimd_state newstate = + target->thread.fpsimd_state.user_fpsimd; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); if (ret) @@ -545,7 +550,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { int ret; - unsigned long tls; + unsigned long tls = target->thread.tp_value; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); if (ret) diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c index 3c494e84444..a511ac16a8e 100644 --- a/arch/c6x/kernel/ptrace.c +++ b/arch/c6x/kernel/ptrace.c @@ -69,46 +69,6 @@ static int gpr_get(struct task_struct 
*target, 0, sizeof(*regs)); } -static int gpr_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - int ret; - struct pt_regs *regs = task_pt_regs(target); - - /* Don't copyin TSR or CSR */ - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - 0, PT_TSR * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - PT_TSR * sizeof(long), - (PT_TSR + 1) * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - (PT_TSR + 1) * sizeof(long), - PT_CSR * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - PT_CSR * sizeof(long), - (PT_CSR + 1) * sizeof(long)); - if (ret) - return ret; - - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - ®s, - (PT_CSR + 1) * sizeof(long), -1); - return ret; -} - enum c6x_regset { REGSET_GPR, }; @@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = { .size = sizeof(u32), .align = sizeof(u32), .get = gpr_get, - .set = gpr_set }, }; diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile index 52bd0bd1dd2..d98edbb30a1 100644 --- a/arch/cris/boot/rescue/Makefile +++ b/arch/cris/boot/rescue/Makefile @@ -10,6 +10,9 @@ asflags-y += $(LINUXINCLUDE) ccflags-y += -O2 $(LINUXINCLUDE) + +ifdef CONFIG_ETRAX_AXISFLASHMAP + arch-$(CONFIG_ETRAX_ARCH_V10) = v10 arch-$(CONFIG_ETRAX_ARCH_V32) = v32 @@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE $(call if_changed,objcopy) cp -p $(obj)/rescue.bin $(objtree) +else +$(obj)/rescue.bin: + +endif + $(obj)/testrescue.bin: $(obj)/testrescue.o $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin # Pad it to 784 bytes diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707a6..efa59f1f802 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h index d28fa8fe26f..c598d847d56 100644 --- a/arch/m68k/include/asm/delay.h +++ b/arch/m68k/include/asm/delay.h @@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs) */ #define HZSCALE (268435456 / (1000000 / HZ)) -#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)); +#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)) #endif /* defined(_M68K_DELAY_H) */ diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 7841f229038..9d523375f68 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -192,20 +192,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); #define strlen_user(str) strnlen_user(str, 32767) -extern unsigned long __must_check __copy_user_zeroing(void *to, - const void __user *from, - unsigned long n); +extern unsigned long raw_copy_from_user(void *to, const void __user *from, + unsigned long n); static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; if (likely(access_ok(VERIFY_READ, from, n))) - return 
__copy_user_zeroing(to, from, n); - memset(to, 0, n); - return n; + res = raw_copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } -#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) +#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) #define __copy_from_user_inatomic __copy_from_user extern unsigned long __must_check __copy_user(void __user *to, diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c index 7563628822b..5e2dc7defd2 100644 --- a/arch/metag/kernel/ptrace.c +++ b/arch/metag/kernel/ptrace.c @@ -24,6 +24,16 @@ * user_regset definitions. */ +static unsigned long user_txstatus(const struct pt_regs *regs) +{ + unsigned long data = (unsigned long)regs->ctx.Flags; + + if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) + data |= USER_GP_REGS_STATUS_CATCH_BIT; + + return data; +} + int metag_gp_regs_copyout(const struct pt_regs *regs, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) @@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs, if (ret) goto out; /* TXSTATUS */ - data = (unsigned long)regs->ctx.Flags; - if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) - data |= USER_GP_REGS_STATUS_CATCH_BIT; + data = user_txstatus(regs); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &data, 4*25, 4*26); if (ret) @@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs, if (ret) goto out; /* TXSTATUS */ + data = user_txstatus(regs); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &data, 4*25, 4*26); if (ret) @@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs, unsigned long long *ptr; int ret, i; + if (count < 4*13) + return -EINVAL; /* Read the entire pipeline before making any changes */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &rp, 0, 4*13); @@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { int ret; - void __user *tls; + void __user *tls = target->thread.tls_ptr; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); if (ret) diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index b3ebfe9c8e8..2792fc62108 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -29,7 +29,6 @@ COPY \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ FIXUP \ " MOVT D1Ar1,#HI(1b)\n" \ " JUMP D1Ar1,#LO(1b)\n" \ @@ -260,27 +259,31 @@ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #32\n" \ "23:\n" \ - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "SUB %3, %3, #32\n" \ "24:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "25:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #32\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "25:\n" \ + "27:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "28:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ "SUB %3, %3, #32\n" \ - "27:\n" \ + "30:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "28:\n" \ + "31:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %0, %0, #8\n" \ - "29:\n" \ + "33:\n" \ "SETL [%0++], D0.7, D1.7\n" \ "SUB %3, %3, #32\n" \ "1:" \ @@ -312,11 +315,15 @@ " .long 26b,3b\n" \ " .long 27b,3b\n" \ " .long 28b,3b\n" \ - " .long 29b,4b\n" \ + " .long 29b,3b\n" \ + " .long 30b,3b\n" \ + " .long 31b,3b\n" \ + " .long 32b,3b\n" \ + " .long 33b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), 
"=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -342,7 +349,7 @@ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -403,47 +410,55 @@ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #16\n" \ "23:\n" \ - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "24:\n" \ - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ - "25:\n" \ + "24:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "25:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #16\n" \ "27:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "28:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ + "SUB %3, %3, #16\n" \ + "30:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "31:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %3, %3, #16\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "29:\n" \ + "33:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "30:\n" \ + "34:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "35:\n" \ "SUB %3, %3, #16\n" \ - "31:\n" \ + "36:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "32:\n" \ + "37:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "38:\n" \ "SUB %3, %3, #16\n" \ - "33:\n" \ + "39:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "34:\n" \ + "40:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "41:\n" \ "SUB %3, %3, #16\n" \ - "35:\n" \ + "42:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "36:\n" \ + "43:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "44:\n" \ "SUB %0, %0, #4\n" \ - "37:\n" \ + "45:\n" \ "SETD [%0++], D0.7\n" \ "SUB %3, %3, #16\n" \ "1:" \ @@ -483,11 +498,19 @@ " .long 34b,3b\n" \ " .long 35b,3b\n" \ " .long 36b,3b\n" \ - " .long 37b,4b\n" \ + " .long 37b,3b\n" \ + " .long 38b,3b\n" \ + " .long 39b,3b\n" \ + " .long 40b,3b\n" \ + " .long 41b,3b\n" \ + " .long 42b,3b\n" \ + " .long 43b,3b\n" \ + " .long 44b,3b\n" \ + " .long 45b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -513,7 +536,7 @@ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, if ((unsigned long) src & 1) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ while (n > 0) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ while (n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } } @@ -569,6 
+600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } if (n >= RAPF_MIN_BUF_SIZE) { @@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } #endif @@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 16) { __asm_copy_to_user_16(dst, src, retn); n -= 16; + if (retn) + return retn + n; } while (n >= 4) { __asm_copy_to_user_4(dst, src, retn); n -= 4; + if (retn) + return retn + n; } switch (n) { @@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, break; } + /* + * If we get here, retn correctly reflects the number of failing + * bytes. + */ return retn; } EXPORT_SYMBOL(__copy_user); @@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_user_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "2: SETB [%0++],D1Ar1\n", \ - "3: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "3: ADD %2,%2,#1\n", \ " .long 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETW D1Ar1,[%1++]\n" \ "2: SETW [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#2\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_2(to, from, ret) \ @@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_from_user_2x_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "5: ADD %2,%2,#1\n", \ " .long 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETD D1Ar1,[%1++]\n" \ "2: SETD [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#4\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_4(to, from, ret) \ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") -#define __asm_copy_from_user_5(to, from, ret) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 4b,5b\n") - -#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "4: SETW [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_6(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_7(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "4: SETD [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_8(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_9(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 
6b,7b\n") - -#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "6: SETW [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_10(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_11(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "6: SETD [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_12(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_13(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "8: SETW [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_14(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_15(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "10: SETB [%0++],D1Ar1\n", \ - "11: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 10b,11b\n") - -#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "8: SETD [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_16(to, from, ret) \ - __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") - #define __asm_copy_from_user_8x64(to, from, ret) \ asm volatile ( \ " GETL D0Ar2,D1Ar1,[%1++]\n" \ "2: SETL [%0++],D0Ar2,D1Ar1\n" \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ - " MOV D0Ar2,#0\n" \ "3: ADD %2,%2,#8\n" \ - " SETL [%0++],D0Ar2,D1Ar1\n" \ " MOVT D0Ar2,#HI(1b)\n" \ " JUMP D0Ar2,#LO(1b)\n" \ " .previous\n" \ @@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user); * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 8 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*8 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. 
*/ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #8\n") + "LSR D0Ar2, D0Ar2, #5\n" \ + "ANDS D0Ar2, D0Ar2, #0x38\n" \ + "ADDZ D0Ar2, D0Ar2, #32\n" \ + "SUB %1, %1, D0Ar2\n") /* rewind 'from' pointer when a fault occurs * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 4 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*4 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. */ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #4\n") + "LSR D0Ar2, D0Ar2, #6\n" \ + "ANDS D0Ar2, D0Ar2, #0x1c\n" \ + "ADDZ D0Ar2, D0Ar2, #16\n" \ + "SUB %1, %1, D0Ar2\n") -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. The return-value is the number of bytes that were - inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, - unsigned long n) +/* + * Copy from user to kernel. The return-value is the number of bytes that were + * inaccessible. + */ +unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, + unsigned long n) { register char *dst asm ("A0.2") = pdst; register const char __user *src asm ("A1.2") = psrc; @@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, if ((unsigned long) src & 1) { __asm_copy_from_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ @@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_1(dst, src, retn); n--; if (retn) - goto copy_exception_bytes; + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ @@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_2(dst, src, retn); n -= 2; if (retn) - goto copy_exception_bytes; + return retn + n; } } - /* We only need one check after the unalignment-adjustments, - because if both adjustments were done, either both or - neither reference had an exception. 
*/ - if (retn != 0) - goto copy_exception_bytes; - #ifdef USE_RAPF /* 64 bit copy loop */ if (!(((unsigned long) src | (unsigned long) dst) & 7)) { @@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } @@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } #endif @@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + return retn + n; } /* If we get here, there were no memory read faults. */ @@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, /* If we get here, retn correctly reflects the number of failing bytes. */ return retn; - - copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. */ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - - return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(raw_copy_from_user); #define __asm_clear_8x64(to, ret) \ asm volatile ( \ diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 64e08df51d6..8b700413249 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) ADD src, src, 16*NBYTES EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) ADD dst, dst, 16*NBYTES -EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) -EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) -EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) -EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) +EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) +EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) +EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16) +EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) -EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) -EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) -EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) -EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) +EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) +EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) +EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) +EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) @@ -383,6 +383,10 @@ done: nop END(memcpy) +l_exc_copy_rewind16: + /* Rewind src and dst by 16*NBYTES for l_exc_copy */ + SUB src, src, 16*NBYTES + SUB dst, dst, 16*NBYTES l_exc_copy: /* * Copy bytes from src until faulting load address (or until a diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 0e36abcd39c..7446284dd7b 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m # CONFIG_MLX4_DEBUG is not set CONFIG_TEHUTI=m CONFIG_BNX2X=m -CONFIG_QLGE=m CONFIG_SFC=m CONFIG_BE2NET=m CONFIG_LIBERTAS_THINFIRM=m diff --git a/arch/mips/include/asm/checksum.h 
b/arch/mips/include/asm/checksum.h index ac3d2b8a20d..d48cf440010 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -155,7 +155,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, " daddu %0, %4 \n" " dsll32 $1, %0, 0 \n" " daddu %0, $1 \n" + " sltu $1, %0, $1 \n" " dsra32 %0, %0, 0 \n" + " addu %0, $1 \n" #endif " .set pop" : "=r" (sum) diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 93aa302948d..c68312947ed 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -15,12 +15,22 @@ static int crashing_cpu = -1; static cpumask_t cpus_in_crash = CPU_MASK_NONE; #ifdef CONFIG_SMP -static void crash_shutdown_secondary(void *ignore) +static void crash_shutdown_secondary(void *passed_regs) { - struct pt_regs *regs; + struct pt_regs *regs = passed_regs; int cpu = smp_processor_id(); - regs = task_pt_regs(current); + /* + * If we are passed registers, use those. Otherwise get the + * regs from the last interrupt, which should be correct, as + * we are in an interrupt. But if the regs are not there, + * pull them from the top of the stack. They are probably + * wrong, but we need something to keep from crashing again. + */ + if (!regs) + regs = get_irq_regs(); + if (!regs) + regs = task_pt_regs(current); if (!cpu_online(cpu)) return; diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index fcaac2f132f..910db386d9e 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -236,9 +236,6 @@ static int compute_signal(int tt) void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { int reg; - struct thread_info *ti = task_thread_info(p); - unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32; - struct pt_regs *regs = (struct pt_regs *)ksp - 1; #if (KGDB_GDB_REG_SIZE == 32) u32 *ptr = (u32 *)gdb_regs; #else @@ -246,25 +243,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) #endif for (reg = 0; reg < 16; reg++) - *(ptr++) = regs->regs[reg]; + *(ptr++) = 0; /* S0 - S7 */ - for (reg = 16; reg < 24; reg++) - *(ptr++) = regs->regs[reg]; + *(ptr++) = p->thread.reg16; + *(ptr++) = p->thread.reg17; + *(ptr++) = p->thread.reg18; + *(ptr++) = p->thread.reg19; + *(ptr++) = p->thread.reg20; + *(ptr++) = p->thread.reg21; + *(ptr++) = p->thread.reg22; + *(ptr++) = p->thread.reg23; for (reg = 24; reg < 28; reg++) *(ptr++) = 0; /* GP, SP, FP, RA */ - for (reg = 28; reg < 32; reg++) - *(ptr++) = regs->regs[reg]; - - *(ptr++) = regs->cp0_status; - *(ptr++) = regs->lo; - *(ptr++) = regs->hi; - *(ptr++) = regs->cp0_badvaddr; - *(ptr++) = regs->cp0_cause; - *(ptr++) = regs->cp0_epc; + *(ptr++) = (long)p; + *(ptr++) = p->thread.reg29; + *(ptr++) = p->thread.reg30; + *(ptr++) = p->thread.reg31; + + *(ptr++) = p->thread.cp0_status; + + /* lo, hi */ + *(ptr++) = 0; + *(ptr++) = 0; + + /* + * BadVAddr, Cause + * Ideally these would come from the last exception frame up the stack + * but that requires unwinding, otherwise we can't know much for sure. + */ + *(ptr++) = 0; + *(ptr++) = 0; + + /* + * PC + * use return address (RA), i.e. 
the moment after return from resume() + */ + *(ptr++) = p->thread.reg31; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c6a041d9d05..3cfa3bc288f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -214,11 +214,9 @@ struct mips_frame_info { #define J_TARGET(pc,target) \ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) -static inline int is_ra_save_ins(union mips_instruction *ip) +static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) { #ifdef CONFIG_CPU_MICROMIPS - union mips_instruction mmi; - /* * swsp ra,offset * swm16 reglist,offset(sp) @@ -228,29 +226,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip) * * microMIPS is way more fun... */ - if (mm_insn_16bit(ip->halfword[0])) { - mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && - mmi.mm16_r5_format.rt == 31) || - (mmi.mm16_m_format.opcode == mm_pool16c_op && - mmi.mm16_m_format.func == mm_swm16_op)); + if (mm_insn_16bit(ip->halfword[1])) { + switch (ip->mm16_r5_format.opcode) { + case mm_swsp16_op: + if (ip->mm16_r5_format.rt != 31) + return 0; + + *poff = ip->mm16_r5_format.simmediate; + *poff = (*poff << 2) / sizeof(ulong); + return 1; + + case mm_pool16c_op: + switch (ip->mm16_m_format.func) { + case mm_swm16_op: + *poff = ip->mm16_m_format.imm; + *poff += 1 + ip->mm16_m_format.rlist; + *poff = (*poff << 2) / sizeof(ulong); + return 1; + + default: + return 0; + } + + default: + return 0; + } } - else { - mmi.halfword[0] = ip->halfword[1]; - mmi.halfword[1] = ip->halfword[0]; - return ((mmi.mm_m_format.opcode == mm_pool32b_op && - mmi.mm_m_format.rd > 9 && - mmi.mm_m_format.base == 29 && - mmi.mm_m_format.func == mm_swm32_func) || - (mmi.i_format.opcode == mm_sw32_op && - mmi.i_format.rs == 29 && - mmi.i_format.rt == 31)); + + switch (ip->i_format.opcode) { + case mm_sw32_op: + if (ip->i_format.rs != 29) + return 0; + if (ip->i_format.rt != 31) + return 0; + + *poff = ip->i_format.simmediate / sizeof(ulong); + return 1; + + case mm_pool32b_op: + switch (ip->mm_m_format.func) { + case mm_swm32_func: + if (ip->mm_m_format.rd < 0x10) + return 0; + if (ip->mm_m_format.base != 29) + return 0; + + *poff = ip->mm_m_format.simmediate; + *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); + *poff /= sizeof(ulong); + return 1; + default: + return 0; + } + + default: + return 0; } #else /* sw / sd $ra, offset($sp) */ - return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && - ip->i_format.rs == 29 && - ip->i_format.rt == 31; + if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && + ip->i_format.rs == 29 && ip->i_format.rt == 31) { + *poff = ip->i_format.simmediate / sizeof(ulong); + return 1; + } + + return 0; #endif } @@ -265,13 +305,16 @@ static inline int is_jump_ins(union mips_instruction *ip) * * microMIPS is kind of more fun... 
*/ - union mips_instruction mmi; - - mmi.word = (ip->halfword[0] << 16); + if (mm_insn_16bit(ip->halfword[1])) { + if ((ip->mm16_r5_format.opcode == mm_pool16c_op && + (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) + return 1; + return 0; + } - if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && - (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || - ip->j_format.opcode == mm_jal32_op) + if (ip->j_format.opcode == mm_j32_op) + return 1; + if (ip->j_format.opcode == mm_jal32_op) return 1; if (ip->r_format.opcode != mm_pool32a_op || ip->r_format.func != mm_pool32axf_op) @@ -299,15 +342,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) * * microMIPS is not more fun... */ - if (mm_insn_16bit(ip->halfword[0])) { - union mips_instruction mmi; - - mmi.word = (ip->halfword[0] << 16); - return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && - mmi.mm16_r3_format.simmediate && mm_addiusp_func) || - (mmi.mm16_r5_format.opcode == mm_pool16d_op && - mmi.mm16_r5_format.rt == 29)); + if (mm_insn_16bit(ip->halfword[1])) { + return (ip->mm16_r3_format.opcode == mm_pool16d_op && + ip->mm16_r3_format.simmediate && mm_addiusp_func) || + (ip->mm16_r5_format.opcode == mm_pool16d_op && + ip->mm16_r5_format.rt == 29); } + return (ip->mm_i_format.opcode == mm_addiu32_op && ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); #else @@ -322,30 +363,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip) static int get_frame_info(struct mips_frame_info *info) { -#ifdef CONFIG_CPU_MICROMIPS - union mips_instruction *ip = (void *) (((char *) info->func) - 1); -#else - union mips_instruction *ip = info->func; -#endif - unsigned max_insns = info->func_size / sizeof(union mips_instruction); - unsigned i; + bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); + union mips_instruction insn, *ip, *ip_end; + const unsigned int max_insns = 128; + unsigned int i; info->pc_offset = -1; info->frame_size = 0; + ip = (void *)msk_isa16_mode((ulong)info->func); if (!ip) goto err; - if (max_insns == 0) - max_insns = 128U; /* unknown function size */ - max_insns = min(128U, max_insns); - - for (i = 0; i < max_insns; i++, ip++) { + ip_end = (void *)ip + info->func_size; + + for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { + insn.halfword[0] = 0; + insn.halfword[1] = ip->halfword[0]; + } else if (is_mmips) { + insn.halfword[0] = ip->halfword[1]; + insn.halfword[1] = ip->halfword[0]; + } else { + insn.word = ip->word; + } - if (is_jump_ins(ip)) + if (is_jump_ins(&insn)) break; + if (!info->frame_size) { - if (is_sp_move_ins(ip)) + if (is_sp_move_ins(&insn)) { #ifdef CONFIG_CPU_MICROMIPS if (mm_insn_16bit(ip->halfword[0])) @@ -368,11 +415,9 @@ static int get_frame_info(struct mips_frame_info *info) } continue; } - if (info->pc_offset == -1 && is_ra_save_ins(ip)) { - info->pc_offset = - ip->i_format.simmediate / sizeof(long); + if (info->pc_offset == -1 && + is_ra_save_ins(&insn, &info->pc_offset)) break; - } } if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 5ab9e96d522..7f463465b4e 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 
8c9b631d2a7..8c00e6c0626 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h @@ -6,7 +6,7 @@ #endif #include -#include /* for BITS_PER_LONG/SHIFT_PER_LONG */ +#include #include #include @@ -16,6 +16,12 @@ * to include/asm-i386/bitops.h or kerneldoc */ +#if __BITS_PER_LONG == 64 +#define SHIFT_PER_LONG 6 +#else +#define SHIFT_PER_LONG 5 +#endif + #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h index 75196b415d3..540c94de442 100644 --- a/arch/parisc/include/uapi/asm/bitsperlong.h +++ b/arch/parisc/include/uapi/asm/bitsperlong.h @@ -9,10 +9,8 @@ */ #if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) #define __BITS_PER_LONG 64 -#define SHIFT_PER_LONG 6 #else #define __BITS_PER_LONG 32 -#define SHIFT_PER_LONG 5 #endif #include diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h index e78403b129e..928e1bbac98 100644 --- a/arch/parisc/include/uapi/asm/swab.h +++ b/arch/parisc/include/uapi/asm/swab.h @@ -1,6 +1,7 @@ #ifndef _PARISC_SWAB_H #define _PARISC_SWAB_H +#include #include #include @@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) } #define __arch_swab32 __arch_swab32 -#if BITS_PER_LONG > 32 +#if __BITS_PER_LONG > 32 /* ** From "PA-RISC 2.0 Architecture", HP Professional Books. ** See Appendix I page 8 , "Endian Byte Swapping". @@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) return x; } #define __arch_swab64 __arch_swab64 -#endif /* BITS_PER_LONG > 32 */ +#endif /* __BITS_PER_LONG > 32 */ #endif /* _PARISC_SWAB_H */ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 56a4a5d205a..a008b872d4b 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -273,6 +273,14 @@ checkbin: echo 'disable kernel modules' ; \ false ; \ fi + @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \ + && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \ + echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ + echo 'in some circumstances.' ; \ + echo -n '*** Please use a different binutils version.' ; \ + false ; \ + fi + CLEAN_FILES += $(TOUT) diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S index b6fcbaf5027..3dc44b05fb9 100644 --- a/arch/powerpc/boot/ps3-head.S +++ b/arch/powerpc/boot/ps3-head.S @@ -57,11 +57,6 @@ __system_reset_overlay: bctr 1: - /* Save the value at addr zero for a null pointer write check later. */ - - li r4, 0 - lwz r3, 0(r4) - /* Primary delays then goes to _zimage_start in wrapper. 
*/ or 31, 31, 31 /* db16cyc */ diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c index 9954d98871d..029ea3ce158 100644 --- a/arch/powerpc/boot/ps3.c +++ b/arch/powerpc/boot/ps3.c @@ -119,13 +119,12 @@ void ps3_copy_vectors(void) flush_cache((void *)0x100, 512); } -void platform_init(unsigned long null_check) +void platform_init(void) { const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ void *chosen; unsigned long ft_addr; u64 rm_size; - unsigned long val; console_ops.write = ps3_console_write; platform_ops.exit = ps3_exit; @@ -153,11 +152,6 @@ void platform_init(unsigned long null_check) printf(" flat tree at 0x%lx\n\r", ft_addr); - val = *(unsigned long *)0; - - if (val != null_check) - printf("null check failed: %lx != %lx\n\r", val, null_check); - ((kernel_entry_t)0)(ft_addr, 0, NULL); ps3_exit(); diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 52e5758ea36..b3bab9575d3 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -25,6 +25,7 @@ #include #include #include +#include struct aligninfo { unsigned char len; @@ -764,14 +765,25 @@ int fix_alignment(struct pt_regs *regs) nb = aligninfo[instr].len; flags = aligninfo[instr].flags; - /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ - if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { - nb = 8; - flags = LD+SW; - } else if (IS_XFORM(instruction) && - ((instruction >> 1) & 0x3ff) == 660) { - nb = 8; - flags = ST+SW; + /* + * Handle some cases which give overlaps in the DSISR values. + */ + if (IS_XFORM(instruction)) { + switch (get_xop(instruction)) { + case 532: /* ldbrx */ + nb = 8; + flags = LD+SW; + break; + case 660: /* stdbrx */ + nb = 8; + flags = ST+SW; + break; + case 20: /* lwarx */ + case 84: /* ldarx */ + case 116: /* lharx */ + case 276: /* lqarx */ + return 0; /* not emulated ever */ + } } /* Byteswap little endian loads and stores */ diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index f0b47d1a6b0..7531f9abf10 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -228,8 +228,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); bp = __get_cpu_var(bp_per_reg); - if (!bp) + if (!bp) { + rc = NOTIFY_DONE; goto out; + } info = counter_arch_bp(bp); /* diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 8220baa46fa..a1812fbc264 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn) static int ibmebus_create_devices(const struct of_device_id *matches) { struct device_node *root, *child; + struct device *dev; int ret = 0; root = of_find_node_by_path("/"); @@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches) if (!of_match_node(matches, child)) continue; - if (bus_find_device(&ibmebus_bus_type, NULL, child, - ibmebus_match_node)) + dev = bus_find_device(&ibmebus_bus_type, NULL, child, + ibmebus_match_node); + if (dev) { + put_device(dev); continue; + } ret = ibmebus_create_device(child); if (ret) { @@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, const char *buf, size_t count) { struct device_node *dn = NULL; + struct device *dev; char *path; ssize_t rc = 0; @@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, if (!path) return -ENOMEM; - if (bus_find_device(&ibmebus_bus_type, NULL, path, - ibmebus_match_path)) { 
+ dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path); + if (dev) { + put_device(dev); printk(KERN_WARNING "%s: %s has already been probed\n", __func__, path); rc = -EEXIST; @@ -306,6 +313,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, ibmebus_match_path))) { of_device_unregister(to_platform_device(dev)); + put_device(dev); kfree(path); return count; diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index e11863f4e59..ccef1728a4c 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -94,7 +94,7 @@ _GLOBAL(power7_nap) std r0,0(r1) ptesync ld r0,0(r1) -1: cmp cr0,r0,r0 +1: cmpd cr0,r0,r0 bne 1b PPC_NAP b . diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index e469f30e6ee..ad8573f053d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -295,7 +295,7 @@ _GLOBAL(flush_instruction_cache) lis r3, KERNELBASE@h iccci 0,r3 #endif -#elif CONFIG_FSL_BOOKE +#elif defined(CONFIG_FSL_BOOKE) BEGIN_FTR_SECTION mfspr r3,SPRN_L1CSR0 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 389fb8077cc..1d3d3d65367 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -142,6 +142,15 @@ static void check_smt_enabled(void) of_node_put(dn); } } + + /* + * Fixup HFSCR:TM based on CPU features. The bit is set by our + * early asm init because at that point we haven't updated our + * CPU features from firmware and device-tree. Here we have, + * so let's do it. + */ + if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); } /* Look for smt-enabled= cmdline option */ diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 631a2650e4e..50b482bcbea 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -511,7 +511,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); - kvmppc_core_queue_program(vcpu, 0); } } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 7ce9cf3b698..887365a82c0 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index c4c6a1cf221..05ab8824925 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -138,31 +138,34 @@ static void check_ipl_parmblock(void *start, unsigned long size) unsigned long decompress_kernel(void) { - unsigned long output_addr; - unsigned char *output; + void *output, *kernel_end; - output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; - check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); - memset(&_bss, 0, &_ebss - &_bss); - free_mem_ptr = (unsigned long)&_end; - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - output = (unsigned char *) output_addr; + output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, 
PAGE_SIZE); + kernel_end = output + SZ__bss_start; + check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); #ifdef CONFIG_BLK_DEV_INITRD /* * Move the initrd right behind the end of the decompressed - * kernel image. + * kernel image. This also prevents initrd corruption caused by + * bss clearing since kernel_end will always be located behind the + * current bss section.. */ - if (INITRD_START && INITRD_SIZE && - INITRD_START < (unsigned long) output + SZ__bss_start) { - check_ipl_parmblock(output + SZ__bss_start, - INITRD_START + INITRD_SIZE); - memmove(output + SZ__bss_start, - (void *) INITRD_START, INITRD_SIZE); - INITRD_START = (unsigned long) output + SZ__bss_start; + if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { + check_ipl_parmblock(kernel_end, INITRD_SIZE); + memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = (unsigned long) kernel_end; } #endif + /* + * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be + * initialized afterwards since they reside in bss. + */ + memset(&_bss, 0, &_ebss - &_bss); + free_mem_ptr = (unsigned long) &_end; + free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; + puts("Uncompressing Linux... "); decompress(input_data, input_len, NULL, NULL, output, NULL, error); puts("Ok, booting the kernel.\n"); diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 6b499870662..52ef30cfedf 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -43,14 +43,17 @@ extern void execve_tail(void); #ifndef CONFIG_64BIT #define TASK_SIZE (1UL << 31) +#define TASK_MAX_SIZE (1UL << 31) #define TASK_UNMAPPED_BASE (1UL << 30) #else /* CONFIG_64BIT */ -#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) +#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \ + (tsk)->mm->context.asce_limit : TASK_MAX_SIZE) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? 
\ (1UL << 30) : (1UL << 41)) #define TASK_SIZE TASK_SIZE_OF(current) +#define TASK_MAX_SIZE (1UL << 53) #endif /* CONFIG_64BIT */ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index a938b548f07..14a77e6d8fc 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, if ((from | to | len) & (PMD_SIZE - 1)) return -EINVAL; - if (len == 0 || from + len > PGDIR_SIZE || + if (len == 0 || from + len > TASK_MAX_SIZE || from + len < from || to + len < to) return -EINVAL; diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index f8e69d5bc0a..aae199b3e04 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -416,7 +416,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) zdev->dma_table = dma_alloc_cpu_table(); if (!zdev->dma_table) { rc = -ENOMEM; - goto out_clean; + goto out; } zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; @@ -429,7 +429,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) bitmap_order); if (!zdev->iommu_bitmap) { rc = -ENOMEM; - goto out_reg; + goto free_dma_table; } rc = zpci_register_ioat(zdev, @@ -438,12 +438,16 @@ int zpci_dma_init_device(struct zpci_dev *zdev) zdev->start_dma + zdev->iommu_size - 1, (u64) zdev->dma_table); if (rc) - goto out_reg; - return 0; + goto free_bitmap; -out_reg: + return 0; +free_bitmap: + vfree(zdev->iommu_bitmap); + zdev->iommu_bitmap = NULL; +free_dma_table: dma_free_cpu_table(zdev->dma_table); -out_clean: + zdev->dma_table = NULL; +out: return rc; } diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6777177807c..7df7d594418 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 7ff45e4ba68..875ddf00dab 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -308,7 +308,7 @@ static int genregs64_set(struct task_struct *target, } if (!ret) { - unsigned long y; + unsigned long y = regs->y; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &y, diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 21bca2152ea..ea80c0b003f 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -119,7 +119,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 96399646570..64ee8884f37 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -118,7 +118,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 
addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index 0f83ed4602b..d0dac73a2d8 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c @@ -110,7 +110,7 @@ static int tile_gpr_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { int ret; - struct pt_regs regs; + struct pt_regs regs = *task_pt_regs(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, sizeof(regs)); diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 0ac3599e578..d4352152337 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -302,7 +302,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 4bcf841e470..3deb8e53335 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -218,6 +218,29 @@ static int ghash_async_final(struct ahash_request *req) } } +static int ghash_async_import(struct ahash_request *req, const void *in) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + ghash_async_init(req); + memcpy(dctx, in, sizeof(*dctx)); + return 0; + +} + +static int ghash_async_export(struct ahash_request *req, void *out) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + memcpy(out, dctx, sizeof(*dctx)); + return 0; + +} + static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -285,8 +308,11 @@ static struct ahash_alg ghash_async_alg = { .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, + .export = ghash_async_export, + .import = ghash_async_import, .halg = { .digestsize = GHASH_DIGEST_SIZE, + .statesize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-clmulni", diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 01f15b227d7..2fa7f4f6ecb 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -272,7 +272,7 @@ struct task_struct; #define ARCH_DLINFO_IA32(vdso_enabled) \ do { \ - if (vdso_enabled) { \ + if (VDSO_CURRENT_BASE) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ } \ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 81e0fe48b9b..7e09789d2cf 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1066,7 +1066,7 @@ static __init int setup_disablecpuid(char *arg) { int bit; - if (get_option(&arg, &bit) && bit < NCAPINTS*32) + if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) setup_clear_cpu_cap(bit); else return 0; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c
b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 9cb52767999..338a4ae486b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -51,7 +51,7 @@ static const char * const th_names[] = { "load_store", "insn_fetch", "combined_unit", - "", + "decode_unit", "northbridge", "execution_unit", }; diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5c38e2b298c..c502340ef27 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1103,8 +1103,8 @@ ftrace_graph_call: jmp ftrace_stub #endif -.globl ftrace_stub -ftrace_stub: +/* This is weak to keep gas from relaxing the jumps */ +WEAK(ftrace_stub) ret END(ftrace_caller) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 6ed8f16fd61..cc89b36e556 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif -GLOBAL(ftrace_stub) +/* This is weak to keep gas from relaxing the jumps */ +WEAK(ftrace_stub) retq END(ftrace_caller) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 1ffc32dbe45..8c43930ce1a 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -744,6 +744,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long return_hooker = (unsigned long) &return_to_handler; + /* + * When resuming from suspend-to-ram, this function can be indirectly + * called from early CPU startup code while the CPU is in real mode, + * which would fail miserably. Make sure the stack pointer is a + * virtual address. + * + * This check isn't as accurate as virt_addr_valid(), but it should be + * good enough for this purpose, and it's fast. + */ + if (unlikely((long)__builtin_frame_address(0) >= 0)) + return; + if (unlikely(atomic_read(&current->tracing_graph_pause))) return; diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 30277e27431..d050393d3be 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -127,7 +127,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -166,7 +166,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index ddad189e596..c96485054f6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -906,6 +906,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } +static int segmented_write_std(struct x86_emulate_ctxt *ctxt, + struct segmented_address addr, + void *data, + unsigned int size) +{ + int rc; + ulong linear; + + rc = linearize(ctxt, addr, size, true, &linear); + if (rc != X86EMUL_CONTINUE) + return rc; + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); +} + /* * Fetch the next byte of the instruction being emulated which is pointed to * by ctxt->_eip, then increment ctxt->_eip.
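Many hunks in this series, including the sys_x86_64.c ones just above and the arc, sh, sparc, tile, x86 and xtensa ones elsewhere, make the same one-line change: the free-space check in each arch_get_unmapped_area()/hugetlb_get_unmapped_area() variant now compares the requested range against vm_start_gap(vma) instead of vma->vm_start. For reference, the sketch below shows what that helper looks like in the upstream stack-guard-gap series (include/linux/mm.h); the exact definition carried in this tree may differ slightly, so treat it as illustrative rather than authoritative.

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	/*
	 * For a downward-growing stack, pretend the VMA begins
	 * stack_guard_gap bytes earlier, clamping at address zero,
	 * so a fixed hint cannot be placed inside the guard area.
	 */
	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

With this helper in place, an address hint that would end inside the guard region below a growing stack fails the check and the allocator falls back to its normal search path.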
@@ -1599,7 +1613,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, &ctxt->exception); } -/* Does not support long mode */ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { @@ -1612,6 +1625,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, int ret; u16 dummy; + + /* + * None of MOV, POP and LSS can load a NULL selector in CPL=3, but + * they can load it at CPL<3 (Intel's manual says only LSS can, + * but it's wrong). + * + * However, the Intel manual says that putting IST=1/DPL=3 in + * an interrupt gate will result in SS=3 (the AMD manual instead + * says it doesn't), so allow SS=3 in __load_segment_descriptor + * and only forbid it here. + */ + if (seg == VCPU_SREG_SS && selector == 3 && + ctxt->mode == X86EMUL_MODE_PROT64) + return emulate_exception(ctxt, GP_VECTOR, 0, true); + memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { @@ -1634,20 +1662,34 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, rpl = selector & 3; cpl = ctxt->ops->cpl(ctxt); - /* NULL selector is not valid for TR, CS and SS (except for long mode) */ - if ((seg == VCPU_SREG_CS - || (seg == VCPU_SREG_SS - && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) - || seg == VCPU_SREG_TR) - && null_selector) - goto exception; - /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; - if (null_selector) /* for NULL selector skip all following checks */ + /* NULL selector is not valid for TR, CS and (except for long mode) SS */ + if (null_selector) { + if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) + goto exception; + + if (seg == VCPU_SREG_SS) { + if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl) + goto exception; + + /* + * ctxt->ops->set_segment expects the CPL to be in + * SS.DPL, so fake an expand-up 32-bit data segment. + */ + seg_desc.type = 3; + seg_desc.p = 1; + seg_desc.s = 1; + seg_desc.dpl = cpl; + seg_desc.d = 1; + seg_desc.g = 1; + } + + /* Skip all following checks */ goto load; + } ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) @@ -3333,8 +3375,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, } /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; - return segmented_write(ctxt, ctxt->dst.addr.mem, - &desc_ptr, 2 + ctxt->op_bytes); + return segmented_write_std(ctxt, ctxt->dst.addr.mem, + &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7e9ca58ae87..d9016e4a80f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1047,10 +1047,10 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } -static inline bool is_exception(u32 intr_info) +static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); + == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); @@ -3074,7 +3074,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save) } vmcs_write16(sf->selector, var.selector); - vmcs_write32(sf->base, var.base); + vmcs_writel(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); } @@ -4716,7 +4716,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) if (is_machine_check(intr_info)) return handle_machine_check(vcpu); - if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) + if (is_nmi(intr_info)) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { @@ -6507,7 +6507,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: - if (!is_exception(intr_info)) + if (is_nmi(intr_info)) return 0; else if (is_page_fault(intr_info)) return enable_ept; @@ -6803,8 +6803,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ - if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && - (exit_intr_info & INTR_INFO_VALID_MASK)) { + if (is_nmi(exit_intr_info)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 4348803b553..1bb55700a24 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -349,7 +349,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 3595c758bac..2cd89be6bb4 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -67,22 +67,21 @@ static int mmap_is_legacy(void) static unsigned long mmap_rnd(void) { - unsigned long rnd = 0; - - if (current->flags & PF_RANDOMIZE) { - if (mmap_is_ia32()) -#ifdef CONFIG_COMPAT - rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); -#else - rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); -#endif - else - rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); - } + unsigned long rnd; + + /* + * 8 bits of randomness in 32bit mmaps, 20 address space bits + * 28 bits of randomness in 64bit mmaps, 40 address space bits + */ + if (mmap_is_ia32()) + rnd = (unsigned long)get_random_int() % (1<<8); + else + rnd = (unsigned long)get_random_int() % (1<<28); + return rnd << PAGE_SHIFT; } -static unsigned long 
mmap_base(void) +static unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); @@ -91,19 +90,7 @@ static unsigned long mmap_base(void) else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); -} - -/* - * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 - * does, but not when emulating X86_32 - */ -static unsigned long mmap_legacy_base(void) -{ - if (mmap_is_ia32()) - return TASK_UNMAPPED_BASE; - else - return TASK_UNMAPPED_BASE + mmap_rnd(); + return PAGE_ALIGN(TASK_SIZE - gap - rnd); } /* @@ -112,13 +99,18 @@ static unsigned long mmap_legacy_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { - mm->mmap_legacy_base = mmap_legacy_base(); - mm->mmap_base = mmap_base(); + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = mmap_rnd(); + + mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor; if (mmap_is_legacy()) { mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { + mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index a3b0265c2ca..63462c8db80 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */ + { + .callback = set_nouse_crs, + .ident = "Supermicro X8DTH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"), + DMI_MATCH(DMI_BIOS_VERSION, "2.0a"), + }, + }, /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ { diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 48e8461057b..6e4580b8760 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -227,23 +227,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 1; list_for_each_entry(msidesc, &dev->msi_list, list) { - __read_msi_msg(msidesc, &msg); - pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | - ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (msg.data != XEN_PIRQ_MSI_DATA || - xen_irq_from_pirq(pirq) < 0) { - pirq = xen_allocate_pirq_msi(dev, msidesc); - if (pirq < 0) { - irq = -ENODEV; - goto error; - } - xen_msi_compose_msg(dev, pirq, &msg); - __write_msi_msg(msidesc, &msg); - dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); - } else { - dev_dbg(&dev->dev, - "xen: msi already bound to pirq=%d\n", pirq); + pirq = xen_allocate_pirq_msi(dev, msidesc); + if (pirq < 0) { + irq = -ENODEV; + goto error; } + xen_msi_compose_msg(dev, pirq, &msg); + __write_msi_msg(msidesc, &msg); + dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSIX) ? 
"msi-x" : "msi", diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c index 1693107a518..0d17c0aafeb 100644 --- a/arch/x86/platform/goldfish/goldfish.c +++ b/arch/x86/platform/goldfish/goldfish.c @@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = { } }; +static bool goldfish_enable __initdata; + +static int __init goldfish_setup(char *str) +{ + goldfish_enable = true; + return 0; +} +__setup("goldfish", goldfish_setup); + static int __init goldfish_init(void) { + if (!goldfish_enable) + return -ENODEV; + platform_device_register_simple("goldfish_pdev_bus", -1, - goldfish_pdev_bus_resources, 2); + goldfish_pdev_bus_resources, 2); return 0; } device_initcall(goldfish_init); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 13e8935e2ea..e3600eb618c 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -338,11 +338,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta, WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); single.timeout_abs_ns = get_abs_timeout(delta); - single.flags = VCPU_SSHOTTMR_future; + /* Get an event anyway, even if the timeout is already expired */ + single.flags = 0; ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); - - BUG_ON(ret != 0 && ret != -ETIME); + BUG_ON(ret != 0); return ret; } diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index a5214542f31..6e1f4517f34 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -160,6 +160,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) __tagtable(BP_TAG_INITRD, parse_tag_initrd); +#endif /* CONFIG_BLK_DEV_INITRD */ + #ifdef CONFIG_OF static int __init parse_tag_fdt(const bp_tag_t *tag) @@ -179,8 +181,6 @@ void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) #endif /* CONFIG_OF */ -#endif /* CONFIG_BLK_DEV_INITRD */ - static int __init parse_tag_cmdline(const bp_tag_t* tag) { strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 5d3f7a119ed..1ff0b92eeae 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). 
*/ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/block/genhd.c b/block/genhd.c index fd1946b2f31..a240c76be9b 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -662,7 +662,6 @@ void del_gendisk(struct gendisk *disk) kobject_put(disk->part0.holder_dir); kobject_put(disk->slave_dir); - disk->driverfs_dev = NULL; if (!sysfs_deprecated) sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 1b4988b4bc1..9bfbb51aa75 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -175,6 +175,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) __set_bit(WRITE_16, filter->write_ok); __set_bit(WRITE_LONG, filter->write_ok); __set_bit(WRITE_LONG_2, filter->write_ok); + __set_bit(WRITE_SAME, filter->write_ok); + __set_bit(WRITE_SAME_16, filter->write_ok); + __set_bit(WRITE_SAME_32, filter->write_ok); __set_bit(ERASE, filter->write_ok); __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); __set_bit(MODE_SELECT, filter->write_ok); diff --git a/crypto/Makefile b/crypto/Makefile index 5d0b869b173..f4c1ff4fa4f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -2,8 +2,13 @@ # Cryptographic API # +# memneq MUST be built with -Os or -O0 to prevent early-return optimizations +# that will defeat memneq's actual purpose to prevent timing attacks. +CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 +CFLAGS_memneq.o := -Os + obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o +crypto-y := api.o cipher.o compress.o memneq.o obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o @@ -47,6 +52,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o +CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o @@ -67,6 +73,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o +CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o diff --git a/crypto/algapi.c b/crypto/algapi.c index daf2f653b13..8ea7a5dc383 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -337,6 +337,7 @@ int crypto_register_alg(struct crypto_alg *alg) struct crypto_larval *larval; int err; + alg->cra_flags &= ~CRYPTO_ALG_DEAD; err = crypto_check_alg(alg); if (err) return err; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index d11d431251f..63e154017f5 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -195,7 +195,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; struct ahash_request *req = &ctx->req; - char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; + char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? 
: 1]; struct sock *sk2; struct alg_sock *ask2; struct hash_ctx *ctx2; diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 4a6a0696f8a..1912b9be504 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "public_key.h" MODULE_LICENSE("GPL"); @@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size, } } - if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { + if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) { kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); return -EBADMSG; } - if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { + if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); return -EKEYREJECTED; } diff --git a/crypto/authenc.c b/crypto/authenc.c index a2cfae251dd..65bcd076b18 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -188,7 +188,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -227,7 +227,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -463,7 +463,7 @@ static int crypto_authenc_verify(struct aead_request *req, ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; } static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 16c225cb28c..a3ef98be206 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -247,7 +247,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -296,7 +296,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -336,7 +336,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -568,7 +568,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req) ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; + return crypto_memneq(ihash, ohash, authsize) ? 
-EBADMSG : 0; } static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/ccm.c b/crypto/ccm.c index c569c9c6afe..003bbbd21a2 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -364,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, if (!err) { err = crypto_ccm_auth(req, req->dst, cryptlen); - if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) + if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) err = -EBADMSG; } aead_request_complete(req, err); @@ -423,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) return err; /* verify */ - if (memcmp(authtag, odata, authsize)) + if (crypto_memneq(authtag, odata, authsize)) return -EBADMSG; return err; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index d85fab97551..acbe1b97843 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -606,6 +606,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; inst->alg.halg.digestsize = salg->digestsize; + inst->alg.halg.statesize = salg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; diff --git a/crypto/gcm.c b/crypto/gcm.c index a1ec756b843..49b6fb20cce 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req, crypto_xor(auth_tag, iauth_tag, 16); scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); - return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; + return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; } static void gcm_decrypt_done(struct crypto_async_request *areq, int err) diff --git a/crypto/memneq.c b/crypto/memneq.c new file mode 100644 index 00000000000..cd0162221c1 --- /dev/null +++ b/crypto/memneq.c @@ -0,0 +1,138 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan + * Daniel Borkmann + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= *(unsigned long *)a ^ *(unsigned long *)b; + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) + return ((*(unsigned long *)(a) ^ *(unsigned long *)(b)) + | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8))); + else if (sizeof(unsigned int) == 4) + return ((*(unsigned int *)(a) ^ *(unsigned int *)(b)) + | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4)) + | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8)) + | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12))); + else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + return ((*(unsigned char *)(a) ^ *(unsigned char *)(b)) + | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1)) + | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2)) + | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3)) + | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4)) + | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5)) + | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6)) + | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7)) + | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8)) + | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9)) + | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10)) + | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11)) + | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12)) + | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13)) + | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14)) + | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15))); +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations 
for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. + */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 97c949abfab..2af5b5a7d7e 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -2,7 +2,6 @@ # Makefile for the Linux ACPI interpreter # -ccflags-y := -Os ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 11441ad69de..276ea4727ad 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -173,7 +173,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, request_mem_region(addr, length, desc); } -static void __init acpi_reserve_resources(void) +static int __init acpi_reserve_resources(void) { acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, "ACPI PM1a_EVT_BLK"); @@ -202,7 +202,10 @@ static void __init acpi_reserve_resources(void) if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) acpi_request_region(&acpi_gbl_FADT.xgpe1_block, acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); + + return 0; } +fs_initcall_sync(acpi_reserve_resources); void acpi_os_printf(const char *fmt, ...) { @@ -1724,7 +1727,6 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { - acpi_reserve_resources(); kacpid_wq = alloc_workqueue("kacpid", 0, 1); kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 288bb270f8e..9954200c32d 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -211,6 +211,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state) return -EINVAL; /* The state of the list is 'on' IFF all resources are 'on'. 
*/ + cur_state = 0; list_for_each_entry(entry, list, node) { struct acpi_power_resource *resource = entry->resource; acpi_handle handle = resource->device.handle; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0dc9ff61d7c..e3ecaf4d64f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -1263,6 +1263,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) union acpi_object *dod = NULL; union acpi_object *obj; + if (!video->cap._DOD) + return AE_NOT_EXIST; + status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); if (!ACPI_SUCCESS(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index b256ff5b657..d9f45c821ac 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -4097,6 +4097,9 @@ static int mv_platform_probe(struct platform_device *pdev) host->iomap = NULL; hpriv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!hpriv->base) + return -ENOMEM; + hpriv->base -= SATAHC0_REG_BASE; #if defined(CONFIG_HAVE_CLK) diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index f72f52b4b1d..65e36873656 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -432,8 +432,11 @@ static int bcma_device_probe(struct device *dev) drv); int err = 0; + get_device(dev); if (adrv->probe) err = adrv->probe(core); + if (err) + put_device(dev); return err; } @@ -446,6 +449,7 @@ static int bcma_device_remove(struct device *dev) if (adrv->remove) adrv->remove(core); + put_device(dev); return 0; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 4e9262a1b7c..5643de97bee 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -594,10 +594,12 @@ config TELCLOCK controlling the behavior of this hardware. config DEVPORT - bool - depends on !M68K + bool "/dev/port character device" depends on ISA || PCI default y + help + Say Y here if you want to support the /dev/port device. The /dev/port + device is similar to /dev/mem, but for I/O ports. 
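The new DEVPORT help text above describes /dev/port as the I/O-port counterpart of /dev/mem: each byte offset in the device corresponds to one x86 I/O port. A minimal user-space sketch of that interface follows; it is a hypothetical example (the CMOS index/data ports 0x70/0x71 and the register number are only illustrative) and assumes an x86 machine with CONFIG_DEVPORT enabled and root or CAP_SYS_RAWIO privileges.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char idx = 0x00;	/* CMOS register 0: seconds (BCD) */
	unsigned char val;
	int fd = open("/dev/port", O_RDWR);

	if (fd < 0)
		return 1;
	lseek(fd, 0x70, SEEK_SET);	/* file offset == I/O port number */
	write(fd, &idx, 1);		/* select the CMOS register */
	lseek(fd, 0x71, SEEK_SET);
	read(fd, &val, 1);		/* read the register back */
	printf("CMOS seconds: %02x\n", val);
	close(fd);
	return 0;
}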
source "drivers/s390/char/Kconfig" diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 0913d79424d..6b619105dea 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -857,7 +857,11 @@ static int __init lp_setup (char *str) } else if (!strcmp(str, "auto")) { parport_nr[0] = LP_PARPORT_AUTO; } else if (!strcmp(str, "none")) { - parport_nr[parport_ptr++] = LP_PARPORT_NONE; + if (parport_ptr < LP_NO) + parport_nr[parport_ptr++] = LP_PARPORT_NONE; + else + printk(KERN_INFO "lp: too many ports, %s ignored.\n", + str); } else if (!strcmp(str, "reset")) { reset = 1; } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index ec3bd62eeaf..d69c63fdae6 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1129,6 +1129,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; struct scatterlist sg[1]; + void *data; + int ret; if (unlikely(early_put_chars)) return early_put_chars(vtermno, buf, count); @@ -1137,8 +1139,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) if (!port) return -EPIPE; - sg_init_one(sg, buf, count); - return __send_to_port(port, sg, 1, count, (void *)buf, false); + data = kmemdup(buf, count, GFP_ATOMIC); + if (!data) + return -ENOMEM; + + sg_init_one(sg, data, count); + ret = __send_to_port(port, sg, 1, count, data, false); + kfree(data); + return ret; } /* diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c index 917a3ab482f..e2e5e76e980 100644 --- a/drivers/clk/clk-wm831x.c +++ b/drivers/clk/clk-wm831x.c @@ -248,7 +248,7 @@ static int wm831x_clkout_is_enabled(struct clk_hw *hw) if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", ret); - return true; + return false; } return (ret & WM831X_CLKOUT_ENA) != 0; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 74f7e52a95f..e2ad01bf875 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -472,9 +472,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf) { unsigned int cur_freq = __cpufreq_get(policy->cpu); - if (!cur_freq) - return sprintf(buf, ""); - return sprintf(buf, "%u\n", cur_freq); + + if (cur_freq) + return sprintf(buf, "%u\n", cur_freq); + + return sprintf(buf, "\n"); } /** diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index bf416a8391a..0cba9273e6c 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -422,7 +422,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); /* Write ICV */ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index dda43cc4b6c..e9d8b235f68 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1793,6 +1793,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); + t_alg->ahash_alg.setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 62834322b33..4e7c97aa9e5 100644 
--- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev) ast_write32(ast, 0x10000, 0xfc600309); do { - ; + if (pci_channel_offline(dev->pdev)) + return -EIO; } while (ast_read32(ast, 0x10000) != 0x01); data = ast_read32(ast, 0x10004); @@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast_detect_chip(dev); if (ast->chip != AST1180) { - ast_get_dram_info(dev); + ret = ast_get_dram_info(dev); + if (ret) + goto out_free; ast->vram_size = ast_get_vram_info(dev); DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 977cfb35837..d3464f35f42 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -53,13 +53,9 @@ ast_is_vga_enabled(struct drm_device *dev) /* TODO 1180 */ } else { ch = ast_io_read8(ast, 0x43); - if (ch) { - ast_open_key(ast); - ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); - return ch & 0x04; - } + return !!(ch & 0x01); } - return 0; + return false; } #endif diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 53435a9d847..93c80d7143e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -428,6 +428,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; struct edid *edid; struct i2c_adapter *i2c; + bool ret = false; BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); @@ -444,17 +445,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) */ if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); - return true; + ret = true; + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } - - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); } kfree(edid); - return false; + return ret; } static enum drm_connector_status diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8814b0dbfc4..a7dbdec6899 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7052,9 +7052,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); - trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); + + queue_work(dev_priv->wq, &work->work); } void intel_finish_page_flip(struct drm_device *dev, int pipe) diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 973056b8620..b16e051e48f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -224,6 +224,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + mpllP = (mpllP >> 8) & 0xf; if (!mpllP) mpllP = 4; @@ -234,7 +235,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); - return clock; + return clock / 1000; } ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 
dd5e01f89f2..969acd36c40 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1253,7 +1253,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = max(start + size, (u32)256); + u32 end = min_t(u32, start + size, 256); u32 i; for (i = start; i < end; i++) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 0ac0a88860a..f1672f38898 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1866,7 +1866,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) struct ttm_buffer_object *bo; int ret = -EBUSY; int put_count; - uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); spin_lock(&glob->lru_lock); list_for_each_entry(bo, &glob->swap_lru, swap) { @@ -1904,7 +1903,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) if (unlikely(ret != 0)) goto out; - if ((bo->mem.placement & swap_placement) != swap_placement) { + if (bo->mem.mem_type != TTM_PL_SYSTEM || + bo->ttm->caching_state != tt_cached) { struct ttm_mem_reg evict_mem; evict_mem = bo->mem; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index c509d40c489..17a503ff260 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -69,8 +69,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, break; } default: - DRM_ERROR("Illegal vmwgfx get param request: %d\n", - param->param); return -EINVAL; } @@ -90,7 +88,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, void *bounce; int ret; - if (unlikely(arg->pad64 != 0)) { + if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 58281433974..12969378c06 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -677,11 +677,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 128; num_sizes = 0; - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; num_sizes += req->mip_levels[i]; + } - if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * - DRM_VMW_MAX_MIP_LEVELS) + if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || + num_sizes == 0) return -EINVAL; size = vmw_user_surface_size + 128 + diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index c4ef3bc726e..e299576004c 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) return rdesc; + if (*rsize < 4) + return rdesc; + for (i = 0; i < *rsize - 4; i++) if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { __u8 tmp; diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 12fc48c968e..34dbb9d6d85 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -790,7 +790,7 @@ static const struct hid_device_id lg_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), - .driver_data = LG_FF2 }, + .driver_data = LG_NOGET | LG_FF2 }, { 
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), .driver_data = LG_FF3 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index ccc2f36bb33..6584a4d6b88 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -326,6 +326,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) return ret; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observed that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. + */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 05e6a7d13d4..50e6ba9548b 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -114,7 +114,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, struct vmbus_channel_msginfo *open_info = NULL; void *in, *out; unsigned long flags; - int ret, t, err = 0; + int ret, err = 0; newchannel->onchannel_callback = onchannelcallback; newchannel->channel_callback_context = context; @@ -204,11 +204,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, goto error1; } - t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); - if (t == 0) { - err = -ETIMEDOUT; - goto error1; - } + wait_for_completion(&open_info->waitevent); if (open_info->response.open_result.status) @@ -391,7 +387,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, struct vmbus_channel_gpadl_header *gpadlmsg; struct vmbus_channel_gpadl_body *gpadl_body; struct vmbus_channel_msginfo *msginfo = NULL; - struct vmbus_channel_msginfo *submsginfo; + struct vmbus_channel_msginfo *submsginfo, *tmp; u32 msgcount; struct list_head *curr; u32 next_gpadl_handle; @@ -453,6 +449,13 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + if (msgcount > 1) { + list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist, + msglistentry) { + kfree(submsginfo); + } + } + kfree(msginfo); return ret; } diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index ae4923756d9..4e4cb3db323 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -154,7 +154,7 @@ int hv_init(void) /* See if the hypercall page is already set */ rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC); + virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); if (!virtaddr) goto cleanup; @@ -193,7 +193,7 @@ int hv_init(void) * * This routine is called normally during driver unloading or exiting.
*/ -void hv_cleanup(void) +void hv_cleanup(bool crash) { union hv_x64_msr_hypercall_contents hypercall_msr; @@ -203,7 +203,8 @@ void hv_cleanup(void) if (hv_context.hypercall_page) { hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); - vfree(hv_context.hypercall_page); + if (!crash) + vfree(hv_context.hypercall_page); hv_context.hypercall_page = NULL; } } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 694173f662d..d285165435d 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -673,7 +673,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) * If the pfn range we are dealing with is not in the current * "hot add block", move on. */ - if ((start_pfn >= has->end_pfn)) + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) continue; /* * If the current hot add-request extends beyond @@ -728,7 +728,7 @@ static unsigned long handle_pg_range(unsigned long pg_start, * If the pfn range we are dealing with is not in the current * "hot add block", move on. */ - if ((start_pfn >= has->end_pfn)) + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) continue; old_covered_state = has->covered_end_pfn; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 12f2f9e989f..11d4e6222f5 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -519,7 +519,7 @@ extern struct hv_context hv_context; extern int hv_init(void); -extern void hv_cleanup(void); +extern void hv_cleanup(bool crash); extern int hv_post_message(union hv_connection_id connection_id, enum hv_message_type message_type, diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 80754e2d808..3190a1fc7bc 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -618,7 +618,7 @@ static int vmbus_bus_init(int irq) bus_unregister(&hv_bus); err_cleanup: - hv_cleanup(); + hv_cleanup(false); return ret; } @@ -841,7 +841,7 @@ static void __exit vmbus_exit(void) free_irq(irq, hv_acpi_dev); vmbus_free_channels(); bus_unregister(&hv_bus); - hv_cleanup(); + hv_cleanup(false); acpi_bus_unregister_driver(&vmbus_acpi_driver); hv_cpu_hotplug_quirk(false); } diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c index f1d6b422cf0..c25700f7db9 100644 --- a/drivers/hwmon/ds620.c +++ b/drivers/hwmon/ds620.c @@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, if (res) return res; - val = (val * 10 / 625) * 8; + val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8; mutex_lock(&data->update_lock); data->temp[attr->index] = val; diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index c880d13f540..f079877bd4e 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -273,8 +273,14 @@ static void at91_twi_write_data_dma(struct at91_twi_dev *dev) static void at91_twi_read_next_byte(struct at91_twi_dev *dev) { - if (dev->buf_len <= 0) + /* + * If we are in this case, it means there is garbage data in RHR, so + * delete them. + */ + if (!dev->buf_len) { + at91_twi_read(dev, AT91_TWI_RHR); return; + } *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; --dev->buf_len; @@ -371,6 +377,24 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) if (!irqstatus) return IRQ_NONE; + /* + * In reception, the behavior of the twi device (before sama5d2) is + * weird. There is some magic about RXRDY flag! 
When a data has been + * almost received, the reception of a new one is anticipated if there + * is no stop command to send. That is the reason why ask for sending + * the stop command not on the last data but on the second last one. + * + * Unfortunately, we could still have the RXRDY flag set even if the + * transfer is done and we have read the last data. It might happen + * when the i2c slave device sends too quickly data after receiving the + * ack from the master. The data has been almost received before having + * the order to send stop. In this case, sending the stop command could + * cause a RXRDY interrupt with a TXCOMP one. It is better to manage + * the RXRDY interrupt first in order to not keep garbage data in the + * Receive Holding Register for the next transfer. + */ + if (irqstatus & AT91_TWI_RXRDY) + at91_twi_read_next_byte(dev); /* * When a NACK condition is detected, the I2C controller sets the NACK, @@ -413,8 +437,6 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { at91_disable_twi_interrupts(dev); complete(&dev->cmd_complete); - } else if (irqstatus & AT91_TWI_RXRDY) { - at91_twi_read_next_byte(dev); } else if (irqstatus & AT91_TWI_TXRDY) { at91_twi_write_next_byte(dev); } @@ -429,7 +451,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) { int ret; bool has_unre_flag = dev->pdata->has_unre_flag; - unsigned sr; /* * WARNING: the TXCOMP bit in the Status Register is NOT a clear on @@ -466,7 +487,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) dev->transfer_status = 0; /* Clear pending interrupts, such as NACK. */ - sr = at91_twi_read(dev, AT91_TWI_SR); + at91_twi_read(dev, AT91_TWI_SR); if (!dev->buf_len) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); @@ -474,11 +495,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } else if (dev->msg->flags & I2C_M_RD) { unsigned start_flags = AT91_TWI_START; - if (sr & AT91_TWI_RXRDY) { - dev_err(dev->dev, "RXRDY still set!"); - at91_twi_read(dev, AT91_TWI_RHR); - } - /* if only one byte is to be read, immediately stop transfer */ if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) start_flags |= AT91_TWI_STOP; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c3ccdea3d18..fa3ecec524f 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, unsigned long arg) { struct i2c_smbus_ioctl_data data_arg; - union i2c_smbus_data temp; + union i2c_smbus_data temp = {}; int datasize, res; if (copy_from_user(&data_arg, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 71c2c711680..818cac9bbd8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2772,6 +2772,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv, struct iw_cm_conn_param iw_param; int ret; + if (!conn_param) + return -EINVAL; + ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) return ret; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index dc3fd1e8af0..200f6c10eee 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1598,7 +1598,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, if (!class) goto out; if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= - IB_MGMT_MAX_METHODS) + ARRAY_SIZE(class->method_table)) goto out; method = class->method_table[convert_mgmt_class( mad->mad_hdr.mgmt_class)]; diff --git 
a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 180d7f436ed..2f861b59cbc 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -516,8 +516,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec, if (status) process_join_error(group, status); else { - ib_find_pkey(group->port->dev->device, group->port->port_num, - be16_to_cpu(rec->pkey), &pkey_index); + + if (ib_find_pkey(group->port->dev->device, + group->port->port_num, be16_to_cpu(rec->pkey), + &pkey_index)) + pkey_index = MCAST_INVALID_PKEY_INDEX; spin_lock_irq(&group->port->lock); group->rec = *rec; diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index f55d69500a5..3a85e766906 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -118,7 +118,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) --ah->av.eth.stat_rate; } - + ah->av.eth.sl_tclass_flowlabel |= + cpu_to_be32((ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label); /* * HW requires multicast LID so we just choose one. */ @@ -126,7 +128,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr ah->av.ib.dlid = cpu_to_be16(0xc000); memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16); - ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29); + ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29); return &ah->ibah; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 23d734349d8..6b810b12433 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -312,9 +312,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, if (err) goto out; - props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? - IB_WIDTH_4X : IB_WIDTH_1X; - props->active_speed = IB_SPEED_QDR; + props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || + (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? + IB_WIDTH_4X : IB_WIDTH_1X; + props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? + IB_SPEED_FDR : IB_SPEED_QDR; props->port_cap_flags = IB_PORT_CM_SUP; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index aa9ad2d70dd..c781c7c633f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1482,12 +1482,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, ret = ipoib_set_mode(dev, buf); - rtnl_unlock(); - - if (!ret) - return count; + /* The assumption is that the function ipoib_set_mode returned + * with the rtnl held by it, if not the value -EBUSY returned, + * then no need to rtnl_unlock + */ + if (ret != -EBUSY) + rtnl_unlock(); - return ret; + return (!ret || ret == -EBUSY) ? count : ret; } static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 375f9edd402..b022d710810 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -234,8 +234,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; ipoib_flush_paths(dev); - rtnl_lock(); - return 0; + return (!rtnl_trylock()) ? 
-EBUSY : 0; } if (!strcmp(buf, "datagram\n")) { @@ -244,8 +243,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); rtnl_unlock(); ipoib_flush_paths(dev); - rtnl_lock(); - return 0; + return (!rtnl_trylock()) ? -EBUSY : 0; } return -EINVAL; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index f362883c94e..3736c175952 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -188,6 +188,17 @@ static void joydev_detach_client(struct joydev *joydev, synchronize_rcu(); } +static void joydev_refresh_state(struct joydev *joydev) +{ + struct input_dev *dev = joydev->handle.dev; + int i, val; + + for (i = 0; i < joydev->nabs; i++) { + val = input_abs_get_val(dev, joydev->abspam[i]); + joydev->abs[i] = joydev_correct(val, &joydev->corr[i]); + } +} + static int joydev_open_device(struct joydev *joydev) { int retval; @@ -202,6 +213,8 @@ static int joydev_open_device(struct joydev *joydev) retval = input_open_device(&joydev->handle); if (retval) joydev->open--; + else + joydev_refresh_state(joydev); } mutex_unlock(&joydev->mutex); @@ -823,7 +836,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, j = joydev->abspam[i]; if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) { joydev->corr[i].type = JS_CORR_NONE; - joydev->abs[i] = input_abs_get_val(dev, j); continue; } joydev->corr[i].type = JS_CORR_BROKEN; @@ -838,10 +850,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, if (t) { joydev->corr[i].coef[2] = (1 << 29) / t; joydev->corr[i].coef[3] = (1 << 29) / t; - - joydev->abs[i] = - joydev_correct(input_abs_get_val(dev, j), - joydev->corr + i); } } diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index d96aa27dfcd..db64adfbe1a 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 2) + return -ENODEV; + epirq = &interface->endpoint[0].desc; epout = &interface->endpoint[1].desc; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 856c1b03e22..24e5683d6c9 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -843,6 +843,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_endpoint_descriptor *ep_irq_in; int i, error; + if (intf->cur_altsetting->desc.bNumEndpoints != 2) + return -ENODEV; + for (i = 0; xpad_device[i].idVendor; i++) { if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) && (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct)) @@ -898,6 +901,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id input_dev->name = xpad_device[i].name; input_dev->phys = xpad->phys; usb_to_input_id(udev, &input_dev->id); + + if (xpad->xtype == XTYPE_XBOX360W) { + /* x360w controllers and the receiver have different ids */ + input_dev->id.product = 0x02a1; + } + input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, xpad); diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index f7f3e9a9fd3..e13713b7658 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -88,7 +88,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) 
struct mpr121_touchkey *mpr121 = dev_id; struct i2c_client *client = mpr121->client; struct input_dev *input = mpr121->input_dev; - unsigned int key_num, key_val, pressed; + unsigned long bit_changed; + unsigned int key_num; int reg; reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR); @@ -106,18 +107,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) reg &= TOUCH_STATUS_MASK; /* use old press bit to figure out which bit changed */ - key_num = ffs(reg ^ mpr121->statusbits) - 1; - pressed = reg & (1 << key_num); + bit_changed = reg ^ mpr121->statusbits; mpr121->statusbits = reg; + for_each_set_bit(key_num, &bit_changed, mpr121->keycount) { + unsigned int key_val, pressed; - key_val = mpr121->keycodes[key_num]; + pressed = reg & BIT(key_num); + key_val = mpr121->keycodes[key_num]; - input_event(input, EV_MSC, MSC_SCAN, key_num); - input_report_key(input, key_val, pressed); - input_sync(input); + input_event(input, EV_MSC, MSC_SCAN, key_num); + input_report_key(input, key_val, pressed); + + dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, + pressed ? "pressed" : "released"); - dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, - pressed ? "pressed" : "released"); + } + input_sync(input); out: return IRQ_HANDLED; @@ -230,6 +235,7 @@ static int mpr_touchkey_probe(struct i2c_client *client, input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_dev->keycode = mpr121->keycodes; input_dev->keycodesize = sizeof(mpr121->keycodes[0]); diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index 55c15304ddb..92c742420e2 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c @@ -274,6 +274,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, bool irq_is_gpio = false; int irq; int error, row_shift, max_keys; + unsigned long trigger = 0; /* Copy the platform data */ if (pdata) { @@ -286,6 +287,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, cols = pdata->cols; rep = pdata->rep; irq_is_gpio = pdata->irq_is_gpio; + trigger = IRQF_TRIGGER_FALLING; } else { struct device_node *np = dev->of_node; int err; @@ -360,9 +362,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, irq = gpio_to_irq(irq); error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler, - IRQF_TRIGGER_FALLING | - IRQF_SHARED | - IRQF_ONESHOT, + trigger | IRQF_SHARED | IRQF_ONESHOT, client->name, keypad_data); if (error) { dev_err(dev, "Unable to claim irq %d; error %d\n", diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 082684e7f39..d6a35a71385 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -669,6 +669,10 @@ static int cm109_usb_probe(struct usb_interface *intf, int error = -ENOMEM; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 77164dc1bed..8fb814ccfd7 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1437,6 +1437,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + pcu->ep_ctrl = 
&alt->endpoint[0].desc; pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 285a5bd6cbc..3b6fdb389a2 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c @@ -876,6 +876,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) int ret, pipe, i; interface = intf->cur_altsetting; + + if (interface->desc.bNumEndpoints < 1) + return -ENODEV; + endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5102b4f68f1..566ced8b3bb 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -119,6 +119,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, }, + { + /* Dell Embedded Box PC 3000 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), + }, + }, { /* OQO Model 01 */ .matches = { @@ -211,6 +218,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, + }, { } }; @@ -574,6 +587,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "20046"), }, }, + { + /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), + }, + }, { } }; diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c index 5cc04124995..263c85e72e1 100644 --- a/drivers/input/tablet/hanwang.c +++ b/drivers/input/tablet/hanwang.c @@ -341,6 +341,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id int error; int i; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); input_dev = input_allocate_device(); if (!hanwang || !input_dev) { diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 3fba74b9b60..f0d532684af 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -123,6 +123,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i struct input_dev *input_dev; int error = -ENOMEM; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 1c62c248da6..0e7cd14bf7b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1029,7 +1029,7 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu, next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; left = (head - next_tail) % iommu->cmd_buf_size; - if (left <= 2) { + if (left <= 0x20) { struct iommu_cmd sync_cmd; volatile u64 sem = 0; int ret; diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index c44950d3eb7..6d4d9c1c2cf 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, return -ENODEV; } + if (hostif->desc.bNumEndpoints < 1) + return -ENODEV; + 
dev_info(&udev->dev, "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", __func__, le16_to_cpu(udev->descriptor.idVendor), diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 3ac9c419481..53dfe1693e5 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -787,8 +787,10 @@ static int __init ser_gigaset_init(void) driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); - if (!driver) + if (!driver) { + rc = -ENOMEM; goto error; + } rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc); if (rc != 0) { diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index a82e542ffc2..fecbf1d2f60 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others) ((CAPI_MSG *) msg)->header.ncci = 0; ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; - PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); + ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff; + ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8; ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; w = api_put(notify_plci->appl, (CAPI_MSG *) msg); if (w != _QUEUE_FULL) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f93fa95c518..e73e8f7e7c2 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1311,12 +1311,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key) if (!cc->key_size && strcmp(key, "-")) goto out; + /* clear the flag since following operations may invalidate previously valid key */ + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) goto out; - set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - r = crypt_setkey_allcpus(cc); + if (!r) + set_bit(DM_CRYPT_KEY_VALID, &cc->flags); out: /* Hex key string not needed after here, so wipe it. */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ef032265945..97f68081fc7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -976,11 +976,62 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) } EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); +/* + * Flush current->bio_list when the target map method blocks. + * This fixes deadlocks in snapshot and possibly in other targets. 
+ */ +struct dm_offload { + struct blk_plug plug; + struct blk_plug_cb cb; +}; + +static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) +{ + struct dm_offload *o = container_of(cb, struct dm_offload, cb); + struct bio_list list; + struct bio *bio; + + INIT_LIST_HEAD(&o->cb.list); + + if (unlikely(!current->bio_list)) + return; + + list = *current->bio_list; + bio_list_init(current->bio_list); + + while ((bio = bio_list_pop(&list))) { + struct bio_set *bs = bio->bi_pool; + if (unlikely(!bs) || bs == fs_bio_set) { + bio_list_add(current->bio_list, bio); + continue; + } + + spin_lock(&bs->rescue_lock); + bio_list_add(&bs->rescue_list, bio); + queue_work(bs->rescue_workqueue, &bs->rescue_work); + spin_unlock(&bs->rescue_lock); + } +} + +static void dm_offload_start(struct dm_offload *o) +{ + blk_start_plug(&o->plug); + o->cb.callback = flush_current_bio_list; + list_add(&o->cb.list, &current->plug->cb_list); +} + +static void dm_offload_end(struct dm_offload *o) +{ + list_del(&o->cb.list); + blk_finish_plug(&o->plug); +} + static void __map_bio(struct dm_target_io *tio) { int r; sector_t sector; struct mapped_device *md; + struct dm_offload o; struct bio *clone = &tio->clone; struct dm_target *ti = tio->ti; @@ -994,7 +1045,11 @@ static void __map_bio(struct dm_target_io *tio) */ atomic_inc(&tio->io->io_count); sector = clone->bi_sector; + + dm_offload_start(&o); r = ti->type->map(ti, clone); + dm_offload_end(&o); + if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd2b37..f169afac026 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -97,6 +97,12 @@ static int linear_mergeable_bvec(struct request_queue *q, return maxsectors << 9; } +/* + * In linear_congested() conf->raid_disks is used as a copy of + * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks + * and conf->disks[] are created in linear_conf(), they are always + * consistent with each other, but mddev->raid_disks is not. + */ static int linear_congested(void *data, int bits) { struct mddev *mddev = data; @@ -109,7 +115,7 @@ static int linear_congested(void *data, int bits) rcu_read_lock(); conf = rcu_dereference(mddev->private); - for (i = 0; i < mddev->raid_disks && !ret ; i++) { + for (i = 0; i < conf->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); ret |= bdi_congested(&q->backing_dev_info, bits); } @@ -196,6 +202,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) conf->disks[i-1].end_sector + conf->disks[i].rdev->sectors; + /* + * conf->raid_disks is a copy of mddev->raid_disks. The reason to + * keep a copy of mddev->raid_disks in struct linear_conf is that + * mddev->raid_disks may not be consistent with the number of + * pointers in conf->disks[] when it is updated in linear_add() and used to + * iterate the old conf->disks[] array in linear_congested(). + * Here conf->raid_disks is always consistent with the number of + * pointers in the conf->disks[] array, and mddev->private is updated + * with rcu_assign_pointer() in linear_add(), so such a race can be + * avoided. + */ + conf->raid_disks = raid_disks; + return conf; out: @@ -252,10 +271,18 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) if (!newconf) return -ENOMEM; + /* newconf->raid_disks already keeps a copy of * the increased + * value of mddev->raid_disks, WARN_ONCE() is just used to make + * sure of this.
It is possible that oldconf is still referenced + * in linear_congested(), therefore kfree_rcu() is used to free + * oldconf until no one uses it anymore. + */ oldconf = rcu_dereference_protected(mddev->private, lockdep_is_held( &mddev->reconfig_mutex)); mddev->raid_disks++; + WARN_ONCE(mddev->raid_disks != newconf->raid_disks, + "copied raid_disks doesn't match mddev->raid_disks"); rcu_assign_pointer(mddev->private, newconf); md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); diff --git a/drivers/md/linear.h b/drivers/md/linear.h index b685ddd7d7f..8d392e6098b 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h @@ -10,6 +10,7 @@ struct linear_conf { struct rcu_head rcu; sector_t array_sectors; + int raid_disks; /* a copy of mddev->raid_disks */ struct dev_info disks[0]; }; #endif diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 056d09c33af..c79d6480fbe 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -679,15 +679,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm, memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); r = sm_ll_new_metadata(&smm->ll, tm); + if (!r) { + r = sm_ll_extend(&smm->ll, nr_blocks); + } + memcpy(&smm->sm, &ops, sizeof(smm->sm)); if (r) return r; - r = sm_ll_extend(&smm->ll, nr_blocks); - if (r) - return r; - - memcpy(&smm->sm, &ops, sizeof(smm->sm)); - /* * Now we need to update the newly created data structures with the * allocated blocks that they were built from. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 63d42ae56a1..a8315aaba9f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if (best_dist_disk < 0) { if (is_badblock(rdev, this_sector, sectors, &first_bad, &bad_sectors)) { - if (first_bad < this_sector) + if (first_bad <= this_sector) /* Cannot use this */ continue; best_good_sectors = first_bad - this_sector; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 73f51a62c14..c0d6d9c7a26 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5616,6 +5616,15 @@ static int run(struct mddev *mddev) stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; + + /* + * We use 16-bit counter of active stripes in bi_phys_segments + * (minus one for over-loaded initialization) + */ + blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); + blk_queue_max_discard_sectors(mddev->queue, + 0xfffe * STRIPE_SECTORS); + /* * unaligned part of discard request will be ignored, so can't * guarantee discard_zeroes_data diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index f981d50a2a8..936ef2f881f 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -549,6 +549,7 @@ config VIDEO_S5K6AA config VIDEO_S5K4ECGX tristate "Samsung S5K4ECGX sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + select CRC32 ---help--- This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M camera sensor with an embedded SoC image signal processor. 
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index 63b42252166..7a754ec826a 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev) if (allowance > ITE_RXDCR_MAX) allowance = ITE_RXDCR_MAX; + + use_demodulator = true; } } diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 9771cd83c06..9a9b6c72422 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -289,6 +289,14 @@ static void free_firmware(struct xc2028_data *priv) int i; tuner_dbg("%s called\n", __func__); + /* free allocated f/w string */ + if (priv->fname != firmware_name) + kfree(priv->fname); + priv->fname = NULL; + + priv->state = XC2028_NO_FIRMWARE; + memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (!priv->firm) return; @@ -299,9 +307,6 @@ static void free_firmware(struct xc2028_data *priv) priv->firm = NULL; priv->firm_size = 0; - priv->state = XC2028_NO_FIRMWARE; - - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); } static int load_all_firmwares(struct dvb_frontend *fe, @@ -890,9 +895,9 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type, return 0; fail: + free_firmware(priv); priv->state = XC2028_SLEEP; - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); if (retry_count < 8) { msleep(50); retry_count++; @@ -1314,11 +1319,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) mutex_lock(&xc2028_list_mutex); /* only perform final cleanup if this is the last instance */ - if (hybrid_tuner_report_instance_count(priv) == 1) { + if (hybrid_tuner_report_instance_count(priv) == 1) free_firmware(priv); - kfree(priv->ctrl.fname); - priv->ctrl.fname = NULL; - } if (priv) hybrid_tuner_release_state(priv); @@ -1381,8 +1383,6 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) /* * Copy the config data. - * For the firmware name, keep a local copy of the string, - * in order to avoid troubles during device release. */ kfree(priv->ctrl.fname); memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); @@ -1391,7 +1391,6 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) if (priv->ctrl.fname == NULL) rc = -ENOMEM; } - /* * If firmware name changed, frees firmware. 
As free_firmware will * reset the status to NO_FIRMWARE, this forces a new request_firmware @@ -1405,10 +1404,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) if (priv->state == XC2028_NO_FIRMWARE) { if (!firmware_name[0]) - priv->fname = priv->ctrl.fname; + priv->fname = kstrdup(p->fname, GFP_KERNEL); else priv->fname = firmware_name; + if (!priv->fname) { + rc = -ENOMEM; + goto unlock; + } + rc = request_firmware_nowait(THIS_MODULE, 1, priv->fname, priv->i2c_props.adap->dev.parent, @@ -1421,6 +1425,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) } else priv->state = XC2028_WAITING_FIRMWARE; } +unlock: mutex_unlock(&priv->lock); return rc; diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 03761c6f472..8e7c7856713 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -206,20 +206,28 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev) static int smsusb_sendrequest(void *context, void *buffer, size_t size) { struct smsusb_device_t *dev = (struct smsusb_device_t *) context; - struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; - int dummy; + struct sms_msg_hdr *phdr; + int dummy, ret; if (dev->state != SMSUSB_ACTIVE) return -ENOENT; + phdr = kmalloc(size, GFP_KERNEL); + if (!phdr) + return -ENOMEM; + memcpy(phdr, buffer, size); + sms_debug("sending %s(%d) size: %d", smscore_translate_msg(phdr->msg_type), phdr->msg_type, phdr->msg_length); smsendian_handle_tx_message((struct sms_msg_data *) phdr); - smsendian_handle_message_header((struct sms_msg_hdr *)buffer); - return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), - buffer, size, &dummy, 1000); + smsendian_handle_message_header((struct sms_msg_hdr *)phdr); + ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), + phdr, size, &dummy, 1000); + + kfree(phdr); + return ret; } static char *smsusb1_fw_lkup[] = { diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 363cdbf4ac8..5422093d135 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1533,6 +1533,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain) return buffer; } +static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) +{ + struct uvc_video_chain *chain; + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (chain == NULL) + return NULL; + + INIT_LIST_HEAD(&chain->entities); + mutex_init(&chain->ctrl_mutex); + chain->dev = dev; + v4l2_prio_init(&chain->prio); + + return chain; +} + +/* + * Fallback heuristic for devices that don't connect units and terminals in a + * valid chain. + * + * Some devices have invalid baSourceID references, causing uvc_scan_chain() + * to fail, but if we just take the entities we can find and put them together + * in the most sensible chain we can think of, turns out they do work anyway. + * Note: This heuristic assumes there is a single chain. + * + * At the time of writing, devices known to have such a broken chain are + * - Acer Integrated Camera (5986:055a) + * - Realtek rtl157a7 (0bda:57a7) + */ +static int uvc_scan_fallback(struct uvc_device *dev) +{ + struct uvc_video_chain *chain; + struct uvc_entity *iterm = NULL; + struct uvc_entity *oterm = NULL; + struct uvc_entity *entity; + struct uvc_entity *prev; + + /* + * Start by locating the input and output terminals. We only support + * devices with exactly one of each for now. 
+ */ + list_for_each_entry(entity, &dev->entities, list) { + if (UVC_ENTITY_IS_ITERM(entity)) { + if (iterm) + return -EINVAL; + iterm = entity; + } + + if (UVC_ENTITY_IS_OTERM(entity)) { + if (oterm) + return -EINVAL; + oterm = entity; + } + } + + if (iterm == NULL || oterm == NULL) + return -EINVAL; + + /* Allocate the chain and fill it. */ + chain = uvc_alloc_chain(dev); + if (chain == NULL) + return -ENOMEM; + + if (uvc_scan_chain_entity(chain, oterm) < 0) + goto error; + + prev = oterm; + + /* + * Add all Processing and Extension Units with two pads. The order + * doesn't matter much, use reverse list traversal to connect units in + * UVC descriptor order as we build the chain from output to input. This + * leads to units appearing in the order meant by the manufacturer for + * the cameras known to require this heuristic. + */ + list_for_each_entry_reverse(entity, &dev->entities, list) { + if (entity->type != UVC_VC_PROCESSING_UNIT && + entity->type != UVC_VC_EXTENSION_UNIT) + continue; + + if (entity->num_pads != 2) + continue; + + if (uvc_scan_chain_entity(chain, entity) < 0) + goto error; + + prev->baSourceID[0] = entity->id; + prev = entity; + } + + if (uvc_scan_chain_entity(chain, iterm) < 0) + goto error; + + prev->baSourceID[0] = iterm->id; + + list_add_tail(&chain->list, &dev->chains); + + uvc_trace(UVC_TRACE_PROBE, + "Found a video chain by fallback heuristic (%s).\n", + uvc_print_chain(chain)); + + return 0; + +error: + kfree(chain); + return -EINVAL; +} + /* * Scan the device for video chains and register video devices. * @@ -1555,15 +1663,10 @@ static int uvc_scan_device(struct uvc_device *dev) if (term->chain.next || term->chain.prev) continue; - chain = kzalloc(sizeof(*chain), GFP_KERNEL); + chain = uvc_alloc_chain(dev); if (chain == NULL) return -ENOMEM; - INIT_LIST_HEAD(&chain->entities); - mutex_init(&chain->ctrl_mutex); - chain->dev = dev; - v4l2_prio_init(&chain->prio); - term->flags |= UVC_ENTITY_FLAG_DEFAULT; if (uvc_scan_chain(chain, term) < 0) { @@ -1577,6 +1680,9 @@ static int uvc_scan_device(struct uvc_device *dev) list_add_tail(&chain->list, &dev->chains); } + if (list_empty(&dev->chains)) + uvc_scan_fallback(dev); + if (list_empty(&dev->chains)) { uvc_printk(KERN_INFO, "No valid video chain found.\n"); return -1; diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c index cd962be860c..7e743958dbc 100644 --- a/drivers/media/usb/uvc/uvc_queue.c +++ b/drivers/media/usb/uvc/uvc_queue.c @@ -375,7 +375,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, nextbuf = NULL; spin_unlock_irqrestore(&queue->irqlock, flags); - buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; + buf->state = buf->error ? 
UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 96e3dc0aa19..31cd589e07a 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -795,7 +795,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, struct mmc_async_req *cur_areq = &test_areq[0].areq; struct mmc_async_req *other_areq = &test_areq[1].areq; int i; - int ret; + int ret = RESULT_OK; test_areq[0].test = test; test_areq[1].test = test; diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index f3a42321310..01951cd6599 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -312,6 +312,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); cmd1 = cmd->arg; + if (cmd->opcode == MMC_STOP_TRANSMISSION) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; @@ -420,8 +423,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) ssp->base + HW_SSP_BLOCK_SIZE); } - if ((cmd->opcode == MMC_STOP_TRANSMISSION) || - (cmd->opcode == SD_IO_RW_EXTENDED)) + if (cmd->opcode == SD_IO_RW_EXTENDED) cmd0 |= BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 18ce68c098f..66f81d264bf 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1437,7 +1437,9 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) goto ret; } timeout--; - udelay(1); + spin_unlock_irq(&host->lock); + usleep_range(900, 1100); + spin_lock_irq(&host->lock); } clk |= SDHCI_CLOCK_CARD_EN; diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index c0105a2e269..d5493a5a7e7 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id struct ushc_data *ushc; int ret; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) return -ENOMEM; diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 9279a9174f8..04e2e430889 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -159,12 +159,10 @@ static int bcm47xxpart_parse(struct mtd_info *master, last_trx_part = curr_part - 1; - /* - * We have whole TRX scanned, skip to the next part. Use - * roundown (not roundup), as the loop will increase - * offset in next step. 
- */ - offset = rounddown(offset + trx->length, blocksize); + /* Jump to the end of TRX */ + offset = roundup(offset + trx->length, blocksize); + /* Next loop iteration will increase the offset */ + offset -= blocksize; continue; } } diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index f9fa3fad728..2051f28ddac 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c @@ -139,15 +139,13 @@ static int __init init_msp_flash(void) } msp_maps[i].bankwidth = 1; - msp_maps[i].name = kmalloc(7, GFP_KERNEL); + msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL); if (!msp_maps[i].name) { iounmap(msp_maps[i].virt); kfree(msp_parts[i]); goto cleanup_loop; } - msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); - for (j = 0; j < pcnt; j++) { part_name[5] = '0' + i; part_name[7] = '0' + j; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index f2ab08c2c5f..575de54596f 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -541,7 +541,7 @@ config MTD_NAND_FSMC Flexible Static Memory Controller (FSMC) config MTD_NAND_XWAY - tristate "Support for NAND on Lantiq XWAY SoC" + bool "Support for NAND on Lantiq XWAY SoC" depends on LANTIQ && SOC_TYPE_XWAY select MTD_NAND_PLATFORM help diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index 0134ba32a05..39712560b4c 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c @@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, return err; } - if (bytes == 0) { - err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); - if (err) - return err; + err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); + if (err) + return err; + if (bytes == 0) { err = clear_update_marker(ubi, vol, 0); if (err) return err; diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index b374be7891a..b905e5e840f 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -109,6 +109,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, dev->irq = pdev->irq; priv->base = addr; + priv->device = &pdev->dev; if (!c_can_pci_data->freq) { dev_err(&pdev->dev, "no clock frequency defined\n"); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f21fc37ec57..2c19b4ffe82 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -962,7 +962,12 @@ static int ti_hecc_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, HECC_DEF_NAPI_WEIGHT); - clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); + goto probe_exit_clk; + } + err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed\n"); @@ -995,7 +1000,7 @@ static int ti_hecc_remove(struct platform_device *pdev) struct ti_hecc_priv *priv = netdev_priv(ndev); unregister_candev(ndev); - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); clk_put(priv->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(priv->base); @@ -1021,7 +1026,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state) hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); priv->can.state = CAN_STATE_SLEEPING; - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return 0; } @@ -1030,8 +1035,11 @@ static int ti_hecc_resume(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(dev); + int err; - 
clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) + return err; hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 3a220d2f2ee..9a82890f64e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -817,23 +817,25 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, static void peak_usb_disconnect(struct usb_interface *intf) { struct peak_usb_device *dev; + struct peak_usb_device *dev_prev_siblings; /* unregister as many netdev devices as siblings */ - for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { + for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) { struct net_device *netdev = dev->netdev; char name[IFNAMSIZ]; + dev_prev_siblings = dev->prev_siblings; dev->state &= ~PCAN_USB_STATE_CONNECTED; strncpy(name, netdev->name, IFNAMSIZ); unregister_netdev(netdev); - free_candev(netdev); kfree(dev->cmd_buf); dev->next_siblings = NULL; if (dev->adapter->dev_free) dev->adapter->dev_free(dev); + free_candev(netdev); dev_info(&intf->dev, "%s removed\n", name); } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index cbd388eea68..f8b84fed537 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -956,8 +956,8 @@ static int usb_8dev_probe(struct usb_interface *intf, for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), - GFP_KERNEL); + priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), + GFP_KERNEL); if (!priv->cmd_msg_buffer) goto cleanup_candev; @@ -971,7 +971,7 @@ static int usb_8dev_probe(struct usb_interface *intf, if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); - goto cleanup_cmd_msg_buffer; + goto cleanup_candev; } err = usb_8dev_cmd_version(priv, &version); @@ -992,9 +992,6 @@ static int usb_8dev_probe(struct usb_interface *intf, cleanup_unregister_candev: unregister_netdev(priv->netdev); -cleanup_cmd_msg_buffer: - kfree(priv->cmd_msg_buffer); - cleanup_candev: free_candev(netdev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ce1a9161867..9c19f49f0f5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1792,8 +1792,16 @@ static void bnx2x_get_ringparam(struct net_device *dev, ering->rx_max_pending = MAX_RX_AVAIL; + /* If size isn't already set, we give an estimation of the number + * of buffers we'll have. We're neglecting some possible conditions + * [we couldn't know for certain at this point if number of queues + * might shrink] but the number would be correct for the likely + * scenario. 
+ */ if (bp->rx_ring_size) ering->rx_pending = bp->rx_ring_size; + else if (BNX2X_NUM_RX_QUEUES(bp)) + ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp); else ering->rx_pending = MAX_RX_AVAIL; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 77c6427263d..a1f7e437eb2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) return 0; hw_cons = *(tcb->hw_consumer_index); + rmb(); cons = tcb->consumer_index; q_depth = tcb->q_depth; @@ -2903,13 +2904,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) BNA_QE_INDX_INC(prod, q_depth); tcb->producer_index = prod; - smp_mb(); + wmb(); if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) return NETDEV_TX_OK; bna_txq_prod_indx_doorbell(tcb); - smp_mb(); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 5dec66a9679..583ebff3116 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -87,6 +87,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_id; + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 004e4231af6..528597f6593 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -57,13 +57,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) { struct mlx4_cq *cq; + rcu_read_lock(); cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); + rcu_read_unlock(); + if (!cq) { mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ ++cq->arm_sn; cq->comp(cq); @@ -74,23 +80,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; struct mlx4_cq *cq; - spin_lock(&cq_table->lock); - + rcu_read_lock(); cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); - - spin_unlock(&cq_table->lock); + rcu_read_unlock(); if (!cq) { - mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed.
+ */ cq->event(cq, event_type); - - if (atomic_dec_and_test(&cq->refcount)) - complete(&cq->free); } static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -261,9 +263,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (err) return err; - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); if (err) goto err_icm; @@ -303,9 +305,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, return 0; err_radix: - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); err_icm: mlx4_cq_free_icm(dev, cq->cqn); @@ -324,11 +326,11 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); - synchronize_irq(priv->eq_table.eq[cq->vector].irq); - - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); + + synchronize_irq(priv->eq_table.eq[cq->vector].irq); if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 02aee1ebd20..2a541500ad2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -350,8 +350,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; ring->stride = stride; - if (ring->stride <= TXBB_SIZE) + if (ring->stride <= TXBB_SIZE) { + /* Stamp first unused send wqe */ + __be32 *ptr = (__be32 *)ring->buf; + __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT); + *ptr = stamp; + /* Move pointer to start of rx section */ ring->buf += TXBB_SIZE; + } ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e9eab29db7b..5cb09ecfd75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2697,12 +2697,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, spin_lock_init(&priv->lock); spin_lock_init(&priv->tx_lock); - ret = register_netdev(ndev); - if (ret) { - pr_err("%s: ERROR %i registering the device\n", __func__, ret); - goto error_netdev_register; - } - priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { pr_warn("%s: warning: cannot get CSR clock\n", __func__); @@ -2733,13 +2727,23 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, } } + ret = register_netdev(ndev); + if (ret) { + netdev_err(priv->dev, "%s: ERROR %i registering the device\n", + __func__, ret); + goto error_netdev_register; + } + return priv; +error_netdev_register: + if (priv->pcs != STMMAC_PCS_RGMII && + priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); error_mdio_register: clk_put(priv->stmmac_clk); error_clk_get: - unregister_netdev(ndev); -error_netdev_register: netif_napi_del(&priv->napi); error_free_netdev: free_netdev(ndev); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 31bbbca341a..922f7dd6028 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c 
@@ -557,7 +557,8 @@ static int cpmac_poll(struct napi_struct *napi, int budget) static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { - int queue, len; + int queue; + unsigned int len; struct cpmac_desc *desc; struct cpmac_priv *priv = netdev_priv(dev); @@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb_padto(skb, ETH_ZLEN))) return NETDEV_TX_OK; - len = max(skb->len, ETH_ZLEN); + len = max_t(unsigned int, skb->len, ETH_ZLEN); queue = skb_get_queue_mapping(skb); netif_stop_subqueue(dev, queue); @@ -1241,7 +1242,7 @@ int cpmac_init(void) goto fail_alloc; } -#warning FIXME: unhardcode gpio&reset bits + /* FIXME: unhardcode gpio&reset bits */ ar7_gpio_disable(26); ar7_gpio_disable(27); ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 59e9c56e5b8..493460424a0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -48,6 +48,9 @@ struct net_device_context { struct work_struct work; }; +/* Restrict GSO size to account for NVGRE */ +#define NETVSC_GSO_MAX_SIZE 62768 + #define RING_SIZE_MIN 64 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); @@ -436,6 +439,7 @@ static int netvsc_probe(struct hv_device *dev, SET_ETHTOOL_OPS(net, ðtool_ops); SET_NETDEV_DEV(net, &dev->device); + netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); ret = register_netdev(net); if (ret != 0) { diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 8fc46fcaee5..1c51abbeced 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -678,7 +678,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, size_t linear; if (q->flags & IFF_VNET_HDR) { - vnet_hdr_len = q->vnet_hdr_sz; + vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz); err = -EINVAL; if (len < vnet_hdr_len) @@ -809,7 +809,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; - vnet_hdr_len = q->vnet_hdr_sz; + vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz); if ((len -= vnet_hdr_len) < 0) return -EINVAL; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 299d35552a3..bd245c3039e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -474,7 +474,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (phydev->state > PHY_UP) + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index aeff706f163..d818f990c7a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1087,9 +1087,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (tun->flags & TUN_VNET_HDR) { - if (len < tun->vnet_hdr_sz) + int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz); + + if (len < vnet_hdr_sz) return -EINVAL; - len -= tun->vnet_hdr_sz; + len -= vnet_hdr_sz; if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) return -EFAULT; @@ -1100,7 +1102,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (gso.hdr_len > len) return -EINVAL; - offset += tun->vnet_hdr_sz; + offset += vnet_hdr_sz; } if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { @@ -1272,12 +1274,16 @@ static ssize_t tun_put_user(struct tun_struct *tun, { struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; + int vnet_hdr_sz = 0; + + if (tun->flags 
& TUN_VNET_HDR) + vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz); if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) return -EINVAL; - if (len < skb->len) { + if (len < skb->len + vnet_hdr_sz) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } @@ -1287,9 +1293,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, total += sizeof(pi); } - if (tun->flags & TUN_VNET_HDR) { + if (vnet_hdr_sz) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ - if ((len -= tun->vnet_hdr_sz) < 0) + if ((len -= vnet_hdr_sz) < 0) return -EINVAL; if (skb_is_gso(skb)) { @@ -1332,7 +1338,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, sizeof(gso)))) return -EFAULT; - total += tun->vnet_hdr_sz; + total += vnet_hdr_sz; } len = min_t(int, skb->len, len); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 8d5cac2d8e3..57da4c10c69 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -779,7 +779,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id struct net_device *netdev; struct catc *catc; u8 broadcast[6]; - int i, pktsz; + int pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { @@ -814,12 +814,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); - usb_free_urb(catc->ctrl_urb); - usb_free_urb(catc->tx_urb); - usb_free_urb(catc->rx_urb); - usb_free_urb(catc->irq_urb); - free_netdev(netdev); - return -ENOMEM; + ret = -ENOMEM; + goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ @@ -847,15 +843,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { + u32 *buf; + int i; + dev_dbg(dev, "Checking memory size\n"); - i = 0x12345678; - catc_write_mem(catc, 0x7a80, &i, 4); - i = 0x87654321; - catc_write_mem(catc, 0xfa80, &i, 4); - catc_read_mem(catc, 0x7a80, &i, 4); + buf = kmalloc(4, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto fail_free; + } + + *buf = 0x12345678; + catc_write_mem(catc, 0x7a80, buf, 4); + *buf = 0x87654321; + catc_write_mem(catc, 0xfa80, buf, 4); + catc_read_mem(catc, 0x7a80, buf, 4); - switch (i) { + switch (*buf) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); @@ -870,6 +875,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id dev_dbg(dev, "32k Memory\n"); break; } + + kfree(buf); dev_dbg(dev, "Getting MAC from SEEROM.\n"); @@ -916,16 +923,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); - if (register_netdev(netdev) != 0) { - usb_set_intfdata(intf, NULL); - usb_free_urb(catc->ctrl_urb); - usb_free_urb(catc->tx_urb); - usb_free_urb(catc->rx_urb); - usb_free_urb(catc->irq_urb); - free_netdev(netdev); - return -EIO; - } + ret = register_netdev(netdev); + if (ret) + goto fail_clear_intfdata; + return 0; + +fail_clear_intfdata: + usb_set_intfdata(intf, NULL); +fail_free: + usb_free_urb(catc->ctrl_urb); + usb_free_urb(catc->tx_urb); + usb_free_urb(catc->rx_urb); + usb_free_urb(catc->irq_urb); + free_netdev(netdev); + return ret; } static void catc_disconnect(struct usb_interface *intf) diff --git a/drivers/net/usb/pegasus.c 
b/drivers/net/usb/pegasus.c index 03e8a15d7de..f32a57ed1d1 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb) static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { + u8 *buf; int ret; + buf = kmalloc(size, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, - indx, data, size, 1000); + indx, buf, size, 1000); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + else if (ret <= size) + memcpy(data, buf, ret); + kfree(buf); return ret; } -static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) +static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, + const void *data) { + u8 *buf; int ret; + buf = kmemdup(data, size, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, - indx, data, size, 100); + indx, buf, size, 100); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + kfree(buf); return ret; } static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { + u8 *buf; int ret; + buf = kmemdup(&data, 1, GFP_NOIO); + if (!buf) + return -ENOMEM; + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, - indx, &data, 1, 1000); + indx, buf, 1, 1000); if (ret < 0) netif_dbg(pegasus, drv, pegasus->net, "%s returned %d\n", __func__, ret); + kfree(buf); return ret; } diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 6cbdac67f3a..59d6a3a5830 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -156,16 +156,36 @@ static const char driver_name [] = "rtl8150"; */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { - return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), - RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, - indx, 0, data, size, 500); + void *buf; + int ret; + + buf = kmalloc(size, GFP_NOIO); + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), + RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, + indx, 0, buf, size, 500); + if (ret > 0 && ret <= size) + memcpy(data, buf, ret); + kfree(buf); + return ret; } -static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) +static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { - return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), - RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, - indx, 0, data, size, 500); + void *buf; + int ret; + + buf = kmemdup(data, size, GFP_NOIO); + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, + indx, 0, buf, size, 500); + kfree(buf); + return ret; } static void async_set_reg_cb(struct urb *urb) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d0815855d87..e782dd7183d 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2862,7 +2862,6 @@ vmxnet3_tx_timeout(struct net_device *netdev) netdev_err(adapter->netdev, "tx hang\n"); schedule_work(&adapter->work); - netif_wake_queue(adapter->netdev); } @@ -2889,6 +2888,7 @@ vmxnet3_reset_work(struct work_struct *data) } rtnl_unlock(); + 
netif_wake_queue(adapter->netdev); clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a1dc186c6f6..8912ba83fd7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1386,7 +1386,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) if (data[IFLA_VXLAN_ID]) { __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); - if (id >= VXLAN_VID_MASK) + if (id >= VXLAN_N_VID) return -ERANGE; } diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 06f86f43571..1b8422c4ef9 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -511,8 +511,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; return -EOPNOTSUPP; default: - WARN_ON(1); - return -EINVAL; + return -EOPNOTSUPP; } mutex_lock(&ah->lock); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 874f6570bd1..d83ad9df660 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -71,13 +71,13 @@ #define AR9300_OTP_BASE \ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) #define AR9300_OTP_STATUS \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 #define AR9300_OTP_READ_DATA \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 6307a4e36c8..f8639003da9 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, spin_lock_bh(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rid, 0); - if (!res) - res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; + + res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; if (le16_to_cpu(rec.len) == 0) { /* RID not available */ res = -ENODATA; + goto unlock; } rlen = (le16_to_cpu(rec.len) - 1) * 2; - if (!res && exact_len && rlen != len) { + if (exact_len && rlen != len) { printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " "rid=0x%04x, len=%d (expected %d)\n", dev->name, rid, rlen, len); res = -ENODATA; } - if (!res) - res = hfa384x_from_bap(dev, BAP0, buf, len); + res = hfa384x_from_bap(dev, BAP0, buf, len); +unlock: spin_unlock_bh(&local->baplock); mutex_unlock(&local->rid_bap_mtx); diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 3ad79736b25..3fc7d084548 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -823,6 +823,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct urb *urb; /* should after adapter start and interrupt enable. 
*/ set_hal_stop(rtlhal); @@ -830,6 +831,23 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) /* Enable software */ SET_USB_STOP(rtlusb); rtl_usb_deinit(hw); + + /* free pre-allocated URBs from rtl_usb_start() */ + usb_kill_anchored_urbs(&rtlusb->rx_submitted); + + tasklet_kill(&rtlusb->rx_work_tasklet); + cancel_work_sync(&rtlpriv->works.lps_change_work); + + flush_workqueue(rtlpriv->works.rtl_wq); + + skb_queue_purge(&rtlusb->rx_queue); + + while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + } + rtlpriv->cfg->ops->hw_disable(hw); } diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index b29e20b7862..ffb2de5c4d5 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -259,8 +259,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) { - if (vio_find_node(dn)) + struct vio_dev *vio_dev; + + vio_dev = vio_find_node(dn); + if (vio_dev) { + put_device(&vio_dev->dev); return -EINVAL; + } if (!vio_register_device_node(dn)) { printk(KERN_ERR @@ -336,6 +341,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) return -EINVAL; vio_unregister_device(vio_dev); + + put_device(&vio_dev->dev); + return 0; } diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 3492ec9a33b..a7d64f94c3c 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -274,7 +274,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - return true; + return pin->configs & + (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN); case PIN_CONFIG_BIAS_PULL_UP: return pin->configs & SH_PFC_PIN_CFG_PULL_UP; diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c index 92cc4cfafde..6bcd57cb2f7 100644 --- a/drivers/platform/goldfish/pdev_bus.c +++ b/drivers/platform/goldfish/pdev_bus.c @@ -153,23 +153,26 @@ static int goldfish_new_pdev(void) static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id) { irqreturn_t ret = IRQ_NONE; + while (1) { u32 op = readl(pdev_bus_base + PDEV_BUS_OP); - switch (op) { - case PDEV_BUS_OP_DONE: - return IRQ_NONE; + switch (op) { case PDEV_BUS_OP_REMOVE_DEV: goldfish_pdev_remove(); + ret = IRQ_HANDLED; break; case PDEV_BUS_OP_ADD_DEV: goldfish_new_pdev(); + ret = IRQ_HANDLED; break; + + case PDEV_BUS_OP_DONE: + default: + return ret; } - ret = IRQ_HANDLED; } - return ret; } static int goldfish_pdev_bus_probe(struct platform_device *pdev) diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 59a8d325a69..e4d9a903ca3 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -1860,11 +1860,24 @@ static int acer_wmi_enable_lm(void) return status; } +#define ACER_WMID_ACCEL_HID "BST0001" + static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level, void *ctx, void **retval) { + struct acpi_device *dev; + + if (!strcmp(ctx, "SENR")) { + if (acpi_bus_get_device(ah, &dev)) + return AE_OK; + if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev))) + return AE_OK; + } else + return AE_OK; + *(acpi_handle *)retval = ah; - return AE_OK; + + return AE_CTRL_TERMINATE; } static int __init acer_wmi_get_handle(const char *name, const char *prop, @@ -1878,8 
+1891,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop, handle = NULL; status = acpi_get_devices(prop, acer_wmi_get_handle_cb, (void *)name, &handle); - - if (ACPI_SUCCESS(status)) { + if (ACPI_SUCCESS(status) && handle) { *ah = handle; return 0; } else { @@ -1891,7 +1903,7 @@ static int __init acer_wmi_accel_setup(void) { int err; - err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle); + err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle); if (err) return err; @@ -2262,10 +2274,11 @@ static int __init acer_wmi_init(void) err = acer_wmi_input_setup(); if (err) return err; + err = acer_wmi_accel_setup(); + if (err && err != -ENODEV) + pr_warn("Cannot enable accelerometer\n"); } - acer_wmi_accel_setup(); - err = platform_driver_register(&acer_platform_driver); if (err) { pr_err("Unable to register platform driver\n"); diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index f59683aa13d..fc6d84e202e 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -78,8 +78,8 @@ static int mfld_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND, - DRIVER_NAME, input); + error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND | + IRQF_ONESHOT, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" "button\n", irq); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 42bd57da239..09198941ee2 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -763,9 +763,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq); */ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) { + struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); + struct rtc_time tm; + ktime_t now; + timer->enabled = 1; + __rtc_read_time(rtc, &tm); + now = rtc_tm_to_ktime(tm); + + /* Skip over expired timers */ + while (next) { + if (next->expires.tv64 >= now.tv64) + break; + next = timerqueue_iterate_next(next); + } + timerqueue_add(&rtc->timerqueue, &timer->node); - if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { + if (!next) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index f40afdd0e5f..b6e220f5963 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -15,6 +15,7 @@ #include #include #include +#include #define S35390A_CMD_STATUS1 0 #define S35390A_CMD_STATUS2 1 @@ -34,10 +35,14 @@ #define S35390A_ALRM_BYTE_HOURS 1 #define S35390A_ALRM_BYTE_MINS 2 +/* flags for STATUS1 */ #define S35390A_FLAG_POC 0x01 #define S35390A_FLAG_BLD 0x02 +#define S35390A_FLAG_INT2 0x04 #define S35390A_FLAG_24H 0x40 #define S35390A_FLAG_RESET 0x80 + +/* flag for STATUS2 */ #define S35390A_FLAG_TEST 0x01 #define S35390A_INT2_MODE_MASK 0xF0 @@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) return 0; } -static int s35390a_reset(struct s35390a *s35390a) +/* + * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset. + * To keep the information if an irq is pending, pass the value read from + * STATUS1 to the caller. 
+ */ +static int s35390a_reset(struct s35390a *s35390a, char *status1) { - char buf[1]; - - if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) - return -EIO; - - if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) + char buf; + int ret; + unsigned initcount = 0; + + ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1); + if (ret < 0) + return ret; + + if (*status1 & S35390A_FLAG_POC) + /* + * Do not communicate for 0.5 seconds since the power-on + * detection circuit is in operation. + */ + msleep(500); + else if (!(*status1 & S35390A_FLAG_BLD)) + /* + * If both POC and BLD are unset everything is fine. + */ return 0; - buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); - buf[0] &= 0xf0; - return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); + /* + * At least one of POC and BLD are set, so reinitialise chip. Keeping + * this information in the hardware to know later that the time isn't + * valid is unfortunately not possible because POC and BLD are cleared + * on read. So the reset is best done now. + * + * The 24H bit is kept over reset, so set it already here. + */ +initialize: + *status1 = S35390A_FLAG_24H; + buf = S35390A_FLAG_RESET | S35390A_FLAG_24H; + ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); + + if (ret < 0) + return ret; + + ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); + if (ret < 0) + return ret; + + if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) { + /* Try up to five times to reset the chip */ + if (initcount < 5) { + ++initcount; + goto initialize; + } else + return -EIO; + } + + return 1; } static int s35390a_disable_test_mode(struct s35390a *s35390a) @@ -265,6 +314,20 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm) char buf[3], sts; int i, err; + /* + * initialize all members to -1 to signal the core that they are not + * defined by the hardware. 
+ */ + alm->time.tm_sec = -1; + alm->time.tm_min = -1; + alm->time.tm_hour = -1; + alm->time.tm_mday = -1; + alm->time.tm_mon = -1; + alm->time.tm_year = -1; + alm->time.tm_wday = -1; + alm->time.tm_yday = -1; + alm->time.tm_isdst = -1; + err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); if (err < 0) return err; @@ -327,11 +390,11 @@ static struct i2c_driver s35390a_driver; static int s35390a_probe(struct i2c_client *client, const struct i2c_device_id *id) { - int err; + int err, err_reset; unsigned int i; struct s35390a *s35390a; struct rtc_time tm; - char buf[1]; + char buf, status1; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { err = -ENODEV; @@ -360,29 +423,35 @@ static int s35390a_probe(struct i2c_client *client, } } - err = s35390a_reset(s35390a); - if (err < 0) { + err_reset = s35390a_reset(s35390a, &status1); + if (err_reset < 0) { + err = err_reset; dev_err(&client->dev, "error resetting chip\n"); goto exit_dummy; } - err = s35390a_disable_test_mode(s35390a); - if (err < 0) { - dev_err(&client->dev, "error disabling test mode\n"); - goto exit_dummy; - } - - err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); - if (err < 0) { - dev_err(&client->dev, "error checking 12/24 hour mode\n"); - goto exit_dummy; - } - if (buf[0] & S35390A_FLAG_24H) + if (status1 & S35390A_FLAG_24H) s35390a->twentyfourhour = 1; else s35390a->twentyfourhour = 0; - if (s35390a_get_datetime(client, &tm) < 0) + if (status1 & S35390A_FLAG_INT2) { + /* disable alarm (and maybe test mode) */ + buf = 0; + err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1); + if (err < 0) { + dev_err(&client->dev, "error disabling alarm"); + goto exit_dummy; + } + } else { + err = s35390a_disable_test_mode(s35390a); + if (err < 0) { + dev_err(&client->dev, "error disabling test mode\n"); + goto exit_dummy; + } + } + + if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0) dev_warn(&client->dev, "clock needs to be set\n"); device_set_wakeup_capable(&client->dev, 1); @@ -395,6 +464,10 @@ static int s35390a_probe(struct i2c_client *client, err = PTR_ERR(s35390a->rtc); goto exit_dummy; } + + if (status1 & S35390A_FLAG_INT2) + rtc_update_irq(s35390a->rtc, 1, RTC_AF); + return 0; exit_dummy: diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9b3a24e8d3a..5e41e8453ac 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -873,7 +873,7 @@ static int __init vmlogrdr_init(void) goto cleanup; for (i=0; i < MAXMINOR; ++i ) { - sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); + sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sys_ser[i].buffer) { rc = -ENOMEM; break; diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index bde5255200d..1d1e585bd03 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -142,11 +142,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) struct qdio_q *q; int i; - for_each_input_queue(irq, q, i) { - if (!references_shared_dsci(irq) && - has_multiple_inq_on_dsci(irq)) - xchg(q->irq_ptr->dsci, 0); + if (!references_shared_dsci(irq) && + has_multiple_inq_on_dsci(irq)) + xchg(irq->dsci, 0); + for_each_input_queue(irq, q, i) { if (q->u.in.queue_start_poll) { /* skip if polling is enabled or already in work */ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index c846a63ea67..bf13e73ecab 100644 --- 
a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -282,11 +282,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, /** - * zfcp_dbf_rec_run - trace event related to running recovery + * zfcp_dbf_rec_run_lvl - trace event related to running recovery + * @level: trace level to be used for event * @tag: identifier for event * @erp: erp_action running */ -void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) +void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp) { struct zfcp_dbf *dbf = erp->adapter->dbf; struct zfcp_dbf_rec *rec = &dbf->rec_buf; @@ -312,10 +313,20 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) else rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); - debug_event(dbf->rec, 1, rec, sizeof(*rec)); + debug_event(dbf->rec, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } +/** + * zfcp_dbf_rec_run - trace event related to running recovery + * @tag: identifier for event + * @erp: erp_action running + */ +void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) +{ + zfcp_dbf_rec_run_lvl(1, tag, erp); +} + /** * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery * @tag: identifier for event diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 440aa619da1..a8165f14255 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * zfcp device driver * debug feature declarations * - * Copyright IBM Corp. 2008, 2015 + * Copyright IBM Corp. 2008, 2016 */ #ifndef ZFCP_DBF_H @@ -283,6 +283,30 @@ struct zfcp_dbf { struct zfcp_dbf_scsi scsi_buf; }; +/** + * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default + * @req: request that has been completed + * + * Returns true if FCP response with only benign residual under count. + */ +static inline +bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req) +{ + struct fsf_qtcb *qtcb = req->qtcb; + u32 fsf_stat = qtcb->header.fsf_status; + struct fcp_resp *fcp_rsp; + u8 rsp_flags, fr_status; + + if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND) + return false; /* not an FCP response */ + fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp; + rsp_flags = fcp_rsp->fr_flags; + fr_status = fcp_rsp->fr_status; + return (fsf_stat == FSF_FCP_RSP_AVAILABLE) && + (rsp_flags == FCP_RESID_UNDER) && + (fr_status == SAM_STAT_GOOD); +} + static inline void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) { @@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); } else if (qtcb->header.fsf_status != FSF_GOOD) { - zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); + zfcp_dbf_hba_fsf_resp("fs_ferr", + zfcp_dbf_hba_fsf_resp_suppress(req) + ? 5 : 1, req); } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || (req->fsf_command == FSF_QTCB_OPEN_LUN)) { @@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); } +/** + * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset. + * @scmnd: SCSI command that was NULLified. + * @fsf_req: request that owned @scmnd. 
+ */ +static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd, + struct zfcp_fsf_req *fsf_req) +{ + _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req); +} + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index b4cd26d2415..f7e720e1109 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -3,7 +3,7 @@ * * Error Recovery Procedures (ERP). * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 2002, 2016 */ #define KMSG_COMPONENT "zfcp" @@ -1212,6 +1212,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) } } +/** + * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery + * @port: zfcp_port whose fc_rport we should try to unblock + */ +static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) +{ + unsigned long flags; + struct zfcp_adapter *adapter = port->adapter; + int port_status; + struct Scsi_Host *shost = adapter->scsi_host; + struct scsi_device *sdev; + + write_lock_irqsave(&adapter->erp_lock, flags); + port_status = atomic_read(&port->status); + if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || + (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE | + ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) { + /* new ERP of severity >= port triggered elsewhere meanwhile or + * local link down (adapter erp_failed but not clear unblock) + */ + zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action); + write_unlock_irqrestore(&adapter->erp_lock, flags); + return; + } + spin_lock(shost->host_lock); + __shost_for_each_device(sdev, shost) { + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); + int lun_status; + + if (zsdev->port != port) + continue; + /* LUN under port of interest */ + lun_status = atomic_read(&zsdev->status); + if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0) + continue; /* unblock rport despite failed LUNs */ + /* LUN recovery not given up yet [maybe follow-up pending] */ + if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || + (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) { + /* LUN blocked: + * not yet unblocked [LUN recovery pending] + * or meanwhile blocked [new LUN recovery triggered] + */ + zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action); + spin_unlock(shost->host_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); + return; + } + } + /* now port has no child or all children have completed recovery, + * and no ERP of severity >= port was meanwhile triggered elsewhere + */ + zfcp_scsi_schedule_rport_register(port); + spin_unlock(shost->host_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) { struct zfcp_adapter *adapter = act->adapter; @@ -1222,6 +1278,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) case ZFCP_ERP_ACTION_REOPEN_LUN: if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) scsi_device_put(sdev); + zfcp_erp_try_rport_unblock(port); break; case ZFCP_ERP_ACTION_REOPEN_PORT: @@ -1232,7 +1289,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) */ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) if (result == ZFCP_ERP_SUCCEEDED) - zfcp_scsi_schedule_rport_register(port); + zfcp_erp_try_rport_unblock(port); /* fall through */ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: put_device(&port->dev); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 01527c31d1d..fdef6a6fe06 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -3,7 +3,7 @@ 
* * External function declarations. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 2002, 2016 */ #ifndef ZFCP_EXT_H @@ -49,6 +49,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, struct zfcp_port *, struct scsi_device *, u8, u8); extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); +extern void zfcp_dbf_rec_run_lvl(int level, char *tag, + struct zfcp_erp_action *erp); extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index f246097b7c6..ad5718401ea 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1607,7 +1607,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; - struct zfcp_fsf_req *req = NULL; + struct zfcp_fsf_req *req; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1636,7 +1636,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); - if (req && !IS_ERR(req)) + if (!retval) zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); return retval; } @@ -1662,7 +1662,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; - struct zfcp_fsf_req *req = NULL; + struct zfcp_fsf_req *req; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1691,7 +1691,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); - if (req && !IS_ERR(req)) + if (!retval) zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); return retval; } diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 8cad41ffb6b..358b92ece8d 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,7 +3,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 2002, 2016 */ #ifndef FSF_H @@ -86,6 +86,7 @@ #define FSF_APP_TAG_CHECK_FAILURE 0x00000082 #define FSF_REF_TAG_CHECK_FAILURE 0x00000083 #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD +#define FSF_FCP_RSP_AVAILABLE 0x000000AF #define FSF_UNKNOWN_COMMAND 0x000000E2 #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 #define FSF_INVALID_COMMAND_OPTION 0x000000E5 diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h index 7c2c6194dfc..703fce59bef 100644 --- a/drivers/s390/scsi/zfcp_reqlist.h +++ b/drivers/s390/scsi/zfcp_reqlist.h @@ -4,7 +4,7 @@ * Data structure and helper functions for tracking pending FSF * requests. * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 2009, 2016 */ #ifndef ZFCP_REQLIST_H @@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, spin_unlock_irqrestore(&rl->lock, flags); } +/** + * zfcp_reqlist_apply_for_all() - apply a function to every request. + * @rl: the requestlist that contains the target requests. + * @f: the function to apply to each request; the first parameter of the + * function will be the target-request; the second parameter is the same + * pointer as given with the argument @data. 
+ * @data: freely chosen argument; passed through to @f as second parameter. + * + * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash- + * table (not a 'safe' variant, so don't modify the list). + * + * Holds @rl->lock over the entire request-iteration. + */ +static inline void +zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl, + void (*f)(struct zfcp_fsf_req *, void *), void *data) +{ + struct zfcp_fsf_req *req; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&rl->lock, flags); + for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) + list_for_each_entry(req, &rl->buckets[i], list) + f(req, data); + spin_unlock_irqrestore(&rl->lock, flags); +} + #endif /* ZFCP_REQLIST_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 38ee0df633a..66c37e77ac7 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -3,7 +3,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2015 + * Copyright IBM Corp. 2002, 2016 */ #define KMSG_COMPONENT "zfcp" @@ -109,9 +109,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) } if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { - /* This could be either - * open LUN pending: this is temporary, will result in - * open LUN or ERP_FAILED, so retry command + /* This could be * call to rport_delete pending: mimic retry from * fc_remote_port_chkready until rport is BLOCKED */ @@ -230,6 +228,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) return retval; } +struct zfcp_scsi_req_filter { + u8 tmf_scope; + u32 lun_handle; + u32 port_handle; +}; + +static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data) +{ + struct zfcp_scsi_req_filter *filter = + (struct zfcp_scsi_req_filter *)data; + + /* already aborted - prevent side-effects - or not a SCSI command */ + if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND) + return; + + /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */ + if (old_req->qtcb->header.port_handle != filter->port_handle) + return; + + if (filter->tmf_scope == FCP_TMF_LUN_RESET && + old_req->qtcb->header.lun_handle != filter->lun_handle) + return; + + zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req); + old_req->data = NULL; +} + +static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags) +{ + struct zfcp_adapter *adapter = zsdev->port->adapter; + struct zfcp_scsi_req_filter filter = { + .tmf_scope = FCP_TMF_TGT_RESET, + .port_handle = zsdev->port->handle, + }; + unsigned long flags; + + if (tm_flags == FCP_TMF_LUN_RESET) { + filter.tmf_scope = FCP_TMF_LUN_RESET; + filter.lun_handle = zsdev->lun_handle; + } + + /* + * abort_lock secures against other processings - in the abort-function + * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data + */ + write_lock_irqsave(&adapter->abort_lock, flags); + zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd, + &filter); + write_unlock_irqrestore(&adapter->abort_lock, flags); +} + static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); @@ -262,8 +311,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); retval = FAILED; - } else + } else { zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); + zfcp_scsi_forget_cmnds(zfcp_sdev, 
tm_flags); + } zfcp_fsf_req_free(fsf_req); return retval; } diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 7e17107643d..05c999429ff 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -358,17 +358,24 @@ static int aac_src_check_health(struct aac_dev *dev) { u32 status = src_readl(dev, MUnit.OMR); + /* + * Check to see if the board panic'd. + */ + if (unlikely(status & KERNEL_PANIC)) + goto err_blink; + /* * Check to see if the board failed any self tests. */ if (unlikely(status & SELF_TEST_FAILED)) - return -1; + goto err_out; /* - * Check to see if the board panic'd. + * Check to see if the board is in a monitor panic state. */ - if (unlikely(status & KERNEL_PANIC)) - return (status >> 16) & 0xFF; + if (unlikely(status & MONITOR_PANIC)) + goto err_out; + /* * Wait for the adapter to be up and running. */ @@ -378,6 +385,12 @@ static int aac_src_check_health(struct aac_dev *dev) * Everything is OK */ return 0; + +err_out: + return -1; + +err_blink: + return (status >> 16) & 0xFF; } /** diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index d2895836f9f..83e3ca703cd 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -219,7 +219,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = qc->n_elem; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) - xfer += sg->length; + xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e6e0679ec88..b08b1e1a45e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -10909,6 +10909,7 @@ static struct pci_driver lpfc_driver = { .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, .remove = lpfc_pci_remove_one, + .shutdown = lpfc_pci_remove_one, .suspend = lpfc_pci_suspend_one, .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 1e4479f3331..55716c5184f 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -621,7 +621,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) { u32 tmp; tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); - if (tmp && 1 << (slot_idx % 32)) { + if (tmp & 1 << (slot_idx % 32)) { mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), 1 << (slot_idx % 32)); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 2da1959ff2f..03c8783180a 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -736,8 +736,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf mv_dprintk("device %016llx not ready.\n", SAS_ADDR(dev->sas_addr)); - rc = SAS_PHY_DOWN; - return rc; + rc = SAS_PHY_DOWN; + return rc; } tei.port = dev->port->lldd_port; if (tei.port && !tei.port->port_attached && !tmf) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 66c495d2101..40fe8a77236 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3301,7 +3301,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct ct6_dsd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ctx_cachep) - goto fail_free_gid_list; + goto fail_free_srb_mempool; } ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, ctx_cachep); @@ -3454,7 +3454,7 @@ qla2x00_mem_alloc(struct 
qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), GFP_KERNEL); if (!ha->loop_id_map) - goto fail_async_pd; + goto fail_loop_id_map; else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, @@ -3463,6 +3463,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, return 0; +fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: @@ -3490,6 +3492,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: if (IS_QLA82XX(ha) || ql2xenabledif) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); @@ -3507,10 +3513,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, kfree(ha->nvram); ha->nvram = NULL; fail_free_ctx_mempool: - mempool_destroy(ha->ctx_mempool); + if (ha->ctx_mempool) + mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; fail_free_srb_mempool: - mempool_destroy(ha->srb_mempool); + if (ha->srb_mempool) + mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; fail_free_gid_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 60031e15d56..dc1c2f4520f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1009,8 +1009,12 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) { struct request *rq = cmd->request; + int error; - int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); + if (WARN_ON_ONCE(!rq->nr_phys_segments)) + return -EINVAL; + + error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); if (error) goto err_exit; @@ -1102,11 +1106,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) * submit a request without an attached bio. */ if (req->bio) { - int ret; - - BUG_ON(!req->nr_phys_segments); - - ret = scsi_init_io(cmd, GFP_ATOMIC); + int ret = scsi_init_io(cmd, GFP_ATOMIC); if (unlikely(ret)) return ret; } else { @@ -1150,11 +1150,6 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) return ret; } - /* - * Filesystem requests must transfer data. 
- */ - BUG_ON(!req->nr_phys_segments); - cmd = scsi_get_cmd_from_req(sdev, req); if (unlikely(!cmd)) return BLKPREP_DEFER; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index edb2c8f17fe..8f87042ce31 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -865,10 +865,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) struct request_queue *rq = sdev->request_queue; struct scsi_target *starget = sdev->sdev_target; - error = scsi_device_set_state(sdev, SDEV_RUNNING); - if (error) - return error; - error = scsi_target_add(starget); if (error) return error; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2414099bd9d..0b95bac71cf 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1379,11 +1379,15 @@ static int media_not_present(struct scsi_disk *sdkp, **/ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_disk *sdkp = scsi_disk(disk); - struct scsi_device *sdp = sdkp->device; + struct scsi_disk *sdkp = scsi_disk_get(disk); + struct scsi_device *sdp; struct scsi_sense_hdr *sshdr = NULL; int retval; + if (!sdkp) + return 0; + + sdp = sdkp->device; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); /* @@ -1440,6 +1444,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) kfree(sshdr); retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; sdp->changed = 0; + scsi_disk_put(sdkp); return retval; } @@ -1944,6 +1949,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, #define READ_CAPACITY_RETRIES_ON_RESET 10 +/* + * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set + * and the reported logical block size is bigger than 512 bytes. Note + * that last_sector is a u64 and therefore logical_to_sectors() is not + * applicable. + */ +static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) +{ + u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); + + if (sizeof(sector_t) == 4 && last_sector > U32_MAX) + return false; + + return true; +} + static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, unsigned char *buffer) { @@ -2009,7 +2030,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, return -ENODEV; } - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " "kernel compiled with support for large block " "devices.\n"); @@ -2095,7 +2116,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, return sector_size; } - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. 
Use a " "kernel compiled with support for large block " "devices.\n"); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 1f65e32db28..291791a9be8 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) sg_io_hdr_t *hp; unsigned char cmnd[MAX_COMMAND_SIZE]; + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + return -EINVAL; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 1ac9943cbb9..c1f23abd754 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -855,6 +855,7 @@ static void get_capabilities(struct scsi_cd *cd) unsigned char *buffer; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + unsigned int ms_len = 128; int rc, n; static const char *loadmech[] = @@ -881,10 +882,11 @@ static void get_capabilities(struct scsi_cd *cd) scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, + rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); - if (!scsi_status_is_good(rc)) { + if (!scsi_status_is_good(rc) || data.length > ms_len || + data.header_length + data.block_descriptor_length > data.length) { /* failed, drive doesn't have capabilities mode page */ cd->cdi.speed = 1; cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 913b91c78a2..58d898cdff0 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -204,6 +204,7 @@ enum storvsc_request_type { #define SRB_STATUS_SUCCESS 0x01 #define SRB_STATUS_ABORTED 0x02 #define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_DATA_OVERRUN 0x12 /* * This is the end of Protocol specific defines. @@ -794,6 +795,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, switch (vm_srb->srb_status) { case SRB_STATUS_ERROR: + /* + * Let upper layer deal with error when + * sense message is present. + */ + + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) + break; /* * If there is an error; offline the device since all * error recovery strategies would have already been @@ -859,6 +867,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) struct scsi_sense_hdr sense_hdr; struct vmscsi_request *vm_srb; struct stor_mem_pools *memp = scmnd->device->hostdata; + u32 data_transfer_length; struct Scsi_Host *host; struct storvsc_device *stor_dev; struct hv_device *dev = host_dev->dev; @@ -867,6 +876,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) host = stor_dev->host; vm_srb = &cmd_request->vstor_packet.vm_srb; + data_transfer_length = vm_srb->data_transfer_length; if (cmd_request->bounce_sgl_count) { if (vm_srb->data_in == READ_TYPE) copy_from_bounce_buffer(scsi_sglist(scmnd), @@ -885,13 +895,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) scsi_print_sense_hdr("storvsc", &sense_hdr); } - if (vm_srb->srb_status != SRB_STATUS_SUCCESS) + if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, sense_hdr.ascq); + /* + * The Windows driver set data_transfer_length on + * SRB_STATUS_DATA_OVERRUN. On other errors, this value + * is untouched. In these cases we set it to 0. 
+ */ + if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) + data_transfer_length = 0; + } scsi_set_resid(scmnd, - cmd_request->data_buffer.len - - vm_srb->data_transfer_length); + cmd_request->data_buffer.len - data_transfer_length); scsi_done_fn = scmnd->scsi_done; diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index a8dc95ebf2d..7700cef5e17 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", err); + goto out_free; } else { ssb_dbg("Using SPROM revision %d provided by platform\n", sprom->revision); diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c index d104b437842..fa763dd9a4b 100644 --- a/drivers/staging/iio/adc/ad7606_core.c +++ b/drivers/staging/iio/adc/ad7606_core.c @@ -185,7 +185,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev, mutex_lock(&indio_dev->mlock); gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); - gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); + gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1); st->oversampling = lval; mutex_unlock(&indio_dev->mlock); diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c index c699a3058b3..cfffdd20e43 100644 --- a/drivers/staging/vt6656/hostap.c +++ b/drivers/staging/vt6656/hostap.c @@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - free_netdev(pDevice->apdev); + if (pDevice->apdev) + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 30be6c9bdbc..ff3ca598e53 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -805,22 +805,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) } else if (IS_TYPE_NUMBER(param)) { if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) SET_PSTATE_REPLY_OPTIONAL(param); - /* - * The GlobalSAN iSCSI Initiator for MacOSX does - * not respond to MaxBurstLength, FirstBurstLength, - * DefaultTime2Wait or DefaultTime2Retain parameter keys. - * So, we set them to 'reply optional' here, and assume the - * the defaults from iscsi_parameters.h if the initiator - * is not RFC compliant and the keys are not negotiated. 
- */ - if (!strcmp(param->name, MAXBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, FIRSTBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2WAIT)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2RETAIN)) - SET_PSTATE_REPLY_OPTIONAL(param); /* * Required for gPXE iSCSI boot client */ diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 75a4e83842c..a6801e8a811 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -256,7 +256,6 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro iscsi_release_param_list(tpg->param_list); tpg->param_list = NULL; } - kfree(tpg); return -ENOMEM; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 016e882356d..eeeea38d4b2 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -722,21 +722,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { struct se_cmd *se_cmd = NULL; int rc; + bool op_scsi = false; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. */ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); + op_scsi = true; /* * Fallthrough */ case ISCSI_OP_SCSI_TMFUNC: - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); - if (!rc && shutdown && se_cmd && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, op_scsi, shutdown); + rc = transport_generic_free_cmd(se_cmd, shutdown); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, op_scsi, shutdown); target_put_sess_cmd(se_cmd->se_sess, se_cmd); } break; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 244776bec1c..79fed114a2e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -157,7 +157,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, buf = kzalloc(12, GFP_KERNEL); if (!buf) - return; + goto out_free; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = MODE_SENSE; @@ -172,9 +172,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, * If MODE_SENSE still returns zero, set the default value to 1024. */ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); +out_free: if (!sdev->sector_size) sdev->sector_size = 1024; -out_free: + kfree(buf); } @@ -317,9 +318,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, sd->lun, sd->queue_depth); } - dev->dev_attrib.hw_block_size = sd->sector_size; + dev->dev_attrib.hw_block_size = + min_not_zero((int)sd->sector_size, 512); dev->dev_attrib.hw_max_sectors = - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + min_not_zero((unsigned)sd->host->max_sectors, queue_max_hw_sectors(q)); dev->dev_attrib.hw_queue_depth = sd->queue_depth; /* @@ -342,8 +344,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, /* * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. */ - if (sd->type == TYPE_TAPE) + if (sd->type == TYPE_TAPE) { pscsi_tape_read_blocksize(dev, sd); + dev->dev_attrib.hw_block_size = sd->sector_size; + } return 0; } @@ -409,7 +413,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) /* * Called with struct Scsi_Host->host_lock called. 
*/ -static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) __releases(sh->host_lock) { struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; @@ -436,28 +440,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) return 0; } -/* - * Called with struct Scsi_Host->host_lock called. - */ -static int pscsi_create_type_other(struct se_device *dev, - struct scsi_device *sd) - __releases(sh->host_lock) -{ - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; - struct Scsi_Host *sh = sd->host; - int ret; - - spin_unlock_irq(sh->host_lock); - ret = pscsi_add_device_to_list(dev, sd); - if (ret) - return ret; - - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, - sd->channel, sd->id, sd->lun); - return 0; -} - static int pscsi_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; @@ -545,11 +527,8 @@ static int pscsi_configure_device(struct se_device *dev) case TYPE_DISK: ret = pscsi_create_type_disk(dev, sd); break; - case TYPE_ROM: - ret = pscsi_create_type_rom(dev, sd); - break; default: - ret = pscsi_create_type_other(dev, sd); + ret = pscsi_create_type_nondisk(dev, sd); break; } @@ -606,8 +585,7 @@ static void pscsi_free_device(struct se_device *dev) else if (pdv->pdv_lld_host) scsi_host_put(pdv->pdv_lld_host); - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) - scsi_device_put(sd); + scsi_device_put(sd); pdv->pdv_sd = NULL; } @@ -1125,7 +1103,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) if (pdv->pdv_bd && pdv->pdv_bd->bd_part) return pdv->pdv_bd->bd_part->nr_sects; - dump_stack(); return 0; } diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c index d6080c3831e..ce2e5d508fe 100644 --- a/drivers/tty/nozomi.c +++ b/drivers/tty/nozomi.c @@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) struct tty_struct *tty = tty_port_tty_get(&port->port); int i, ret; - read_mem32((u32 *) &size, addr, 4); + size = __le32_to_cpu(readl(addr)); /* DBG1( "%d bytes port: %d", size, index); */ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 98b8423793f..9243dd729dd 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -55,6 +55,7 @@ struct serial_private { unsigned int nr; void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES]; struct pci_serial_quirk *quirk; + const struct pciserial_board *board; int line[0]; }; @@ -3374,6 +3375,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) } } priv->nr = i; + priv->board = board; return priv; err_deinit: @@ -3384,7 +3386,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) } EXPORT_SYMBOL_GPL(pciserial_init_ports); -void pciserial_remove_ports(struct serial_private *priv) +void pciserial_detach_ports(struct serial_private *priv) { struct pci_serial_quirk *quirk; int i; @@ -3404,7 +3406,11 @@ void pciserial_remove_ports(struct serial_private *priv) quirk = find_quirk(priv->dev); if (quirk->exit) quirk->exit(priv->dev); +} +void pciserial_remove_ports(struct serial_private *priv) +{ + pciserial_detach_ports(priv); kfree(priv); } EXPORT_SYMBOL_GPL(pciserial_remove_ports); @@ -4943,7 +4949,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, return PCI_ERS_RESULT_DISCONNECT; if 
(priv) - pciserial_suspend_ports(priv); + pciserial_detach_ports(priv); pci_disable_device(dev); @@ -4968,9 +4974,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) static void serial8250_io_resume(struct pci_dev *dev) { struct serial_private *priv = pci_get_drvdata(dev); + const struct pciserial_board *board; - if (priv) - pciserial_resume_ports(priv); + if (!priv) + return; + + board = priv->board; + kfree(priv); + priv = pciserial_init_ports(dev, board); + + if (!IS_ERR(priv)) { + pci_set_drvdata(dev, priv); + } } static const struct pci_error_handlers serial8250_err_handler = { diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 2f2d4ebe429..ba809af3700 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1163,6 +1163,7 @@ static struct dev_pm_ops msm_serial_dev_pm_ops = { .runtime_suspend = msm_serial_runtime_suspend, .runtime_resume = msm_serial_runtime_resume, }; +MODULE_DEVICE_TABLE(of, msm_match_table); static struct platform_driver msm_platform_driver = { .remove = msm_serial_remove, diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index b51c15408ff..602c9a74bec 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -881,8 +881,8 @@ static const struct input_device_id sysrq_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, - .evbit = { BIT_MASK(EV_KEY) }, - .keybit = { BIT_MASK(KEY_LEFTALT) }, + .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, }, { }, }; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 77f8dbaaeaf..f3adb78b61c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -543,19 +543,18 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) acm->control->needs_remote_wakeup = 1; acm->ctrlurb->dev = acm->dev; - if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) { + retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL); + if (retval) { dev_err(&acm->control->dev, "%s - usb_submit_urb(ctrl irq) failed\n", __func__); goto error_submit_urb; } acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS; - if (acm_set_control(acm, acm->ctrlout) < 0 && - (acm->ctrl_caps & USB_CDC_CAP_LINE)) + retval = acm_set_control(acm, acm->ctrlout); + if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE)) goto error_set_control; - usb_autopm_put_interface(acm->control); - /* * Unthrottle device in case the TTY was closed while throttled. 
*/ @@ -564,9 +563,12 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) acm->throttle_req = 0; spin_unlock_irq(&acm->read_lock); - if (acm_submit_read_urbs(acm, GFP_KERNEL)) + retval = acm_submit_read_urbs(acm, GFP_KERNEL); + if (retval) goto error_submit_read_urbs; + usb_autopm_put_interface(acm->control); + mutex_unlock(&acm->mutex); return 0; @@ -583,7 +585,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) error_get_interface: disconnected: mutex_unlock(&acm->mutex); - return retval; + + return usb_translate_errors(retval); } static void acm_port_destruct(struct tty_port *port) diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 4c5506ae5e4..64317898a7c 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -989,7 +989,7 @@ static int usbtmc_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "%s called\n", __func__); - data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL); + data = kzalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL); if (!data) { dev_err(&intf->dev, "Unable to allocate kernel memory\n"); return -ENOMEM; @@ -1035,6 +1035,12 @@ static int usbtmc_probe(struct usb_interface *intf, } } + if (!data->bulk_out || !data->bulk_in) { + dev_err(&intf->dev, "bulk endpoints not found\n"); + retcode = -ENODEV; + goto err_put; + } + retcode = get_capabilities(data); if (retcode) dev_err(&intf->dev, "can't read capabilities\n"); @@ -1058,6 +1064,7 @@ static int usbtmc_probe(struct usb_interface *intf, error_register: sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); +err_put: kref_put(&data->kref, usbtmc_delete); return retcode; } diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 3252bb2dcb8..d6481cb469c 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -207,6 +207,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; + /* Check for duplicate endpoint addresses */ + for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { + if (ifp->endpoint[i].desc.bEndpointAddress == + d->bEndpointAddress) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } + } + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ifp->desc.bNumEndpoints; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d75940c98c6..b951d196974 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2660,8 +2660,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if (ret < 0) return ret; - /* The port state is unknown until the reset completes. */ - if (!(portstatus & USB_PORT_STAT_RESET)) + /* + * The port state is unknown until the reset completes. + * + * On top of that, some chips may require additional time + * to re-establish a connection after the reset is complete, + * so also wait for the connection to be re-established. 
+ */ + if (!(portstatus & USB_PORT_STAT_RESET) && + (portstatus & USB_PORT_STAT_CONNECTION)) break; /* switch to the long delay after two short delay failures */ diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 60ffe1a160f..2d185d398a0 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -292,6 +292,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; + unsigned int unmap_after_complete = false; int i; if (req->queued) { @@ -325,11 +326,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->request.status == -EINPROGRESS) req->request.status = status; - if (dwc->ep0_bounced && dep->number == 0) + /* + * NOTICE we don't want to unmap before calling ->complete() if we're + * dealing with a bounced ep0 request. If we unmap it here, we would end + * up overwritting the contents of req->buf and this could confuse the + * gadget driver. + */ + if (dwc->ep0_bounced && dep->number <= 1) { dwc->ep0_bounced = false; - else - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); + unmap_after_complete = true; + } else { + usb_gadget_unmap_request(&dwc->gadget, + &req->request, req->direction); + } dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", req, dep->name, req->request.actual, @@ -339,6 +348,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, spin_unlock(&dwc->lock); req->request.complete(&dep->endpoint, &req->request); spin_lock(&dwc->lock); + + if (unmap_after_complete) + usb_gadget_unmap_request(&dwc->gadget, + &req->request, req->direction); } static const char *dwc3_gadget_ep_cmd_string(u8 cmd) diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 80870932e40..9297cd0473e 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -48,23 +48,23 @@ struct dwc3; #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) /* DEPCFG parameter 1 */ -#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) +#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) -#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) +#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) -#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) +#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) #define DWC3_DEPCFG_BULK_BASED (1 << 30) #define DWC3_DEPCFG_FIFO_BASED (1 << 31) /* DEPCFG parameter 0 */ -#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) -#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) -#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) -#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) +#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) +#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) +#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) +#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) /* This applies for core versions earlier than 1.94a */ #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 68ca079606f..5b8cda00407 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g, ep_found: /* commit results */ - 
_ep->maxpacket = usb_endpoint_maxp(chosen_desc); + _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff; _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; - _ep->mult = 0; + _ep->mult = 1; + + if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || + usb_endpoint_xfer_int(_ep->desc))) + _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1; + if (!want_comp_desc) return 0; @@ -146,7 +151,7 @@ int config_ep_by_speed(struct usb_gadget *g, switch (usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ - _ep->mult = comp_desc->bmAttributes & 0x3; + _ep->mult = (comp_desc->bmAttributes & 0x3) + 1; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: _ep->maxburst = comp_desc->bMaxBurst + 1; @@ -1479,9 +1484,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) value = min(w_length, (u16) 1); break; - /* function drivers must handle get/set altsetting; if there's - * no get() method, we know only altsetting zero works. - */ + /* function drivers must handle get/set altsetting */ case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != USB_RECIP_INTERFACE) goto unknown; @@ -1490,7 +1493,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) f = cdev->config->interface[intf]; if (!f) break; - if (w_value && !f->set_alt) + + /* + * If there's no get_alt() method, we know only altsetting zero + * works. There is no need to check if set_alt() is not NULL + * as we check this in usb_add_function(). + */ + if (w_value && !f->get_alt) break; /* * We put interfaces in default settings (alt 0) diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index ac0e79e2c2e..644c1053d65 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep) /* caller must hold lock */ static void stop_activity(struct dummy *dum) { - struct dummy_ep *ep; + int i; /* prevent any more requests */ dum->address = 0; @@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum) /* The timer is left running so that outstanding URBs can fail */ /* nuke any pending requests first, so driver i/o is quiesced */ - list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) - nuke(dum, ep); + for (i = 0; i < DUMMY_ENDPOINTS; ++i) + nuke(dum, &dum->ep[i]); /* driver now does any non-usb quiescing necessary */ } diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index 5024683df8a..4824d84ada4 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -638,13 +638,15 @@ static int acm_notify_serial_state(struct f_acm *acm) { struct usb_composite_dev *cdev = acm->port.func.config->cdev; int status; + __le16 serial_state; spin_lock(&acm->lock); if (acm->notify_req) { DBG(cdev, "acm ttyGS%d serial state %04x\n", acm->port_num, acm->serial_state); + serial_state = cpu_to_le16(acm->serial_state); status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, - 0, &acm->serial_state, sizeof(acm->serial_state)); + 0, &serial_state, sizeof(acm->serial_state)); } else { acm->pending = true; status = 0; diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 42a30903d4f..c9e552bdf05 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1200,7 +1200,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) /* data and/or status stage for control request */ } else if (dev->state == 
STATE_DEV_SETUP) { - /* IN DATA+STATUS caller makes len <= wLength */ + len = min_t(size_t, len, dev->setup_wLength); if (dev->setup_in) { retval = setup_req (dev->gadget->ep0, dev->req, len); if (retval == 0) { @@ -1834,10 +1834,12 @@ static struct usb_gadget_driver probe_driver = { * such as configuration notifications. */ -static int is_valid_config (struct usb_config_descriptor *config) +static int is_valid_config(struct usb_config_descriptor *config, + unsigned int total) { return config->bDescriptorType == USB_DT_CONFIG && config->bLength == USB_DT_CONFIG_SIZE + && total >= USB_DT_CONFIG_SIZE && config->bConfigurationValue != 0 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0; @@ -1854,7 +1856,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; char *kbuf; - if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) + if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) || + (len > PAGE_SIZE * 4)) return -EINVAL; /* we might need to change message format someday */ @@ -1878,7 +1881,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) /* full or low speed config */ dev->config = (void *) kbuf; total = le16_to_cpu(dev->config->wTotalLength); - if (!is_valid_config (dev->config) || total >= length) + if (!is_valid_config(dev->config, total) || + total > length - USB_DT_DEVICE_SIZE) goto fail; kbuf += total; length -= total; @@ -1887,10 +1891,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) if (kbuf [1] == USB_DT_CONFIG) { dev->hs_config = (void *) kbuf; total = le16_to_cpu(dev->hs_config->wTotalLength); - if (!is_valid_config (dev->hs_config) || total >= length) + if (!is_valid_config(dev->hs_config, total) || + total > length - USB_DT_DEVICE_SIZE) goto fail; kbuf += total; length -= total; + } else { + dev->hs_config = NULL; } /* could support multiple configs, using another encoding! */ diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c index 71e896d4c5a..43e8c65fd9e 100644 --- a/drivers/usb/gadget/uvc_video.c +++ b/drivers/usb/gadget/uvc_video.c @@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video) req_size = video->ep->maxpacket * max_t(unsigned int, video->ep->maxburst, 1) - * (video->ep->mult + 1); + * (video->ep->mult); for (i = 0; i < UVC_NUM_REQUESTS; ++i) { video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 1e1563da181..7f93dc26fb1 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -927,10 +927,6 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick) int completed, modified; __hc32 *prev; - /* Is this ED already invisible to the hardware? */ - if (ed->state == ED_IDLE) - goto ed_idle; - /* only take off EDs that the HC isn't using, accounting for * frame counter wraps and EDs with partially retired TDs */ @@ -961,14 +957,12 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick) } /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) ohci->eds_scheduled--; ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); ed->hwNextED = 0; wmb(); ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); -ed_idle: /* reentrancy: if we drop the schedule lock, someone might * have modified this list. 
normally it's just prepending @@ -1039,6 +1033,7 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick) if (list_empty(&ed->td_list)) { *last = ed->ed_next; ed->ed_next = NULL; + ed->state = ED_IDLE; } else if (ohci->rh_state == OHCI_RH_RUNNING) { *last = ed->ed_next; ed->ed_next = NULL; diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 0f228c46eed..ad458ef4b7e 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd) if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) uhci->wait_for_hp = 1; + /* Intel controllers use non-PME wakeup signalling */ + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) + device_set_run_wake(uhci_dev(uhci), 1); + /* Set up pointers to PCI-specific functions */ uhci->reset_hc = uhci_pci_reset_hc; uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 7bb78ff42b5..c77b719d0d9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -943,6 +943,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) xhci->devs[slot_id] = NULL; } +/* + * Free a virt_device structure. + * If the virt_device added a tt_info (a hub) and has children pointing to + * that tt_info, then free the child first. Recursive. + * We can't rely on udev at this point to find child-parent relationships. + */ +void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) +{ + struct xhci_virt_device *vdev; + struct list_head *tt_list_head; + struct xhci_tt_bw_info *tt_info, *next; + int i; + + vdev = xhci->devs[slot_id]; + if (!vdev) + return; + + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { + /* is this a hub device that added a tt_info to the tts list */ + if (tt_info->slot_id == slot_id) { + /* are any devices using this tt_info? */ + for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { + vdev = xhci->devs[i]; + if (vdev && (vdev->tt_info == tt_info)) + xhci_free_virt_devices_depth_first( + xhci, i); + } + } + } + /* we are now at a leaf device */ + xhci_free_virt_device(xhci, slot_id); +} + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags) { @@ -1822,8 +1856,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) } } - for (i = 1; i < MAX_HC_SLOTS; ++i) - xhci_free_virt_device(xhci, i); + for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) + xhci_free_virt_devices_depth_first(xhci, i); if (xhci->segment_pool) dma_pool_destroy(xhci->segment_pool); @@ -2324,7 +2358,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * "physically contiguous and 64-byte (cache line) aligned". 
*/ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - GFP_KERNEL); + flags); if (!xhci->dcbaa) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); @@ -2415,7 +2449,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->erst.entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, - GFP_KERNEL); + flags); if (!xhci->erst.entries) goto fail; xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 2320e20d5be..cae9881145f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -224,6 +224,7 @@ static void xhci_pci_remove(struct pci_dev *dev) struct xhci_hcd *xhci; xhci = hcd_to_xhci(pci_get_drvdata(dev)); + xhci->xhc_state |= XHCI_STATE_REMOVING; if (xhci->shared_hcd) { usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 4de2d185817..67a8c7a3532 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -205,7 +205,7 @@ static int xhci_plat_remove(struct platform_device *dev) struct usb_hcd *hcd = platform_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - pm_runtime_disable(&dev->dev); + xhci->xhc_state |= XHCI_STATE_REMOVING; usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 535e5be3d1e..420a688f3ac 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -147,7 +147,8 @@ int xhci_start(struct xhci_hcd *xhci) "waited %u microseconds.\n", XHCI_MAX_HALT_USEC); if (!ret) - xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); + /* clear state flags. Including dying, halted or removing */ + xhci->xhc_state = 0; return ret; } @@ -2710,7 +2711,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) if (ret <= 0) return ret; xhci = hcd_to_xhci(hcd); - if (xhci->xhc_state & XHCI_STATE_DYING) + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_REMOVING)) return -ENODEV; xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 05b80792ff4..0653f7bfd60 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1497,6 +1497,7 @@ struct xhci_hcd { */ #define XHCI_STATE_DYING (1 << 0) #define XHCI_STATE_HALTED (1 << 1) +#define XHCI_STATE_REMOVING (1 << 2) /* Statistics */ int error_bitmask; unsigned int quirks; diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index ce978384fda..3b885c61b73 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface, if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; + if (iface_desc->desc.bNumEndpoints < 1) + return -ENODEV; + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 4c24ba0a657..05aa716cf6b 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -792,12 +792,6 @@ static int iowarrior_probe(struct usb_interface *interface, iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); - if (iface_desc->desc.bNumEndpoints < 1) { - dev_err(&interface->dev, "Invalid number of endpoints\n"); - retval = -EINVAL; - goto 
error; - } - /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; @@ -808,6 +802,21 @@ static int iowarrior_probe(struct usb_interface *interface, /* this one will match for the IOWarrior56 only */ dev->int_out_endpoint = endpoint; } + + if (!dev->int_in_endpoint) { + dev_err(&interface->dev, "no interrupt-in endpoint found\n"); + retval = -ENODEV; + goto error; + } + + if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { + if (!dev->int_out_endpoint) { + dev_err(&interface->dev, "no interrupt-out endpoint found\n"); + retval = -ENODEV; + goto error; + } + } + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index e129cf66122..20d7e5312f0 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf, interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints < 3) { + usb_put_dev(usbdev); + return -ENODEV; + } + /* * Allocate parport interface */ diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index f7b13fd2525..a3dcbd55e43 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h @@ -157,5 +157,5 @@ struct musb_dma_controller { void __iomem *base; u8 channel_count; u8 used_channels; - u8 irq; + int irq; }; diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 40e7fd94646..62fb2553c77 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -100,10 +100,17 @@ static int ark3116_read_reg(struct usb_serial *serial, usb_rcvctrlpipe(serial->dev, 0), 0xfe, 0xc0, 0, reg, buf, 1, ARK_TIMEOUT); - if (result < 0) + if (result < 1) { + dev_err(&serial->interface->dev, + "failed to read register %u: %d\n", + reg, result); + if (result >= 0) + result = -EIO; + return result; - else - return buf[0]; + } + + return buf[0]; } static inline int calc_divisor(int bps) diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index c2a4171ab9c..a4e5be5aea4 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -97,6 +97,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, value, index, NULL, 0, DEFAULT_TIMEOUT); + if (r < 0) + dev_err(&dev->dev, "failed to send control message: %d\n", r); return r; } @@ -114,7 +116,20 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - return r; + if (r < bufsize) { + if (r >= 0) { + dev_err(&dev->dev, + "short control message received (%d < %u)\n", + r, bufsize); + r = -EIO; + } + + dev_err(&dev->dev, "failed to receive control message: %d\n", + r); + return r; + } + + return 0; } static int ch341_set_baudrate(struct usb_device *dev, @@ -156,9 +171,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; unsigned long flags; buffer = kmalloc(size, GFP_KERNEL); @@ -169,15 +184,10 @@ 
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - /* setup the private status if available */ - if (r == 2) { - r = 0; - spin_lock_irqsave(&priv->lock, flags); - priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; - priv->multi_status_change = 0; - spin_unlock_irqrestore(&priv->lock, flags); - } else - r = -EPROTO; + spin_lock_irqsave(&priv->lock, flags); + priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; + priv->multi_status_change = 0; + spin_unlock_irqrestore(&priv->lock, flags); out: kfree(buffer); return r; @@ -187,9 +197,9 @@ out: kfree(buffer); static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; buffer = kmalloc(size, GFP_KERNEL); if (!buffer) @@ -252,7 +262,6 @@ static int ch341_port_probe(struct usb_serial_port *port) spin_lock_init(&priv->lock); priv->baud_rate = DEFAULT_BAUD_RATE; - priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; r = ch341_configure(port->serial->dev, priv); if (r < 0) @@ -316,15 +325,15 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) r = ch341_configure(serial->dev, priv); if (r) - goto out; + return r; r = ch341_set_handshake(serial->dev, priv->line_control); if (r) - goto out; + return r; r = ch341_set_baudrate(serial->dev, priv); if (r) - goto out; + return r; dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__); r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); @@ -332,12 +341,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed submitting interrupt urb," " error %d\n", __func__, r); ch341_close(port); - goto out; + return r; } r = usb_serial_generic_open(tty, port); + if (r) + goto err_kill_interrupt_urb; -out: return r; + return 0; + +err_kill_interrupt_urb: + usb_kill_urb(port->interrupt_in_urb); + + return r; } /* Old_termios contains the original termios settings and @@ -352,26 +368,25 @@ static void ch341_set_termios(struct tty_struct *tty, baud_rate = tty_get_baud_rate(tty); - priv->baud_rate = baud_rate; - if (baud_rate) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); + priv->baud_rate = baud_rate; ch341_set_baudrate(port->serial->dev, priv); - } else { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); } - ch341_set_handshake(port->serial->dev, priv->line_control); - /* Unimplemented: * (cflag & CSIZE) : data bits [5, 8] * (cflag & PARENB) : parity {NONE, EVEN, ODD} * (cflag & CSTOPB) : stop bits [1, 2] */ + + spin_lock_irqsave(&priv->lock, flags); + if (C_BAUD(tty) == B0) + priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); + spin_unlock_irqrestore(&priv->lock, flags); + + ch341_set_handshake(port->serial->dev, priv->line_control); } static void ch341_break_ctl(struct tty_struct *tty, int break_state) @@ -570,14 +585,23 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { - struct ch341_private *priv; - - priv = usb_get_serial_port_data(serial->port[0]); + struct usb_serial_port *port = serial->port[0]; + struct ch341_private *priv = usb_get_serial_port_data(port); + int ret; /* reconfigure 
ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); - return 0; + if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { + ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); + if (ret) { + dev_err(&port->dev, "failed to submit interrupt urb: %d\n", + ret); + return ret; + } + } + + return usb_serial_generic_resume(serial); } static struct usb_serial_driver ch341_device = { diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index 781426230d6..bb3c7f09f05 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -51,6 +51,7 @@ #define CYBERJACK_PRODUCT_ID 0x0100 /* Function prototypes */ +static int cyberjack_attach(struct usb_serial *serial); static int cyberjack_port_probe(struct usb_serial_port *port); static int cyberjack_port_remove(struct usb_serial_port *port); static int cyberjack_open(struct tty_struct *tty, @@ -78,6 +79,7 @@ static struct usb_serial_driver cyberjack_device = { .description = "Reiner SCT Cyberjack USB card reader", .id_table = id_table, .num_ports = 1, + .attach = cyberjack_attach, .port_probe = cyberjack_port_probe, .port_remove = cyberjack_port_remove, .open = cyberjack_open, @@ -101,6 +103,14 @@ struct cyberjack_private { short wrsent; /* Data already sent */ }; +static int cyberjack_attach(struct usb_serial *serial) +{ + if (serial->num_bulk_out < serial->num_ports) + return -ENODEV; + + return 0; +} + static int cyberjack_port_probe(struct usb_serial_port *port) { struct cyberjack_private *priv; diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 8c34d9cfb22..e8d7c1beae8 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -1489,16 +1489,20 @@ static int digi_read_oob_callback(struct urb *urb) struct usb_serial *serial = port->serial; struct tty_struct *tty; struct digi_port *priv = usb_get_serial_port_data(port); + unsigned char *buf = urb->transfer_buffer; int opcode, line, status, val; int i; unsigned int rts; + if (urb->actual_length < 4) + return -1; + /* handle each oob command */ - for (i = 0; i < urb->actual_length - 3;) { - opcode = ((unsigned char *)urb->transfer_buffer)[i++]; - line = ((unsigned char *)urb->transfer_buffer)[i++]; - status = ((unsigned char *)urb->transfer_buffer)[i++]; - val = ((unsigned char *)urb->transfer_buffer)[i++]; + for (i = 0; i < urb->actual_length - 3; i += 4) { + opcode = buf[i]; + line = buf[i + 1]; + status = buf[i + 2]; + val = buf[i + 3]; dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n", opcode, line, status, val); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4e865664699..ce884f7434b 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1813,8 +1813,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) mutex_init(&priv->cfg_lock); - priv->flags = ASYNC_LOW_LATENCY; - if (quirk && quirk->port_probe) quirk->port_probe(priv); @@ -2091,6 +2089,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, priv->prev_status = status; } + /* save if the transmitter is empty or not */ + if (packet[1] & FTDI_RS_TEMT) + priv->transmit_empty = 1; + else + priv->transmit_empty = 0; + + len -= 2; + if (!len) + return 0; /* status only */ + + /* + * Break and error status must only be processed for packets with + * data payload to avoid over-reporting. 
+ */ flag = TTY_NORMAL; if (packet[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence @@ -2113,15 +2125,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, } } - /* save if the transmitter is empty or not */ - if (packet[1] & FTDI_RS_TEMT) - priv->transmit_empty = 1; - else - priv->transmit_empty = 0; - - len -= 2; - if (!len) - return 0; /* status only */ port->icount.rx += len; ch = packet + 2; @@ -2452,8 +2455,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port, FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, 0, priv->interface, buf, len, WDR_TIMEOUT); - if (ret < 0) { + + /* NOTE: We allow short responses and handle that below. */ + if (ret < 1) { dev_err(&port->dev, "failed to get modem status: %d\n", ret); + if (ret >= 0) + ret = -EIO; ret = usb_translate_errors(ret); goto out; } diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index b110c573ea8..ea9c4f4aea3 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1049,6 +1049,7 @@ static int garmin_write_bulk(struct usb_serial_port *port, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + kfree(buffer); } /* we are done with this urb, so let the host driver diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index c574d312f1f..9f24fd776ec 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2795,6 +2795,11 @@ static int edge_startup(struct usb_serial *serial) EDGE_COMPATIBILITY_MASK1, EDGE_COMPATIBILITY_MASK2 }; + if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + dev = serial->dev; /* create our private serial structure */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 8cd6479a8b4..2dd6830fdf7 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -1393,8 +1393,7 @@ static int download_fw(struct edgeport_serial *serial) dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__); - /* return an error on purpose */ - return -ENODEV; + return 1; } stayinbootmode: @@ -1402,7 +1401,7 @@ static int download_fw(struct edgeport_serial *serial) dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__); serial->product_info.TiMode = TI_MODE_BOOT; - return 0; + return 1; } @@ -1575,6 +1574,12 @@ static void edge_interrupt_callback(struct urb *urb) function = TIUMP_GET_FUNC_FROM_CODE(data[0]); dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, port_number, function, data[1]); + + if (port_number >= edge_serial->serial->num_ports) { + dev_err(dev, "bad port number %d\n", port_number); + goto exit; + } + port = edge_serial->serial->port[port_number]; edge_port = usb_get_serial_port_data(port); if (!edge_port) { @@ -1655,7 +1660,7 @@ static void edge_bulk_in_callback(struct urb *urb) port_number = edge_port->port->number - edge_port->port->serial->minor; - if (edge_port->lsr_event) { + if (urb->actual_length > 0 && edge_port->lsr_event) { edge_port->lsr_event = 0; dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", __func__, port_number, edge_port->lsr_mask, *data); @@ -2433,6 +2438,13 @@ static int edge_startup(struct usb_serial *serial) struct edgeport_serial *edge_serial; int status; + /* Make sure we have the required endpoints when in download mode. 
*/ + if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { + if (serial->num_bulk_in < serial->num_ports || + serial->num_bulk_out < serial->num_ports) + return -ENODEV; + } + /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (edge_serial == NULL) { @@ -2444,11 +2456,14 @@ static int edge_startup(struct usb_serial *serial) usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); - if (status) { + if (status < 0) { kfree(edge_serial); return status; } + if (status > 0) + return 1; /* bind but do not register any ports */ + return 0; } diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 790673e5faa..eadab621361 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -69,6 +69,16 @@ struct iuu_private { u32 clk; }; +static int iuu_attach(struct usb_serial *serial) +{ + unsigned char num_ports = serial->num_ports; + + if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports) + return -ENODEV; + + return 0; +} + static int iuu_port_probe(struct usb_serial_port *port) { struct iuu_private *priv; @@ -1199,6 +1209,7 @@ static struct usb_serial_driver iuu_device = { .tiocmset = iuu_tiocmset, .set_termios = iuu_set_termios, .init_termios = iuu_init_termios, + .attach = iuu_attach, .port_probe = iuu_port_probe, .port_remove = iuu_port_remove, }; diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 5f1d382e55c..05c567bf5cf 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -697,6 +697,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw"); MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); #endif +static int keyspan_pda_attach(struct usb_serial *serial) +{ + unsigned char num_ports = serial->num_ports; + + if (serial->num_bulk_out < num_ports || + serial->num_interrupt_in < num_ports) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + + return 0; +} + static int keyspan_pda_port_probe(struct usb_serial_port *port) { @@ -774,6 +787,7 @@ static struct usb_serial_driver keyspan_pda_device = { .break_ctl = keyspan_pda_break_ctl, .tiocmget = keyspan_pda_tiocmget, .tiocmset = keyspan_pda_tiocmset, + .attach = keyspan_pda_attach, .port_probe = keyspan_pda_port_probe, .port_remove = keyspan_pda_port_remove, }; diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 1b4054fe52a..b6794baf0a3 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -198,10 +198,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, status_buf, KLSI_STATUSBUF_LEN, 10000 ); - if (rc < 0) - dev_err(&port->dev, "Reading line status failed (error = %d)\n", - rc); - else { + if (rc != KLSI_STATUSBUF_LEN) { + dev_err(&port->dev, "reading line status failed: %d\n", rc); + if (rc >= 0) + rc = -EIO; + } else { status = get_unaligned_le16(status_buf); dev_info(&port->serial->dev->dev, "read status %x %x", @@ -304,7 +305,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) rc = usb_serial_generic_open(tty, port); if (rc) { retval = rc; - goto exit; + goto err_free_cfg; } rc = usb_control_msg(port->serial->dev, @@ -319,21 +320,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) if (rc < 0) { dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc); retval = rc; + goto err_generic_close; } else dev_dbg(&port->dev, "%s - enabled 
reading\n", __func__); rc = klsi_105_get_line_state(port, &line_state); - if (rc >= 0) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_state = line_state; - spin_unlock_irqrestore(&priv->lock, flags); - dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state); - retval = 0; - } else + if (rc < 0) { retval = rc; + goto err_disable_read; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->line_state = line_state; + spin_unlock_irqrestore(&priv->lock, flags); + dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, + line_state); + + return 0; -exit: +err_disable_read: + usb_control_msg(port->serial->dev, + usb_sndctrlpipe(port->serial->dev, 0), + KL5KUSB105A_SIO_CONFIGURE, + USB_TYPE_VENDOR | USB_DIR_OUT, + KL5KUSB105A_SIO_CONFIGURE_READ_OFF, + 0, /* index */ + NULL, 0, + KLSI_TIMEOUT); +err_generic_close: + usb_serial_generic_close(port); +err_free_cfg: kfree(cfg); + return retval; } diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index efa75b4e51f..63fa400a822 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -52,6 +52,7 @@ /* Function prototypes */ +static int kobil_attach(struct usb_serial *serial); static int kobil_port_probe(struct usb_serial_port *probe); static int kobil_port_remove(struct usb_serial_port *probe); static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); @@ -87,6 +88,7 @@ static struct usb_serial_driver kobil_device = { .description = "KOBIL USB smart card terminal", .id_table = id_table, .num_ports = 1, + .attach = kobil_attach, .port_probe = kobil_port_probe, .port_remove = kobil_port_remove, .ioctl = kobil_ioctl, @@ -114,6 +116,16 @@ struct kobil_private { }; +static int kobil_attach(struct usb_serial *serial) +{ + if (serial->num_interrupt_out < serial->num_ports) { + dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n"); + return -ENODEV; + } + + return 0; +} + static int kobil_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index ddc71d706ac..2d1ad823b1a 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -66,8 +66,6 @@ struct moschip_port { struct urb *write_urb_pool[NUM_URBS]; }; -static struct usb_serial_driver moschip7720_2port_driver; - #define USB_VENDOR_ID_MOSCHIP 0x9710 #define MOSCHIP_DEVICE_ID_7720 0x7720 #define MOSCHIP_DEVICE_ID_7715 0x7715 @@ -966,25 +964,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb) tty_port_tty_wakeup(&mos7720_port->port->port); } -/* - * mos77xx_probe - * this function installs the appropriate read interrupt endpoint callback - * depending on whether the device is a 7720 or 7715, thus avoiding costly - * run-time checks in the high-frequency callback routine itself. 
- */ -static int mos77xx_probe(struct usb_serial *serial, - const struct usb_device_id *id) -{ - if (id->idProduct == MOSCHIP_DEVICE_ID_7715) - moschip7720_2port_driver.read_int_callback = - mos7715_interrupt_callback; - else - moschip7720_2port_driver.read_int_callback = - mos7720_interrupt_callback; - - return 0; -} - static int mos77xx_calc_num_ports(struct usb_serial *serial) { u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); @@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial) u16 product; int ret_val; + if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) { + dev_err(&serial->interface->dev, "missing bulk endpoints\n"); + return -ENODEV; + } + product = le16_to_cpu(serial->dev->descriptor.idProduct); dev = serial->dev; @@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial) tmp->interrupt_in_endpointAddress; serial->port[1]->interrupt_in_urb = NULL; serial->port[1]->interrupt_in_buffer = NULL; + + if (serial->port[0]->interrupt_in_urb) { + struct urb *urb = serial->port[0]->interrupt_in_urb; + + urb->complete = mos7715_interrupt_callback; + } } /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); - /* start the interrupt urb */ - ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); - if (ret_val) - dev_err(&dev->dev, - "%s - Error %d submitting control urb\n", - __func__, ret_val); - #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT if (product == MOSCHIP_DEVICE_ID_7715) { ret_val = mos7715_parport_init(serial); @@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial) return ret_val; } #endif + /* start the interrupt urb */ + ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); + if (ret_val) { + dev_err(&dev->dev, "failed to submit interrupt urb: %d\n", + ret_val); + } + /* LSR For Port 1 */ read_mos_reg(serial, 0, LSR, &data); dev_dbg(&dev->dev, "LSR:%x\n", data); @@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial) static void mos7720_release(struct usb_serial *serial) { + usb_kill_urb(serial->port[0]->interrupt_in_urb); + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT /* close the parallel port */ @@ -2052,7 +2044,6 @@ static struct usb_serial_driver moschip7720_2port_driver = { .close = mos7720_close, .throttle = mos7720_throttle, .unthrottle = mos7720_unthrottle, - .probe = mos77xx_probe, .attach = mos7720_startup, .release = mos7720_release, .port_probe = mos7720_port_probe, @@ -2066,7 +2057,7 @@ static struct usb_serial_driver moschip7720_2port_driver = { .chars_in_buffer = mos7720_chars_in_buffer, .break_ctl = mos7720_break, .read_bulk_callback = mos7720_bulk_in_callback, - .read_int_callback = NULL /* dynamically assigned in probe() */ + .read_int_callback = mos7720_interrupt_callback, }; static struct usb_serial_driver * const serial_drivers[] = { diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 7df7df62e17..3308c43d231 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1041,6 +1041,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) * (can't set it up in mos7840_startup as the structures * * were not set up at that time.) */ if (port0->open_ports == 1) { + /* FIXME: Buffer never NULL, so URB is not submitted. 
*/ if (serial->port[0]->interrupt_in_buffer == NULL) { /* set up interrupt urb */ usb_fill_int_urb(serial->port[0]->interrupt_in_urb, @@ -2255,6 +2256,18 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) return mos7840_num_ports; } +static int mos7840_attach(struct usb_serial *serial) +{ + if (serial->num_bulk_in < serial->num_ports || + serial->num_bulk_out < serial->num_ports || + serial->num_interrupt_in < 1) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + + return 0; +} + static int mos7840_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; @@ -2537,6 +2550,7 @@ static struct usb_serial_driver moschip7840_4port_device = { .tiocmset = mos7840_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, + .attach = mos7840_attach, .port_probe = mos7840_port_probe, .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 5739bf6f720..8028e5ffe80 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -39,6 +39,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static int omninet_write_room(struct tty_struct *tty); static void omninet_disconnect(struct usb_serial *serial); +static int omninet_attach(struct usb_serial *serial); static int omninet_port_probe(struct usb_serial_port *port); static int omninet_port_remove(struct usb_serial_port *port); @@ -57,6 +58,7 @@ static struct usb_serial_driver zyxel_omninet_device = { .description = "ZyXEL - omni.net lcd plus usb", .id_table = id_table, .num_ports = 1, + .attach = omninet_attach, .port_probe = omninet_port_probe, .port_remove = omninet_port_remove, .open = omninet_open, @@ -105,6 +107,17 @@ struct omninet_data { __u8 od_outseq; /* Sequence number for bulk_out URBs */ }; +static int omninet_attach(struct usb_serial *serial) +{ + /* The second bulk-out endpoint is used for writing. 
*/ + if (serial->num_bulk_out < 2) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + + return 0; +} + static int omninet_port_probe(struct usb_serial_port *port) { struct omninet_data *od; @@ -130,12 +143,6 @@ static int omninet_port_remove(struct usb_serial_port *port) static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) { - struct usb_serial *serial = port->serial; - struct usb_serial_port *wport; - - wport = serial->port[1]; - tty_port_tty_set(&wport->port, tty); - return usb_serial_generic_open(tty, port); } diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index b0eb1dfc601..b93ab96573e 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -143,7 +143,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port) usb_clear_halt(port->serial->dev, port->read_urb->pipe); res = usb_serial_generic_open(tty, port); - if (!res) + if (res) return res; /* Request CTS line state, sometimes during opening the current diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index 7e3e0782e51..ff83d87ed92 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -135,6 +135,7 @@ static int oti6858_tiocmget(struct tty_struct *tty); static int oti6858_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg); +static int oti6858_attach(struct usb_serial *serial); static int oti6858_port_probe(struct usb_serial_port *port); static int oti6858_port_remove(struct usb_serial_port *port); @@ -159,6 +160,7 @@ static struct usb_serial_driver oti6858_device = { .write_bulk_callback = oti6858_write_bulk_callback, .write_room = oti6858_write_room, .chars_in_buffer = oti6858_chars_in_buffer, + .attach = oti6858_attach, .port_probe = oti6858_port_probe, .port_remove = oti6858_port_remove, }; @@ -328,6 +330,20 @@ static void send_data(struct work_struct *work) usb_serial_port_softint(port); } +static int oti6858_attach(struct usb_serial *serial) +{ + unsigned char num_ports = serial->num_ports; + + if (serial->num_bulk_in < num_ports || + serial->num_bulk_out < num_ports || + serial->num_interrupt_in < num_ports) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + + return 0; +} + static int oti6858_port_probe(struct usb_serial_port *port) { struct oti6858_private *priv; diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 33313caed50..f496c38d539 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -175,9 +175,17 @@ static int pl2303_vendor_write(__u16 value, __u16 index, static int pl2303_startup(struct usb_serial *serial) { struct pl2303_serial_private *spriv; + unsigned char num_ports = serial->num_ports; enum pl2303_type type = type_0; unsigned char *buf; + if (serial->num_bulk_in < num_ports || + serial->num_bulk_out < num_ports || + serial->num_interrupt_in < num_ports) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); if (!spriv) return -ENOMEM; diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 13824b5ca34..ecd0a84ffc0 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port) { struct usb_serial *serial; struct qt2_port_private *port_priv; - unsigned long flags; int i; 
serial = port->serial; port_priv = usb_get_serial_port_data(port); - spin_lock_irqsave(&port_priv->urb_lock, flags); usb_kill_urb(port_priv->write_urb); - port_priv->urb_in_use = false; - spin_unlock_irqrestore(&port_priv->urb_lock, flags); /* flush the port transmit buffer */ i = usb_control_msg(serial->dev, diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 21cd7bf2a8c..8e24f8ff2fc 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -215,6 +215,11 @@ static void safe_process_read_urb(struct urb *urb) if (!safe) goto out; + if (length < 2) { + dev_err(&port->dev, "malformed packet\n"); + return; + } + fcs = fcs_compute10(data, length, CRC10_INITFCS); if (fcs) { dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 1694d4ff163..595a3f0b021 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -155,6 +155,19 @@ static int spcp8x5_probe(struct usb_serial *serial, return 0; } +static int spcp8x5_attach(struct usb_serial *serial) +{ + unsigned char num_ports = serial->num_ports; + + if (serial->num_bulk_in < num_ports || + serial->num_bulk_out < num_ports) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + return -ENODEV; + } + + return 0; +} + static int spcp8x5_port_probe(struct usb_serial_port *port) { const struct usb_device_id *id = usb_get_serial_data(port->serial); @@ -218,11 +231,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status) ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), GET_UART_STATUS, GET_UART_STATUS_TYPE, 0, GET_UART_STATUS_MSR, buf, 1, 100); - if (ret < 0) + if (ret < 1) { dev_err(&port->dev, "failed to get modem status: %d", ret); + if (ret >= 0) + ret = -EIO; + goto out; + } dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x", ret, *buf); *status = *buf; + ret = 0; +out: kfree(buf); return ret; @@ -479,6 +498,7 @@ static struct usb_serial_driver spcp8x5_device = { .tiocmget = spcp8x5_tiocmget, .tiocmset = spcp8x5_tiocmset, .probe = spcp8x5_probe, + .attach = spcp8x5_attach, .port_probe = spcp8x5_port_probe, .port_remove = spcp8x5_port_remove, }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 0a7c68fa5e5..1ccf221d842 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -401,6 +401,13 @@ static int ti_startup(struct usb_serial *serial) goto free_tdev; } + if (serial->num_bulk_in < serial->num_ports || + serial->num_bulk_out < serial->num_ports) { + dev_err(&serial->interface->dev, "missing endpoints\n"); + status = -ENODEV; + goto free_tdev; + } + return 0; free_tdev: diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c index a09b65ebd9b..2bb0fd3f342 100644 --- a/drivers/usb/wusbcore/wa-hc.c +++ b/drivers/usb/wusbcore/wa-hc.c @@ -38,6 +38,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface) int result; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + result = wa_rpipes_create(wa); if (result < 0) goto error_rpipes_create; diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 810c90ae2c5..cd8bf69aa69 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -811,6 +811,9 @@ static int hwarc_probe(struct usb_interface *iface, struct hwarc *hwarc; struct device *dev = &iface->dev; + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; 
uwb_rc = uwb_rc_alloc(); if (uwb_rc == NULL) { diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 2bfc846ac07..6345e85822a 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) result); } + if (iface->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + result = -ENOMEM; i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); if (i1480_usb == NULL) { diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index dc55bc254c5..a1d0fc47614 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -344,8 +344,9 @@ static long vfio_pci_ioctl(void *device_data, } else if (cmd == VFIO_DEVICE_SET_IRQS) { struct vfio_irq_set hdr; + size_t size; u8 *data = NULL; - int ret = 0; + int max, ret = 0; minsz = offsetofend(struct vfio_irq_set, count); @@ -353,23 +354,31 @@ static long vfio_pci_ioctl(void *device_data, return -EFAULT; if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || + hdr.count >= (U32_MAX - hdr.start) || hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | VFIO_IRQ_SET_ACTION_TYPE_MASK)) return -EINVAL; - if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { - size_t size; - int max = vfio_pci_get_irq_count(vdev, hdr.index); + max = vfio_pci_get_irq_count(vdev, hdr.index); + if (hdr.start >= max || hdr.start + hdr.count > max) + return -EINVAL; - if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) - size = sizeof(uint8_t); - else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) - size = sizeof(int32_t); - else - return -EINVAL; + switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + size = 0; + break; + case VFIO_IRQ_SET_DATA_BOOL: + size = sizeof(uint8_t); + break; + case VFIO_IRQ_SET_DATA_EVENTFD: + size = sizeof(int32_t); + break; + default: + return -EINVAL; + } - if (hdr.argsz - minsz < hdr.count * size || - hdr.start >= max || hdr.start + hdr.count > max) + if (size) { + if (hdr.argsz - minsz < hdr.count * size) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 4bc704e1b7c..bfe72a991fa 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -468,7 +468,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) if (!is_irq_none(vdev)) return -EINVAL; - vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); if (!vdev->ctx) return -ENOMEM; diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index a92783e480e..ca55f93b0f6 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1196,6 +1196,8 @@ static void fbcon_free_font(struct display *p, bool freefont) p->userfont = 0; } +static void set_vc_hi_font(struct vc_data *vc, bool set); + static void fbcon_deinit(struct vc_data *vc) { struct display *p = &fb_display[vc->vc_num]; @@ -1231,6 +1233,9 @@ static void fbcon_deinit(struct vc_data *vc) if (free_font) vc->vc_font.data = NULL; + if (vc->vc_hi_font_mask) + set_vc_hi_font(vc, false); + if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -2466,32 +2471,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) return 0; } -static int fbcon_do_set_font(struct vc_data *vc, int w, int h, - const u8 * data, int userfont) +/* set/clear vc_hi_font_mask and update vc attrs accordingly */ +static 
void set_vc_hi_font(struct vc_data *vc, bool set) { - struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; - struct fbcon_ops *ops = info->fbcon_par; - struct display *p = &fb_display[vc->vc_num]; - int resize; - int cnt; - char *old_data = NULL; - - if (CON_IS_VISIBLE(vc) && softback_lines) - fbcon_set_origin(vc); - - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); - if (p->userfont) - old_data = vc->vc_font.data; - if (userfont) - cnt = FNTCHARCNT(data); - else - cnt = 256; - vc->vc_font.data = (void *)(p->fontdata = data); - if ((p->userfont = userfont)) - REFCOUNT(data)++; - vc->vc_font.width = w; - vc->vc_font.height = h; - if (vc->vc_hi_font_mask && cnt == 256) { + if (!set) { vc->vc_hi_font_mask = 0; if (vc->vc_can_do_color) { vc->vc_complement_mask >>= 1; @@ -2514,7 +2497,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, ((c & 0xfe00) >> 1) | (c & 0xff); vc->vc_attr >>= 1; } - } else if (!vc->vc_hi_font_mask && cnt == 512) { + } else { vc->vc_hi_font_mask = 0x100; if (vc->vc_can_do_color) { vc->vc_complement_mask <<= 1; @@ -2546,8 +2529,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, } else vc->vc_video_erase_char = c & ~0x100; } - } +} + +static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + const u8 * data, int userfont) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct display *p = &fb_display[vc->vc_num]; + int resize; + int cnt; + char *old_data = NULL; + + if (CON_IS_VISIBLE(vc) && softback_lines) + fbcon_set_origin(vc); + + resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); + if (p->userfont) + old_data = vc->vc_font.data; + if (userfont) + cnt = FNTCHARCNT(data); + else + cnt = 256; + vc->vc_font.data = (void *)(p->fontdata = data); + if ((p->userfont = userfont)) + REFCOUNT(data)++; + vc->vc_font.width = w; + vc->vc_font.height = h; + if (vc->vc_hi_font_mask && cnt == 256) + set_vc_hi_font(vc, false); + else if (!vc->vc_hi_font_mask && cnt == 512) + set_vc_hi_font(vc, true); if (resize) { int cols, rows; diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f26570d83d1..6970285bb53 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -163,8 +163,8 @@ void fb_dealloc_cmap(struct fb_cmap *cmap) int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (!to || !from) return -EINVAL; @@ -173,10 +173,11 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) fromoff = to->start - from->start; else tooff = from->start - to->start; - size = to->len - tooff; - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; - if (size <= 0) + if (fromoff >= from->len || tooff >= to->len) + return -EINVAL; + + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) return -EINVAL; size *= sizeof(u16); @@ -193,8 +194,8 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (!to || !from) return -EINVAL; @@ -203,13 +204,12 @@ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) fromoff = to->start - from->start; else tooff = from->start - to->start; - if ((to->len <= tooff) || (from->len <= fromoff)) + if (fromoff >= from->len || tooff >= to->len) return 
-EINVAL; - size = to->len - tooff; - - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) + return -EINVAL; size *= sizeof(u16); if (from->red && to->red) diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index cd005c227a2..d026bbb4e50 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c @@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: -InitWait: xenbus_switch_state(dev, XenbusStateConnected); break; @@ -654,7 +653,8 @@ static void xenfb_backend_changed(struct xenbus_device *dev, * get Connected twice here. */ if (dev->state != XenbusStateConnected) - goto InitWait; /* no InitWait seen yet, fudge it */ + /* no InitWait seen yet, fudge it */ + xenbus_switch_state(dev, XenbusStateConnected); if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, "request-update", "%d", &val) < 0) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 148e8ea1bc9..3d42cde0286 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -349,6 +349,8 @@ static int init_vqs(struct virtio_balloon *vb) * Prime this virtqueue with one buffer so the hypervisor can * use it to signal us later. */ + update_balloon_stats(vb); + sg_init_one(&sg, vb->stats, sizeof vb->stats); if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index 8ca1030675a..131501b3b11 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled, vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); - *pci_base = (dma_addr_t)vme_base + pci_offset; + *pci_base = (dma_addr_t)*vme_base + pci_offset; *size = (unsigned long long)((vme_bound - *vme_base) + granularity); *enabled = 0; diff --git a/fs/block_dev.c b/fs/block_dev.c index 4e9b5f49c5b..2b7cdb17122 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -655,7 +655,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, return true; /* already a holder */ else if (bdev->bd_holder != NULL) return false; /* held by someone else */ - else if (bdev->bd_contains == bdev) + else if (whole == bdev) return true; /* is a whole device which isn't held */ else if (whole->bd_holder == bd_may_claim) @@ -1693,6 +1693,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) spin_lock(&inode_sb_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; + struct block_device *bdev; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || @@ -1713,8 +1714,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) */ iput(old_inode); old_inode = inode; + bdev = I_BDEV(inode); - func(I_BDEV(inode), arg); + mutex_lock(&bdev->bd_mutex); + if (bdev->bd_openers) + func(bdev, arg); + mutex_unlock(&bdev->bd_mutex); spin_lock(&inode_sb_list_lock); } diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 019fc5a68a1..f26f38ccd19 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1843,14 +1843,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_delayed_node 
*delayed_node; int ret = 0; - /* - * we don't do delayed inode updates during log recovery because it - * leads to enospc problems. This means we also can't do - * delayed inode refs - */ - if (BTRFS_I(inode)->root->fs_info->log_root_recovering) - return -EAGAIN; - delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) return PTR_ERR(delayed_node); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f5dee138fc3..0f0a01f2381 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4661,11 +4661,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, lock_page(page); } locked_pages++; + } + /* + * We need to firstly lock all pages to make sure that + * the uptodate bit of our pages won't be affected by + * clear_extent_buffer_uptodate(). + */ + for (i = start_i; i < num_pages; i++) { + page = eb->pages[i]; if (!PageUptodate(page)) { num_reads++; all_uptodate = 0; } } + if (all_uptodate) { if (start_i == 0) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7d3331cbccb..681782d00b1 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1691,12 +1691,11 @@ static noinline int find_dir_range(struct btrfs_root *root, next: /* check the next slot in the tree to see if it is a valid item */ nritems = btrfs_header_nritems(path->nodes[0]); + path->slots[0]++; if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret) goto out; - } else { - path->slots[0]++; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 37e4a72a7d1..ae4e35bdc2c 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -45,6 +45,9 @@ #define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */ #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */ +#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible + * root mountable + */ struct cifs_sb_info { struct rb_root tlink_tree; @@ -65,5 +68,6 @@ struct cifs_sb_info { char *mountdata; /* options received at mount time or via DFS refs */ struct backing_dev_info bdi; struct delayed_work prune_tlinks; + char *prepath; }; #endif /* _CIFS_FS_SB_H */ diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3752b9f6d9e..e4e2152b788 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -565,6 +565,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) char *s, *p; char sep; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) + return dget(sb->s_root); + full_path = cifs_build_path_to_root(vol, cifs_sb, cifs_sb_master_tcon(cifs_sb)); if (full_path == NULL) @@ -644,10 +647,14 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); if (cifs_sb->mountdata == NULL) { root = ERR_PTR(-ENOMEM); - goto out_cifs_sb; + goto out_free; } - cifs_setup_cifs_sb(volume_info, cifs_sb); + rc = cifs_setup_cifs_sb(volume_info, cifs_sb); + if (rc) { + root = ERR_PTR(rc); + goto out_free; + } rc = cifs_mount(cifs_sb, volume_info); if (rc) { @@ -655,7 +662,7 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n", rc); root = ERR_PTR(rc); - goto out_mountdata; + goto out_free; } mnt_data.vol = volume_info; @@ -698,9 +705,9 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_cleanup_volume_info(volume_info); return root; 
-out_mountdata: +out_free: + kfree(cifs_sb->prepath); kfree(cifs_sb->mountdata); -out_cifs_sb: kfree(cifs_sb); out_nls: unload_nls(volume_info->local_nls); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f74dfa89c4c..b7f58991857 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -576,6 +576,8 @@ struct TCP_Server_Info { #ifdef CONFIG_CIFS_SMB2 unsigned int max_read; unsigned int max_write; + struct delayed_work reconnect; /* reconnect workqueue job */ + struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ #endif /* CONFIG_CIFS_SMB2 */ }; @@ -750,6 +752,7 @@ cap_unix(struct cifs_ses *ses) struct cifs_tcon { struct list_head tcon_list; int tc_count; + struct list_head rlist; /* reconnect list */ struct list_head openFileList; struct cifs_ses *ses; /* pointer to session associated with */ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ @@ -823,7 +826,6 @@ struct cifs_tcon { bool need_reconnect:1; /* connection reset, tid now invalid */ #ifdef CONFIG_CIFS_SMB2 bool print:1; /* set if connection to printer share */ - bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */ __u32 capabilities; __u32 share_flags; __u32 maximal_access; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index dda188a9433..871a3096673 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -174,7 +174,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, extern int cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, unsigned int nr_segs, unsigned int to_read); -extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, +extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb); extern int cifs_match_super(struct super_block *, void *); extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info); @@ -194,6 +194,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open); extern void cifs_del_pending_open(struct cifs_pending_open *open); +extern void cifs_put_tcp_session(struct TCP_Server_Info *server, + int from_reconnect); +extern void cifs_put_tcon(struct cifs_tcon *tcon); #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) extern void cifs_dfs_release_automount_timer(void); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 7c33afd7d5d..417ce0a497f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -52,6 +52,9 @@ #include "nterr.h" #include "rfc1002pdu.h" #include "fscache.h" +#ifdef CONFIG_CIFS_SMB2 +#include "smb2proto.h" +#endif #define CIFS_PORT 445 #define RFC1001_PORT 139 @@ -2070,8 +2073,8 @@ cifs_find_tcp_session(struct smb_vol *vol) return NULL; } -static void -cifs_put_tcp_session(struct TCP_Server_Info *server) +void +cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) { struct task_struct *task; @@ -2088,6 +2091,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) cancel_delayed_work_sync(&server->echo); +#ifdef CONFIG_CIFS_SMB2 + if (from_reconnect) + /* + * Avoid deadlock here: reconnect work calls + * cifs_put_tcp_session() at its end. Need to be sure + * that reconnect work does nothing with server pointer after + * that step. 
+ */ + cancel_delayed_work(&server->reconnect); + else + cancel_delayed_work_sync(&server->reconnect); +#endif + spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); @@ -2158,6 +2174,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info) INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); +#ifdef CONFIG_CIFS_SMB2 + INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); + mutex_init(&tcp_ses->reconnect_mutex); +#endif memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, @@ -2288,7 +2308,7 @@ cifs_put_smb_ses(struct cifs_ses *ses) _free_xid(xid); } sesInfoFree(ses); - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); } #ifdef CONFIG_KEYS @@ -2461,7 +2481,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) mutex_unlock(&ses->session_mutex); /* existing SMB ses has a server reference already */ - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); free_xid(xid); return ses; } @@ -2550,7 +2570,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc) return NULL; } -static void +void cifs_put_tcon(struct cifs_tcon *tcon) { unsigned int xid; @@ -2715,6 +2735,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) return 1; } +static int +match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) +{ + struct cifs_sb_info *old = CIFS_SB(sb); + struct cifs_sb_info *new = mnt_data->cifs_sb; + + if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) { + if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)) + return 0; + /* The prepath should be null terminated strings */ + if (strcmp(new->prepath, old->prepath)) + return 0; + + return 1; + } + return 0; +} + int cifs_match_super(struct super_block *sb, void *data) { @@ -2742,7 +2780,8 @@ cifs_match_super(struct super_block *sb, void *data) if (!match_server(tcp_srv, volume_info) || !match_session(ses, volume_info) || - !match_tcon(tcon, volume_info->UNC)) { + !match_tcon(tcon, volume_info->UNC) || + !match_prepath(sb, mnt_data)) { rc = 0; goto out; } @@ -3158,7 +3197,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, } } -void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, +int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb) { INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); @@ -3240,6 +3279,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); + + + if (pvolume_info->prepath) { + cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL); + if (cifs_sb->prepath == NULL) + return -ENOMEM; + } + + return 0; } static void @@ -3410,6 +3458,44 @@ cifs_get_volume_info(char *mount_data, const char *devname) return volume_info; } +static int +cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, + unsigned int xid, + struct cifs_tcon *tcon, + struct cifs_sb_info *cifs_sb, + char *full_path) +{ + int rc; + char *s; + char sep, tmp; + + sep = CIFS_DIR_SEP(cifs_sb); + s = full_path; + + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); + while (rc == 0) { + /* skip separators */ + while (*s == sep) + s++; + if (!*s) + break; + /* next separator */ + while (*s && *s != sep) + s++; + + /* + * temporarily null-terminate the 
path at the end of + * the current component + */ + tmp = *s; + *s = 0; + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, + full_path); + *s = tmp; + } + return rc; +} + int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) { @@ -3536,6 +3622,17 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) kfree(full_path); goto mount_fail_check; } + if (rc != -EREMOTE) { + rc = cifs_are_all_path_components_accessible(server, + xid, tcon, cifs_sb, + full_path); + if (rc != 0) { + cifs_dbg(VFS, "cannot query dirs between root and final path, " + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; + rc = 0; + } + } kfree(full_path); } @@ -3599,7 +3696,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) else if (ses) cifs_put_smb_ses(ses); else - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); bdi_destroy(&cifs_sb->bdi); } @@ -3793,6 +3890,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb) bdi_destroy(&cifs_sb->bdi); kfree(cifs_sb->mountdata); + kfree(cifs_sb->prepath); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); } @@ -3932,7 +4030,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); if (IS_ERR(ses)) { tcon = (struct cifs_tcon *)ses; - cifs_put_tcp_session(master_tcon->ses->server); + cifs_put_tcp_session(master_tcon->ses->server, 0); goto out; } diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index a998c929286..543124703e0 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -83,6 +83,7 @@ build_path_from_dentry(struct dentry *direntry) struct dentry *temp; int namelen; int dfsplen; + int pplen = 0; char *full_path; char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); @@ -94,8 +95,12 @@ build_path_from_dentry(struct dentry *direntry) dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); else dfsplen = 0; + + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) + pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; + cifs_bp_rename_retry: - namelen = dfsplen; + namelen = dfsplen + pplen; seq = read_seqbegin(&rename_lock); rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { @@ -136,7 +141,7 @@ build_path_from_dentry(struct dentry *direntry) } } rcu_read_unlock(); - if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { + if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) { cifs_dbg(FYI, "did not end path lookup where expected. 
namelen=%ddfsplen=%d\n", namelen, dfsplen); /* presumably this is only possible if racing with a rename @@ -152,6 +157,17 @@ build_path_from_dentry(struct dentry *direntry) those safely to '/' if any are found in the middle of the prepath */ /* BB test paths to Windows with '/' in the midst of prepath */ + if (pplen) { + int i; + + cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); + memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); + full_path[dfsplen] = '\\'; + for (i = 0; i < pplen-1; i++) + if (full_path[dfsplen+1+i] == '/') + full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); + } + if (dfsplen) { strncpy(full_path, tcon->treeName, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 54304ccae7e..971e7bea5d8 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -895,12 +895,29 @@ struct inode *cifs_root_iget(struct super_block *sb) struct inode *inode = NULL; long rc; struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + char *path = NULL; + int len; + + if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) + && cifs_sb->prepath) { + len = strlen(cifs_sb->prepath); + path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); + if (path == NULL) + return ERR_PTR(-ENOMEM); + path[0] = '/'; + memcpy(path+1, cifs_sb->prepath, len); + } else { + path = kstrdup("", GFP_KERNEL); + if (path == NULL) + return ERR_PTR(-ENOMEM); + } xid = get_xid(); + convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); if (tcon->unix_ext) - rc = cifs_get_inode_info_unix(&inode, "", sb, xid); + rc = cifs_get_inode_info_unix(&inode, path, sb, xid); else - rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); + rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL); if (!inode) { inode = ERR_PTR(rc); @@ -928,6 +945,7 @@ struct inode *cifs_root_iget(struct super_block *sb) } out: + kfree(path); /* can not call macro free_xid here since in a void func * TODO: This is no longer true */ diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 610c6c24d41..d97841e124b 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -891,6 +891,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile) return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; } +static bool +cifs_can_echo(struct TCP_Server_Info *server) +{ + if (server->tcpStatus == CifsGood) + return true; + + return false; +} + struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, @@ -923,6 +932,7 @@ struct smb_version_operations smb1_operations = { .get_dfs_refer = CIFSGetDFSRefer, .qfs_tcon = cifs_qfs_tcon, .is_path_accessible = cifs_is_path_accessible, + .can_echo = cifs_can_echo, .query_path_info = cifs_query_path_info, .query_file_info = cifs_query_file_info, .get_srv_inum = cifs_get_srv_inum, diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index d801f63cddd..866caf1d2be 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -266,7 +266,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) * and check it for zero before using. 
*/ max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf; - if (!max_buf) { + if (max_buf < sizeof(struct smb2_lock_element)) { free_xid(xid); return -EINVAL; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 9dd8c968d94..04fd3946213 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -254,7 +254,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) case SMB2_CHANGE_NOTIFY: case SMB2_QUERY_INFO: case SMB2_SET_INFO: - return -EAGAIN; + rc = -EAGAIN; } unload_nls(nls_codepage); return rc; @@ -720,9 +720,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, else return -EIO; - if (tcon && tcon->bad_network_name) - return -ENOENT; - unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; @@ -734,6 +731,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, return -EINVAL; } + /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ + if (tcon) + tcon->tid = 0; + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); if (rc) { kfree(unc_path); @@ -809,8 +810,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, tcon_error_exit: if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); - if (tcon) - tcon->bad_network_name = true; } goto tcon_exit; } @@ -1239,6 +1238,54 @@ smb2_echo_callback(struct mid_q_entry *mid) add_credits(server, credits_received, CIFS_ECHO_OP); } +void smb2_reconnect_server(struct work_struct *work) +{ + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, reconnect.work); + struct cifs_ses *ses; + struct cifs_tcon *tcon, *tcon2; + struct list_head tmp_list; + int tcon_exist = false; + + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ + mutex_lock(&server->reconnect_mutex); + + INIT_LIST_HEAD(&tmp_list); + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); + + spin_lock(&cifs_tcp_ses_lock); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + if (tcon->need_reconnect) { + tcon->tc_count++; + list_add_tail(&tcon->rlist, &tmp_list); + tcon_exist = true; + } + } + } + /* + * Get the reference to server struct to be sure that the last call of + * cifs_put_tcon() in the loop below won't release the server pointer. 
+ */ + if (tcon_exist) + server->srv_count++; + + spin_unlock(&cifs_tcp_ses_lock); + + list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { + smb2_reconnect(SMB2_ECHO, tcon); + list_del_init(&tcon->rlist); + cifs_put_tcon(tcon); + } + + cifs_dbg(FYI, "Reconnecting tcons finished\n"); + mutex_unlock(&server->reconnect_mutex); + + /* now we can safely release srv struct */ + if (tcon_exist) + cifs_put_tcp_session(server, 1); +} + int SMB2_echo(struct TCP_Server_Info *server) { @@ -1251,32 +1298,11 @@ SMB2_echo(struct TCP_Server_Info *server) cifs_dbg(FYI, "In echo request\n"); if (server->tcpStatus == CifsNeedNegotiate) { - struct list_head *tmp, *tmp2; - struct cifs_ses *ses; - struct cifs_tcon *tcon; - - cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); - spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp2, &ses->tcon_list) { - tcon = list_entry(tmp2, struct cifs_tcon, - tcon_list); - /* add check for persistent handle reconnect */ - if (tcon && tcon->need_reconnect) { - spin_unlock(&cifs_tcp_ses_lock); - rc = smb2_reconnect(SMB2_ECHO, tcon); - spin_lock(&cifs_tcp_ses_lock); - } - } - } - spin_unlock(&cifs_tcp_ses_lock); + /* No need to send echo on newly established connections */ + queue_delayed_work(cifsiod_wq, &server->reconnect, 0); + return rc; } - /* if no session, renegotiate failed above */ - if (server->tcpStatus == CifsNeedNegotiate) - return -EIO; - rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); if (rc) return rc; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 2aa3535e38c..d0cd166ac88 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -93,6 +93,7 @@ extern void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock); extern int smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, const unsigned int xid); extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); +extern void smb2_reconnect_server(struct work_struct *work); /* * SMB2 Worker functions - most of protocol specific implementation details diff --git a/fs/dcache.c b/fs/dcache.c index 2d0b9d2f3c4..f4fd9651421 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2405,6 +2405,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) dentry->d_parent = dentry; list_del_init(&dentry->d_child); anon->d_parent = dparent; + if (likely(!d_unhashed(anon))) { + hlist_bl_lock(&anon->d_sb->s_anon); + __hlist_bl_del(&anon->d_hash); + anon->d_hash.pprev = NULL; + hlist_bl_unlock(&anon->d_sb->s_anon); + } list_move(&anon->d_child, &dparent->d_subdirs); write_seqcount_end(&dentry->d_seq); @@ -2459,7 +2465,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) * could splice into our tree? */ __d_materialise_dentry(dentry, alias); write_sequnlock(&rename_lock); - __d_drop(alias); goto found; } else { /* Nope, but we must(!) avoid directory diff --git a/fs/exec.c b/fs/exec.c index cb7f31c71c6..15fddd916b7 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1091,6 +1091,13 @@ int flush_old_exec(struct linux_binprm * bprm) flush_thread(); current->personality &= ~bprm->per_clear; + /* + * We have to apply CLOEXEC before we change whether the process is + * dumpable (in setup_new_exec) to avoid a race with a process in userspace + * trying to access the should-be-closed file descriptors of a process + * undergoing exec(2). 
+ */ + do_close_on_exec(current->files); return 0; out: @@ -1141,7 +1148,6 @@ void setup_new_exec(struct linux_binprm * bprm) current->self_exec_id++; flush_signal_handlers(current, 0); - do_close_on_exec(current->files); } EXPORT_SYMBOL(setup_new_exec); diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index dbb5ad59a7f..2f994bbf73a 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -205,15 +205,11 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error < 0) return error; - else { - inode->i_ctime = CURRENT_TIME_SEC; - ext3_mark_inode_dirty(handle, inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = CURRENT_TIME_SEC; + ext3_mark_inode_dirty(handle, inode); } break; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index e350be6c7ac..55af0d98d96 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -339,8 +339,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, len -= EXT4_MIN_INLINE_DATA_SIZE; value = kzalloc(len, GFP_NOFS); - if (!value) + if (!value) { + error = -ENOMEM; goto out; + } error = ext4_xattr_ibody_get(inode, i.name_index, i.name, value, len); @@ -1145,10 +1147,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, set_buffer_uptodate(dir_block); err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) - goto out; + return err; set_buffer_verified(dir_block); -out: - return err; + return ext4_mark_inode_dirty(handle, inode); } static int ext4_convert_inline_data_nolock(handle_t *handle, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 31179ba2072..1095d77c2a9 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -73,10 +73,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size); offset += csum_size; - csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, - EXT4_INODE_SIZE(inode->i_sb) - - offset); } + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, + EXT4_INODE_SIZE(inode->i_sb) - offset); } return csum; @@ -759,6 +758,20 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, int ret = check_block_validity(inode, map); if (ret != 0) return ret; + + /* + * Inodes with freshly allocated blocks where contents will be + * visible after transaction commit must be on transaction's + * ordered data list. 
+ */ + if (map->m_flags & EXT4_MAP_NEW && + !(map->m_flags & EXT4_MAP_UNWRITTEN) && + !IS_NOQUOTA(inode) && + ext4_should_order_data(inode)) { + ret = ext4_jbd2_file_inode(handle, inode); + if (ret) + return ret; + } } return retval; } @@ -1119,15 +1132,6 @@ static int ext4_write_end(struct file *file, int i_size_changed = 0; trace_ext4_write_end(inode, pos, len, copied); - if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { - ret = ext4_jbd2_file_inode(handle, inode); - if (ret) { - unlock_page(page); - page_cache_release(page); - goto errout; - } - } - if (ext4_has_inline_data(inode)) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); @@ -4178,6 +4182,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; + loff_t size; int block; uid_t i_uid; gid_t i_gid; @@ -4270,6 +4275,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(raw_inode); + if ((size = i_size_read(inode)) < 0) { + EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); + ret = -EIO; + goto bad_inode; + } ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index cb9eec025ba..cba1fc678ee 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, ext4_grpblk_t min; ext4_grpblk_t max; ext4_grpblk_t chunk; - unsigned short border; + unsigned int border; BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); @@ -2222,7 +2222,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) struct ext4_group_info *grinfo; struct sg { struct ext4_group_info info; - ext4_grpblk_t counters[16]; + ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; } sg; group--; @@ -3063,6 +3063,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, if (ar->pright && start + size - 1 >= ar->lright) size -= start + size - ar->lright; + /* + * Trim allocation request for filesystems with artificially small + * groups. 
+ */ + if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) + size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); + end = start + size; /* check we don't cross already preallocated blocks */ diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c029336f520..00d607ed08e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -755,6 +755,7 @@ static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; + int aborted = 0; int i, err; ext4_unregister_li_request(sb); @@ -764,9 +765,10 @@ static void ext4_put_super(struct super_block *sb) destroy_workqueue(sbi->dio_unwritten_wq); if (sbi->s_journal) { + aborted = is_journal_aborted(sbi->s_journal); err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; - if (err < 0) + if ((err < 0) && !aborted) ext4_abort(sb, "Couldn't clean up the journal"); } @@ -777,7 +779,7 @@ static void ext4_put_super(struct super_block *sb) ext4_ext_release(sb); ext4_xattr_put_super(sb); - if (!(sb->s_flags & MS_RDONLY)) { + if (!(sb->s_flags & MS_RDONLY) && !aborted) { EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); } @@ -3187,10 +3189,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, ext4_set_bit(s++, buf); count++; } - for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { - ext4_set_bit(EXT4_B2C(sbi, s++), buf); - count++; + j = ext4_bg_num_gdb(sb, grp); + if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { + ext4_error(sb, "Invalid number of block group " + "descriptor blocks: %d", j); + j = EXT4_BLOCKS_PER_GROUP(sb) - s; } + count += j; + for (; j > 0; j--) + ext4_set_bit(EXT4_B2C(sbi, s++), buf); } if (!count) return 0; @@ -3293,7 +3300,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; - struct ext4_sb_info *sbi; + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; @@ -3313,16 +3320,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) - goto out_free_orig; + if ((data && !orig_data) || !sbi) + goto out_free_base; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); - if (!sbi->s_blockgroup_lock) { - kfree(sbi); - goto out_free_orig; - } + if (!sbi->s_blockgroup_lock) + goto out_free_base; + sb->s_fs_info = sbi; sbi->s_sb = sb; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; @@ -3465,11 +3470,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; - if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, - &journal_devnum, &journal_ioprio, 0)) { - ext4_msg(sb, KERN_WARNING, - "failed to parse options in superblock: %s", - sbi->s_es->s_mount_opts); + if (sbi->s_es->s_mount_opts[0]) { + char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, + sizeof(sbi->s_es->s_mount_opts), + GFP_KERNEL); + if (!s_mount_opts) + goto failed_mount; + if (!parse_options(s_mount_opts, sb, &journal_devnum, + &journal_ioprio, 0)) { + ext4_msg(sb, KERN_WARNING, + "failed to parse options in superblock: %s", + s_mount_opts); + } + kfree(s_mount_opts); } sbi->s_def_mount_opt = sbi->s_mount_opt; if (!parse_options((char *) data, sb, &journal_devnum, 
@@ -3616,12 +3629,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); - if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) - goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || + sbi->s_inodes_per_group > blocksize * 8) { + ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", + sbi->s_blocks_per_group); + goto failed_mount; + } sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); @@ -3705,13 +3722,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } sbi->s_cluster_ratio = clustersize / blocksize; - if (sbi->s_inodes_per_group > blocksize * 8) { - ext4_msg(sb, KERN_ERR, - "#inodes per group too big: %lu", - sbi->s_inodes_per_group); - goto failed_mount; - } - /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); @@ -3771,6 +3781,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) { + if (le32_to_cpu(es->s_first_meta_bg) > db_count) { + ext4_msg(sb, KERN_WARNING, + "first meta block group too large: %u " + "(group descriptor block count %u)", + le32_to_cpu(es->s_first_meta_bg), db_count); + goto failed_mount; + } + } sbi->s_group_desc = ext4_kvmalloc(db_count * sizeof(struct buffer_head *), GFP_KERNEL); @@ -3886,7 +3905,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ if (!test_opt(sb, NOLOAD) && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { - if (ext4_load_journal(sb, es, journal_devnum)) + err = ext4_load_journal(sb, es, journal_devnum); + if (err) goto failed_mount3; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { @@ -4101,7 +4121,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " - "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, + "Opts: %.*s%s%s", descr, + (int) sizeof(sbi->s_es->s_mount_opts), + sbi->s_es->s_mount_opts, *sbi->s_es->s_mount_opts ? "; " : "", orig_data); if (es->s_error_count) @@ -4169,8 +4191,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); +out_free_base: kfree(sbi); -out_free_orig: kfree(orig_data); return err ? err : ret; } diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a14dd4c0528..285e87450e5 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1193,6 +1193,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, return 0; } +static void fat_dummy_inode_init(struct inode *inode) +{ + /* Initialize this dummy inode to work as no-op. 
*/ + MSDOS_I(inode)->mmu_private = 0; + MSDOS_I(inode)->i_start = 0; + MSDOS_I(inode)->i_logstart = 0; + MSDOS_I(inode)->i_attrs = 0; + MSDOS_I(inode)->i_pos = 0; +} + static int fat_read_root(struct inode *inode) { struct super_block *sb = inode->i_sb; @@ -1512,12 +1522,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, fat_inode = new_inode(sb); if (!fat_inode) goto out_fail; - MSDOS_I(fat_inode)->i_pos = 0; + fat_dummy_inode_init(fat_inode); sbi->fat_inode = fat_inode; fsinfo_inode = new_inode(sb); if (!fsinfo_inode) goto out_fail; + fat_dummy_inode_init(fsinfo_inode); fsinfo_inode->i_ino = MSDOS_FSINFO_INO; sbi->fsinfo_inode = fsinfo_inode; insert_inode_hash(fsinfo_inode); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8282b423e18..619abe48909 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -128,6 +128,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) struct fuse_req *req = ff->reserved_req; if (sync) { + req->force = 1; req->background = 0; fuse_request_send(ff->fc, req); path_put(&req->misc.release.path); @@ -2604,6 +2605,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t i_size; size_t count = iov_length(iov, nr_segs); struct fuse_io_priv *io; + bool is_sync = is_sync_kiocb(iocb); pos = offset; inode = file->f_mapping->host; @@ -2639,7 +2641,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, * to wait on real async I/O requests, so we must submit this request * synchronously. */ - if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) + if (!is_sync && (offset + count > i_size) && rw == WRITE) io->async = false; if (rw == WRITE) @@ -2651,7 +2653,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, fuse_aio_complete(io, ret < 0 ? 
ret : 0, -1); /* we have a non-extending, async request, so return */ - if (!is_sync_kiocb(iocb)) + if (!is_sync) return -EIOCBQUEUED; ret = wait_on_sync_kiocb(iocb); diff --git a/fs/generic_acl.c b/fs/generic_acl.c index b3f3676796d..7855cfb938f 100644 --- a/fs/generic_acl.c +++ b/fs/generic_acl.c @@ -82,19 +82,21 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value, return PTR_ERR(acl); } if (acl) { + struct posix_acl *old_acl; + error = posix_acl_valid(acl); if (error) goto failed; switch (type) { case ACL_TYPE_ACCESS: - error = posix_acl_equiv_mode(acl, &inode->i_mode); + old_acl = acl; + error = posix_acl_update_mode(inode, &inode->i_mode, + &acl); if (error < 0) goto failed; + if (!acl) + posix_acl_release(old_acl); inode->i_ctime = CURRENT_TIME; - if (error == 0) { - posix_acl_release(acl); - acl = NULL; - } break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) { diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index b631c904346..9aaa6db3e4b 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -763,7 +763,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index, int error; error = get_leaf_nr(dip, index, &leaf_no); - if (!error) + if (!IS_ERR_VALUE(error)) error = get_leaf(dip, leaf_no, bh_out); return error; @@ -974,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) index = name->hash >> (32 - dip->i_depth); error = get_leaf_nr(dip, index, &leaf_no); - if (error) + if (IS_ERR_VALUE(error)) return error; /* Get the old leaf block */ diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 4e5f332f15d..db7d89cea2c 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -169,7 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 21b828c713c..54e958125c1 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1655,7 +1655,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) __blist_del_buffer(list, jh); jh->b_jlist = BJ_None; - if (test_clear_buffer_jbddirty(bh)) + if (transaction && is_journal_aborted(transaction->t_journal)) + clear_buffer_jbddirty(bh); + else if (test_clear_buffer_jbddirty(bh)) mark_buffer_dirty(bh); /* Expose it to the VM */ } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 39af2c528f4..f47f6a338a3 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1435,6 +1435,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, switch (err) { case -ENOENT: d_add(dentry, NULL); + nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); break; case -EISDIR: case -ENOTDIR: diff --git a/fs/nfs/file.c b/fs/nfs/file.c index a87a44f8411..f8bd4ea2a89 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -419,7 +419,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, */ if (!PageUptodate(page)) { unsigned pglen = nfs_page_length(page); - unsigned end = offset + len; + unsigned end = offset + copied; if (pglen == 0) { zero_user_segments(page, 0, offset, diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index d1612ee2e6e..a9427534045 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -821,7 +821,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) nfs4_wait_ds_connect(ds); } out_test_devid: - if 
(filelayout_test_devid_unavailable(devid)) + if (ret->ds_clp == NULL || + filelayout_test_devid_unavailable(devid)) ret = NULL; out: return ret; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6b4689b7085..bc3cb9d4f7d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4047,7 +4047,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size */ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { - struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; + struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; struct nfs_getaclargs args = { .fh = NFS_FH(inode), .acl_pages = pages, @@ -4061,13 +4061,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu .rpc_argp = &args, .rpc_resp = &res, }; - unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; int ret = -ENOMEM, i; - /* As long as we're doing a round trip to the server anyway, - * let's be prepared for a page of acl data. */ - if (npages == 0) - npages = 1; if (npages > ARRAY_SIZE(pages)) return -ERANGE; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 988efb4caac..f5d27ca1014 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2435,7 +2435,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); - replen = hdr.replen + op_decode_hdr_maxsz + 1; + replen = hdr.replen + op_decode_hdr_maxsz; encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 8016892f3f0..879b56d2f72 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -627,6 +627,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr) return nfserr; } +/* + * A write procedure can have a large argument, and a read procedure can + * have a large reply, but no NFSv2 or NFSv3 procedure has argument and + * reply that can both be larger than a page. The xdr code has taken + * advantage of this assumption to be sloppy about bounds checking in + * some cases.
Pending a rewrite of the NFSv2/v3 xdr code to fix that + * problem, we enforce these assumptions here: + */ +static bool nfs_request_too_big(struct svc_rqst *rqstp, + struct svc_procedure *proc) +{ + /* + * The ACL code has more careful bounds-checking and is not + * susceptible to this problem: + */ + if (rqstp->rq_prog != NFS_PROGRAM) + return false; + /* + * Ditto NFSv4 (which can in theory have argument and reply both + * more than a page): + */ + if (rqstp->rq_vers >= 4) + return false; + /* The reply will be small, we're OK: */ + if (proc->pc_xdrressize > 0 && + proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE)) + return false; + + return rqstp->rq_arg.len > PAGE_SIZE; +} + int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) { @@ -639,6 +670,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) rqstp->rq_vers, rqstp->rq_proc); proc = rqstp->rq_procinfo; + if (nfs_request_too_big(rqstp, proc)) { + dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers); + *statp = rpc_garbage_args; + return 1; + } /* * Give the xdr decoder a chance to change this if it wants * (necessary in the NFSv4.0 compound case) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index b294deb27d1..cf88dd4f8f3 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3264,6 +3264,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, lockres->l_level, new_level); + /* + * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always + * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that + * we can recover correctly from node failure. Otherwise, we may get + * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. + */ + if (!ocfs2_is_o2cb_active() && + lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lvb = 1; + if (lvb) dlm_flags |= DLM_LKF_VALBLK; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 496af7fd87d..86ed0f4aefc 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1104,6 +1104,7 @@ static int ocfs2_extend_file(struct inode *inode, int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; + int inode_locked = 0; struct inode *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); @@ -1149,6 +1150,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) mlog_errno(status); goto bail_unlock_rw; } + inode_locked = 1; if (size_change && attr->ia_size != i_size_read(inode)) { status = inode_newsize_ok(inode, attr->ia_size); @@ -1229,7 +1231,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock: - ocfs2_inode_unlock(inode, 1); + if (status) { + ocfs2_inode_unlock(inode, 1); + inode_locked = 0; + } bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); @@ -1245,6 +1250,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) if (status < 0) mlog_errno(status); } + if (inode_locked) + ocfs2_inode_unlock(inode, 1); return status; } diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 0c60ef2d805..b9d16098ede 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -34,9 +34,8 @@ copy_to_user((typeof(a) __user *)b, &(a), sizeof(a)) /* - * This call is void because we are already reporting an error that may - * be -EFAULT. The error will be returned from the ioctl(2) call. It's - * just a best-effort to tell userspace that this request caused the error. 
+ * This is just a best-effort to tell userspace that this request + * caused the error. */ static inline void o2info_set_request_error(struct ocfs2_info_request *kreq, struct ocfs2_info_request __user *req) @@ -145,136 +144,105 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, int ocfs2_info_handle_blocksize(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_blocksize oib; if (o2info_from_user(oib, req)) - goto bail; + return -EFAULT; oib.ib_blocksize = inode->i_sb->s_blocksize; o2info_set_request_filled(&oib.ib_req); if (o2info_to_user(oib, req)) - goto bail; - - status = 0; -bail: - if (status) - o2info_set_request_error(&oib.ib_req, req); + return -EFAULT; - return status; + return 0; } int ocfs2_info_handle_clustersize(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_clustersize oic; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oic, req)) - goto bail; + return -EFAULT; oic.ic_clustersize = osb->s_clustersize; o2info_set_request_filled(&oic.ic_req); if (o2info_to_user(oic, req)) - goto bail; - - status = 0; -bail: - if (status) - o2info_set_request_error(&oic.ic_req, req); + return -EFAULT; - return status; + return 0; } int ocfs2_info_handle_maxslots(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_maxslots oim; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oim, req)) - goto bail; + return -EFAULT; oim.im_max_slots = osb->max_slots; o2info_set_request_filled(&oim.im_req); if (o2info_to_user(oim, req)) - goto bail; + return -EFAULT; - status = 0; -bail: - if (status) - o2info_set_request_error(&oim.im_req, req); - - return status; + return 0; } int ocfs2_info_handle_label(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_label oil; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oil, req)) - goto bail; + return -EFAULT; memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); o2info_set_request_filled(&oil.il_req); if (o2info_to_user(oil, req)) - goto bail; + return -EFAULT; - status = 0; -bail: - if (status) - o2info_set_request_error(&oil.il_req, req); - - return status; + return 0; } int ocfs2_info_handle_uuid(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_uuid oiu; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oiu, req)) - goto bail; + return -EFAULT; memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); o2info_set_request_filled(&oiu.iu_req); if (o2info_to_user(oiu, req)) - goto bail; - - status = 0; -bail: - if (status) - o2info_set_request_error(&oiu.iu_req, req); + return -EFAULT; - return status; + return 0; } int ocfs2_info_handle_fs_features(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_fs_features oif; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oif, req)) - goto bail; + return -EFAULT; oif.if_compat_features = osb->s_feature_compat; oif.if_incompat_features = osb->s_feature_incompat; @@ -283,39 +251,28 @@ int ocfs2_info_handle_fs_features(struct inode *inode, o2info_set_request_filled(&oif.if_req); if (o2info_to_user(oif, req)) - goto bail; + return -EFAULT; - status = 0; -bail: - if (status) - o2info_set_request_error(&oif.if_req, req); - - return status; + return 0; } int 
ocfs2_info_handle_journal_size(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_journal_size oij; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (o2info_from_user(oij, req)) - goto bail; + return -EFAULT; oij.ij_journal_size = osb->journal->j_inode->i_size; o2info_set_request_filled(&oij.ij_req); if (o2info_to_user(oij, req)) - goto bail; + return -EFAULT; - status = 0; -bail: - if (status) - o2info_set_request_error(&oij.ij_req, req); - - return status; + return 0; } int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, @@ -371,7 +328,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode, u32 i; u64 blkno = -1; char namebuf[40]; - int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE; + int status, type = INODE_ALLOC_SYSTEM_INODE; struct ocfs2_info_freeinode *oifi = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *inode_alloc = NULL; @@ -383,8 +340,10 @@ int ocfs2_info_handle_freeinode(struct inode *inode, goto out_err; } - if (o2info_from_user(*oifi, req)) - goto bail; + if (o2info_from_user(*oifi, req)) { + status = -EFAULT; + goto out_free; + } oifi->ifi_slotnum = osb->max_slots; @@ -421,14 +380,16 @@ int ocfs2_info_handle_freeinode(struct inode *inode, o2info_set_request_filled(&oifi->ifi_req); - if (o2info_to_user(*oifi, req)) - goto bail; + if (o2info_to_user(*oifi, req)) { + status = -EFAULT; + goto out_free; + } status = 0; bail: if (status) o2info_set_request_error(&oifi->ifi_req, req); - +out_free: kfree(oifi); out_err: return status; @@ -655,7 +616,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode, { u64 blkno = -1; char namebuf[40]; - int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE; + int status, type = GLOBAL_BITMAP_SYSTEM_INODE; struct ocfs2_info_freefrag *oiff; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -668,8 +629,10 @@ int ocfs2_info_handle_freefrag(struct inode *inode, goto out_err; } - if (o2info_from_user(*oiff, req)) - goto bail; + if (o2info_from_user(*oiff, req)) { + status = -EFAULT; + goto out_free; + } /* * chunksize from userspace should be power of 2. 
*/ @@ -708,14 +671,14 @@ int ocfs2_info_handle_freefrag(struct inode *inode, if (o2info_to_user(*oiff, req)) { status = -EFAULT; - goto bail; + goto out_free; } status = 0; bail: if (status) o2info_set_request_error(&oiff->iff_req, req); - +out_free: kfree(oiff); out_err: return status; @@ -724,23 +687,17 @@ int ocfs2_info_handle_freefrag(struct inode *inode, int ocfs2_info_handle_unknown(struct inode *inode, struct ocfs2_info_request __user *req) { - int status = -EFAULT; struct ocfs2_info_request oir; if (o2info_from_user(oir, req)) - goto bail; + return -EFAULT; o2info_clear_request_filled(&oir); if (o2info_to_user(oir, req)) - goto bail; + return -EFAULT; - status = 0; -bail: - if (status) - o2info_set_request_error(&oir, req); - - return status; + return 0; } /* diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 39abf89697e..88610b3cbc0 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; */ static struct ocfs2_stack_plugin *active_stack; +inline int ocfs2_is_o2cb_active(void) +{ + return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); +} +EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) { struct ocfs2_stack_plugin *p; diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 1ec56fdb8d0..fa49d8a1dc7 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); +/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ +int ocfs2_is_o2cb_active(void); + #endif /* STACKGLUE_H */ diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 08195a6e37d..2a1e405b9c3 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -322,11 +322,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", diff --git a/fs/splice.c b/fs/splice.c index 2ffa7b0c62f..ce6ffe94ba2 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -215,6 +215,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, buf->len = spd->partial[page_nr].len; buf->private = spd->partial[page_nr].private; buf->ops = spd->ops; + buf->flags = 0; if (spd->flags & SPLICE_F_GIFT) buf->flags |= PIPE_BUF_FLAG_GIFT; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index a34f4351928..939442d467c 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -34,6 +34,11 @@ #include #include "ubifs.h" +static int try_read_node(const struct ubifs_info *c, void *buf, int type, + int len, int lnum, int offs); +static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, + struct ubifs_zbranch *zbr, void *node); + /* * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. 
* @NAME_LESS: name corresponding to the first argument is less than second @@ -419,7 +424,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, return 0; } - err = ubifs_tnc_read_node(c, zbr, node); + if (c->replaying) { + err = fallible_read_node(c, &zbr->key, zbr, node); + /* + * When the node was not found, return -ENOENT, 0 otherwise. + * Negative return codes stay as-is. + */ + if (err == 0) + err = -ENOENT; + else if (err == 1) + err = 0; + } else { + err = ubifs_tnc_read_node(c, zbr, node); + } if (err) return err; @@ -2786,7 +2803,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, if (nm->name) { if (err) { /* Handle collisions */ - err = resolve_collision(c, key, &znode, &n, nm); + if (c->replaying) + err = fallible_resolve_collision(c, key, &znode, &n, + nm, 0); + else + err = resolve_collision(c, key, &znode, &n, nm); dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n); if (unlikely(err < 0)) diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index cfbb4c1b2f1..d738a7b842d 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -158,6 +158,12 @@ xfs_setfilesize( rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], 0, 1, _THIS_IP_); + /* we abort the update if there was an IO error */ + if (ioend->io_error) { + xfs_trans_cancel(tp, 0); + return ioend->io_error; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); if (!isize) { @@ -213,14 +219,17 @@ xfs_end_io( ioend->io_error = -EIO; goto done; } - if (ioend->io_error) - goto done; /* * For unwritten extents we need to issue transactions to convert a * range to normal written extens after the data I/O has finished. + * Detecting and handling completion IO errors is done individually + * for each case as different cleanup operations need to be performed + * on error. */ if (ioend->io_type == XFS_IO_UNWRITTEN) { + if (ioend->io_error) + goto done; error = xfs_iomap_write_unwritten(ip, ioend->io_offset, ioend->io_size); } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) { diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 1b2472a46e4..8ff89db9e66 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -428,6 +428,7 @@ xfs_buf_allocate_memory( out_free_pages: for (i = 0; i < bp->b_page_count; i++) __free_page(bp->b_pages[i]); + bp->b_flags &= ~_XBF_PAGES; return error; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 7cf5e4eafe2..8325cb234d9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3382,6 +3382,7 @@ xlog_recover_clear_agi_bucket( agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); offset = offsetof(xfs_agi_t, agi_unlinked) + (sizeof(xfs_agino_t) * bucket); + xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); xfs_trans_log_buf(tp, agibp, offset, (offset + sizeof(xfs_agino_t) - 1)); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 063f8ef4930..016c2f110f6 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -393,5 +393,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask) return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; } -#endif /* _CRYPTO_ALGAPI_H */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. 
+ */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} +#endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 78c6c52073a..6bdc00b6df0 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -45,10 +45,9 @@ struct can_proto { extern int can_proto_register(const struct can_proto *cp); extern void can_proto_unregister(const struct can_proto *cp); -extern int can_rx_register(struct net_device *dev, canid_t can_id, - canid_t mask, - void (*func)(struct sk_buff *, void *), - void *data, char *ident); +int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data, char *ident, struct sock *sk); extern void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, diff --git a/include/linux/capability.h b/include/linux/capability.h index 9b4378af414..eeb43c4816e 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -40,8 +40,6 @@ struct inode; struct dentry; struct user_namespace; -struct user_namespace *current_user_ns(void); - extern const kernel_cap_t __cap_empty_set; extern const kernel_cap_t __cap_init_eff_set; diff --git a/include/linux/cred.h b/include/linux/cred.h index 6c58dd7cb9a..cd3fb73dc42 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS #define current_user_ns() (current_cred_xxx(user_ns)) #else -#define current_user_ns() (&init_user_ns) +static inline struct user_namespace *current_user_ns(void) +{ + return &init_user_ns; +} #endif diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 0976fc46d1e..7f831b28ee6 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -208,4 +208,20 @@ static inline bool static_key_enabled(struct static_key *key) return (atomic_read(&key->enabled) > 0); } +static inline void static_key_enable(struct static_key *key) +{ + int count = atomic_read(&key->enabled); + + if (!count) + static_key_slow_inc(key); +} + +static inline void static_key_disable(struct static_key *key) +{ + int count = atomic_read(&key->enabled); + + if (count) + static_key_slow_dec(key); +} + #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7b312defd07..b38148b71cc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -145,7 +145,8 @@ struct kvm_io_range { #define NR_IOBUS_DEVS 1000 struct kvm_io_bus { - int dev_count; + int dev_count; + int ioeventfd_count; struct kvm_io_range range[]; }; @@ -162,8 +163,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); #ifdef CONFIG_KVM_ASYNC_PF struct kvm_async_pf { diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 0adf073f13b..669af5eaa89 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return 
fl1->fl_pid == fl2->fl_pid + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) + && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end diff --git a/include/linux/log2.h b/include/linux/log2.h index fd7ff3d91e6..f38fae23bda 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -15,12 +15,6 @@ #include <linux/types.h> #include <linux/bitops.h> -/* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/include/linux/mm.h b/include/linux/mm.h index 4c36c7e434c..044b3b06b2e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1090,34 +1090,6 @@ int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it?
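The ilog2() change above drops the ____ilog2_NaN() link-time trick and simply folds constant arguments below 2 to 0. A small userspace illustration of the same constant behaviour, using a compiler builtin in place of the kernel's explicit 64-entry ladder (assumes GCC or Clang; this is not the kernel macro itself):

#include <stdio.h>

/* Same contract as the constant branch of the patched kernel macro:
 * values below 2 give 0, otherwise floor(log2(n)). */
#define ILOG2(n) \
        ((n) < 2 ? 0 : 63 - __builtin_clzll((unsigned long long)(n)))

int main(void)
{
        printf("%d %d %d %d\n", ILOG2(0), ILOG2(1), ILOG2(2), ILOG2(4096));
        /* prints: 0 0 1 12 */
        return 0;
}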
*/ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - extern pid_t vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); @@ -1654,6 +1626,7 @@ unsigned long ra_submit(struct file_ra_state *ra, struct address_space *mapping, struct file *filp); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -1682,6 +1655,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88397fc86ca..fddc4bf17d4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1729,14 +1729,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) return NAPI_GRO_CB(skb)->frag0_len < hlen; } +static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) +{ + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; +} + static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, unsigned int offset) { if (!pskb_may_pull(skb, hlen)) return NULL; - NAPI_GRO_CB(skb)->frag0 = NULL; - NAPI_GRO_CB(skb)->frag0_len = 0; + skb_gro_frag0_invalidate(skb); return skb->data + offset; } diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 7b8fc73810a..f2c78595816 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -223,7 +223,7 @@ enum nfsstat4 { static inline bool seqid_mutating_err(u32 err) { - /* rfc 3530 section 8.1.5: */ + /* See RFC 7530, section 9.1.7 */ switch (err) { case NFS4ERR_STALE_CLIENTID: case NFS4ERR_STALE_STATEID: @@ -232,6 +232,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_BADXDR: case NFS4ERR_RESOURCE: case NFS4ERR_NOFILEHANDLE: + case NFS4ERR_MOVED: return false; }; return true; diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index a8c2ef6d3b9..9078b31d336 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -303,6 +303,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, } for (opt_iter = 6; opt_iter < opt_len;) { + if (opt_iter + 1 == opt_len) { + err_offset = opt_iter; + goto out; + } tag_len = opt[opt_iter + 1]; if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) { err_offset = opt_iter + 1; diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 8275e539bac..969aff6f657 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -137,12 +137,12 @@ struct ib_sa_path_rec { union ib_gid sgid; __be16 dlid; __be16 
slid; - int raw_traffic; + u8 raw_traffic; /* reserved */ __be32 flow_label; u8 hop_limit; u8 traffic_class; - int reversible; + u8 reversible; u8 numb_path; __be16 pkey; __be16 qos_class; @@ -193,7 +193,7 @@ struct ib_sa_mcmember_rec { u8 hop_limit; u8 scope; u8 join_state; - int proxy_join; + u8 proxy_join; }; /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 5a4c04a75b3..55c9b99ff9a 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -1,5 +1,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM raw_syscalls +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE syscalls #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index e52958d7c2d..3018528bd1b 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -158,5 +158,6 @@ struct can_filter { }; #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ +#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */ #endif /* CAN_H */ diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index b2cc0cd9c4d..1a9de73e845 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -63,7 +63,7 @@ struct packet_diag_mclist { __u32 pdmc_count; __u16 pdmc_type; __u16 pdmc_alen; - __u8 pdmc_addr[MAX_ADDR_LEN]; + __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ }; struct packet_diag_ring { diff --git a/kernel/events/core.c b/kernel/events/core.c index 4fd1aff35cf..88c84ef7452 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7482,7 +7482,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) - break; + goto out_unlock; } /* @@ -7498,7 +7498,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) ret = inherit_task_group(event, parent, parent_ctx, child, ctxn, &inherited_all); if (ret) - break; + goto out_unlock; } raw_spin_lock_irqsave(&parent_ctx->lock, flags); @@ -7526,6 +7526,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) } raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); +out_unlock: mutex_unlock(&parent_ctx->mutex); perf_unpin_context(parent_ctx); diff --git a/kernel/futex.c b/kernel/futex.c index 9c1fcaf44f9..672c49463ad 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2414,7 +2414,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; - struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; @@ -2495,6 +2494,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) + rt_mutex_unlock(&q.pi_state->pi_mutex); /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. @@ -2503,6 +2504,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, spin_unlock(q.lock_ptr); } } else { + struct rt_mutex *pi_mutex; + /* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. 
futex_unlock_pi() will not destroy the lock_ptr nor @@ -2526,18 +2529,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; + /* + * If fixup_pi_state_owner() faulted and was unable to handle + * the fault, unlock the rt_mutex and return the fault to + * userspace. + */ + if (ret && rt_mutex_owner(pi_mutex) == current) + rt_mutex_unlock(pi_mutex); + /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } - /* - * If fixup_pi_state_owner() faulted and was unable to handle the - * fault, unlock the rt_mutex and return the fault to userspace. - */ - if (ret == -EFAULT) { - if (pi_mutex && rt_mutex_owner(pi_mutex) == current) - rt_mutex_unlock(pi_mutex); - } else if (ret == -EINTR) { + if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but @@ -2903,4 +2907,4 @@ static int __init futex_init(void) return 0; } -__initcall(futex_init); +core_initcall(futex_init); diff --git a/kernel/padata.c b/kernel/padata.c index 072f4ee4eb8..0925ccf92c7 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -190,19 +190,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) reorder = &next_queue->reorder; + spin_lock(&reorder->lock); if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); - spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); - spin_unlock(&reorder->lock); pd->processed++; + spin_unlock(&reorder->lock); goto out; } + spin_unlock(&reorder->lock); if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); diff --git a/kernel/printk.c b/kernel/printk.c index 1dc9db72b64..094b6a51941 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1556,7 +1556,7 @@ static void call_console_drivers(int level, const char *text, size_t len) { struct console *con; - trace_console(text, len); + trace_console_rcuidle(text, len); if (level >= console_loglevel && !ignore_loglevel) return; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index df4e1c0c07c..96bc78c908c 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -150,11 +150,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task) WARN_ON(!task->ptrace || task->parent != current); + /* + * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. + * Recheck state under the lock to close this race. 
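The padata and ptrace hunks above fix the same class of bug: a condition tested before taking the lock can change before the lock is acquired, so it has to be re-tested once the lock is held. A self-contained pthread sketch of that recheck-under-the-lock pattern (the names and the single-threaded driver are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int frozen = 1;          /* protected by lock */

static void thaw_if_still_frozen(void)
{
        if (!frozen)            /* unlocked peek: only a hint */
                return;

        pthread_mutex_lock(&lock);
        if (frozen)             /* recheck now that we hold the lock */
                frozen = 0;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        thaw_if_still_frozen();
        printf("frozen=%d\n", frozen);  /* prints: frozen=0 */
        return 0;
}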
+ */ spin_lock_irq(&task->sighand->siglock); - if (__fatal_signal_pending(task)) - wake_up_state(task, __TASK_TRACED); - else - task->state = TASK_TRACED; + if (task->state == __TASK_TRACED) { + if (__fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + else + task->state = TASK_TRACED; + } spin_unlock_irq(&task->sighand->siglock); } diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index d9ca207cec0..286c92f5573 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -64,8 +64,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) static void fixup_rt_mutex_waiters(struct rt_mutex *lock) { - if (!rt_mutex_has_waiters(lock)) - clear_rt_mutex_waiters(lock); + unsigned long owner, *p = (unsigned long *) &lock->owner; + + if (rt_mutex_has_waiters(lock)) + return; + + /* + * The rbtree has no waiters enqueued, now make sure that the + * lock->owner still has the waiters bit set, otherwise the + * following can happen: + * + * CPU 0 CPU 1 CPU2 + * l->owner=T1 + * rt_mutex_lock(l) + * lock(l->lock) + * l->owner = T1 | HAS_WAITERS; + * enqueue(T2) + * boost() + * unlock(l->lock) + * block() + * + * rt_mutex_lock(l) + * lock(l->lock) + * l->owner = T1 | HAS_WAITERS; + * enqueue(T3) + * boost() + * unlock(l->lock) + * block() + * signal(->T2) signal(->T3) + * lock(l->lock) + * dequeue(T2) + * deboost() + * unlock(l->lock) + * lock(l->lock) + * dequeue(T3) + * ==> wait list is empty + * deboost() + * unlock(l->lock) + * lock(l->lock) + * fixup_rt_mutex_waiters() + * if (wait_list_empty(l) { + * l->owner = owner + * owner = l->owner & ~HAS_WAITERS; + * ==> l->owner = T1 + * } + * lock(l->lock) + * rt_mutex_unlock(l) fixup_rt_mutex_waiters() + * if (wait_list_empty(l) { + * owner = l->owner & ~HAS_WAITERS; + * cmpxchg(l->owner, T1, NULL) + * ===> Success (l->owner = NULL) + * + * l->owner = owner + * ==> l->owner = T1 + * } + * + * With the check for the waiter bit in place T3 on CPU2 will not + * overwrite. All tasks fiddling with the waiters bit are + * serialized by l->lock, so nothing else can modify the waiters + * bit. If the bit is set then nothing can change l->owner either + * so the simple RMW is safe. The cmpxchg() will simply fail if it + * happens in the middle of the RMW because the waiters bit is + * still set. 
+ */ + owner = ACCESS_ONCE(*p); + if (owner & RT_MUTEX_HAS_WAITERS) + ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS; } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1f1ef9ad6b0..98cdfa934fc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -227,14 +227,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { static void sched_feat_disable(int i) { - if (static_key_enabled(&sched_feat_keys[i])) - static_key_slow_dec(&sched_feat_keys[i]); + static_key_disable(&sched_feat_keys[i]); } static void sched_feat_enable(int i) { - if (!static_key_enabled(&sched_feat_keys[i])) - static_key_slow_inc(&sched_feat_keys[i]); + static_key_enable(&sched_feat_keys[i]); } #else static void sched_feat_disable(int i) { }; @@ -6680,8 +6678,9 @@ void show_state_filter(unsigned long state_filter) touch_all_softlockup_watchdogs(); -#ifdef CONFIG_SYSRQ_SCHED_DEBUG - sysrq_sched_debug_show(); +#ifdef CONFIG_SCHED_DEBUG + if (!state_filter) + sysrq_sched_debug_show(); #endif rcu_read_unlock(); /* diff --git a/kernel/sysctl.c b/kernel/sysctl.c index a1d1acab8cd..f30a115b4ab 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2431,6 +2431,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int break; if (neg) continue; + val = convmul * val / convdiv; if ((min && val < *min) || (max && val > *max)) continue; *i = val; diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 64c5990fd50..1ee8b1d251d 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -840,6 +840,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { int cpu = smp_processor_id(); + if (!bc) + return; + /* Set it up only once ! */ if (bc->event_handler != tick_handle_oneshot_broadcast) { int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 86b6e45e96a..f88be8cefde 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); int ring_buffer_iter_empty(struct ring_buffer_iter *iter) { struct ring_buffer_per_cpu *cpu_buffer; + struct buffer_page *reader; + struct buffer_page *head_page; + struct buffer_page *commit_page; + unsigned commit; cpu_buffer = iter->cpu_buffer; - return iter->head_page == cpu_buffer->commit_page && - iter->head == rb_commit_index(cpu_buffer); + /* Remember, trace recording is off when iterator is in use */ + reader = cpu_buffer->reader_page; + head_page = cpu_buffer->head_page; + commit_page = cpu_buffer->commit_page; + commit = rb_page_commit(commit_page); + + return ((iter->head_page == commit_page && iter->head == commit) || + (iter->head_page == reader && commit_page == head_page && + head_page->read == commit && + iter->head == rb_page_commit(cpu_buffer->reader_page))); } EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); @@ -4840,9 +4852,9 @@ static __init int test_ringbuffer(void) rb_data[cpu].cnt = cpu; rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], "rbtester/%d", cpu); - if (WARN_ON(!rb_threads[cpu])) { + if (WARN_ON(IS_ERR(rb_threads[cpu]))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_threads[cpu]); goto out_free; } @@ -4852,9 +4864,9 @@ static __init int test_ringbuffer(void) /* Now create the rb hammer! 
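The test_ringbuffer() hunks in this file replace NULL checks with IS_ERR()/PTR_ERR(), because kthread_create() and kthread_run() report failure by encoding an errno value in the returned pointer rather than by returning NULL. A standalone model of that convention (the macros below are simplified stand-ins for the kernel's, for illustration only):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(ptr))

/* Hypothetical constructor that fails the way kthread_create() does. */
static void *create_worker(int fail)
{
        static int worker;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&worker;
}

int main(void)
{
        void *t = create_worker(1);

        if (IS_ERR(t))          /* a plain !t test would miss this */
                printf("failed: %ld\n", PTR_ERR(t));    /* negative errno */
        return 0;
}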
*/ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); - if (WARN_ON(!rb_hammer)) { + if (WARN_ON(IS_ERR(rb_hammer))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_hammer); goto out_free; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8a3f1ce3654..7deb98beb95 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5568,11 +5568,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, return ret; out_reg: - ret = register_ftrace_function_probe(glob, ops, count); + ret = alloc_snapshot(&global_trace); + if (ret < 0) + goto out; - if (ret >= 0) - alloc_snapshot(&global_trace); + ret = register_ftrace_function_probe(glob, ops, count); + out: return ret < 0 ? ret : 0; } diff --git a/mm/filemap.c b/mm/filemap.c index 075c4c6e994..fb32276faf8 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1123,6 +1123,11 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos, cond_resched(); find_page: + if (fatal_signal_pending(current)) { + error = -EINTR; + goto out; + } + page = find_get_page(mapping, index); if (!page) { page_cache_sync_readahead(mapping, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d21c9ef0943..3877483a20f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1235,6 +1235,18 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, return ret; } +/* + * foll_force can write to even unwritable pmd's, but only + * after we've gone through a cow cycle and they are dirty. + */ +static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, + unsigned int flags) +{ + return pmd_write(pmd) || + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && + page && PageAnon(page)); +} + struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, @@ -1245,15 +1257,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, assert_spin_locked(&mm->page_table_lock); - if (flags & FOLL_WRITE && !pmd_write(*pmd)) - goto out; - /* Avoid dumping huge zero page */ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) return ERR_PTR(-EFAULT); page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); + + if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags)) + return NULL; + if (flags & FOLL_TOUCH) { pmd_t _pmd; /* diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e9fd382bf25..69832290015 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1070,23 +1070,32 @@ static int gather_surplus_pages(struct hstate *h, int delta) } /* - * When releasing a hugetlb pool reservation, any surplus pages that were - * allocated to satisfy the reservation must be explicitly freed if they were - * never used. - * Called with hugetlb_lock held. + * This routine has two main purposes: + * 1) Decrement the reservation count (resv_huge_pages) by the value passed + * in unused_resv_pages. This corresponds to the prior adjustments made + * to the associated reservation map. + * 2) Free any unused surplus pages that may have been allocated to satisfy + * the reservation. As many as unused_resv_pages may be freed. + * + * Called with hugetlb_lock held. However, the lock could be dropped (and + * reacquired) during calls to cond_resched_lock. Whenever dropping the lock, + * we must make sure nobody else can claim pages we are in the process of + * freeing. Do this by ensuring resv_huge_page always is greater than the + * number of huge pages we plan to free when dropping the lock. 
*/ static void return_unused_surplus_pages(struct hstate *h, unsigned long unused_resv_pages) { unsigned long nr_pages; - /* Uncommit the reservation */ - h->resv_huge_pages -= unused_resv_pages; - /* Cannot return gigantic pages currently */ if (h->order >= MAX_ORDER) - return; + goto out; + /* + * Part (or even all) of the reservation could have been backed + * by pre-allocated pages. Only free surplus pages. + */ nr_pages = min(unused_resv_pages, h->surplus_huge_pages); /* @@ -1096,12 +1105,22 @@ static void return_unused_surplus_pages(struct hstate *h, * when the nodes with surplus pages have no free pages. * free_pool_huge_page() will balance the the freed pages across the * on-line nodes with memory and will handle the hstate accounting. + * + * Note that we decrement resv_huge_pages as we free the pages. If + * we drop the lock, resv_huge_pages will still be sufficiently large + * to cover subsequent pages we may free. */ while (nr_pages--) { + h->resv_huge_pages--; + unused_resv_pages--; if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) - break; + goto out; cond_resched_lock(&hugetlb_lock); } + +out: + /* Fully uncommit the reservation */ + h->resv_huge_pages -= unused_resv_pages; } /* diff --git a/mm/memory.c b/mm/memory.c index 3968333ec5f..effd60665ab 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1659,12 +1659,6 @@ struct page *follow_page_mask(struct vm_area_struct *vma, return page; } -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) -{ - return stack_guard_page_start(vma, addr) || - stack_guard_page_end(vma, addr+PAGE_SIZE); -} - /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task @@ -1845,11 +1839,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; - /* For mlock, just skip the stack guard page. */ - if (foll_flags & FOLL_MLOCK) { - if (stack_guard_page(vma, start)) - goto next_page; - } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) @@ -3220,40 +3209,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, return ret; } -/* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. 
@@ -3273,10 +3228,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, address) < 0) - return VM_FAULT_SIGSEGV; - /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a112ee02c35..18e83fd76eb 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1272,7 +1272,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) } /* - * Confirm all pages in a range [start, end) is belongs to the same zone. + * Confirm all pages in a range [start, end) belong to the same zone. */ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) { @@ -1280,9 +1280,9 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) struct zone *zone = NULL; struct page *page; int i; - for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); + for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); pfn < end_pfn; - pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { + pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { /* Make sure the memory section is present first */ if (!present_section_nr(pfn_to_section_nr(pfn))) continue; @@ -1301,7 +1301,11 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) zone = page_zone(page); } } - return 1; + + if (zone) + return 1; + else + return 0; } /* diff --git a/mm/mmap.c b/mm/mmap.c index fbadaaef056..404005babd1 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -275,6 +275,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long rlim, retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; @@ -320,7 +321,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -343,10 +345,22 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. 
+ */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -430,7 +444,7 @@ void validate_mm(struct mm_struct *mm) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); vma_unlock_anon_vma(vma); - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -598,7 +612,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -847,7 +861,7 @@ again: remove_next = 1 + (end > next->vm_end); vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -890,7 +904,7 @@ again: remove_next = 1 + (end > next->vm_end); else if (next) vma_gap_update(next); else - mm->highest_vm_end = end; + WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } if (insert && file) uprobe_mmap(insert); @@ -1689,7 +1703,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1700,12 +1714,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; - if (gap_end >= low_limit && gap_end - gap_start >= length) + if (gap_end >= low_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ @@ -1727,8 +1742,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1792,7 +1807,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1805,10 +1820,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; - if (gap_start <= high_limit && gap_end - gap_start >= length) + if (gap_start <= high_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ @@ -1831,7 +1847,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? 
- vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1869,7 +1885,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1880,9 +1896,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -1905,7 +1922,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -1920,9 +1937,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2049,21 +2067,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. */ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start, actual_size; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, grow)) return -ENOMEM; /* Stack limit test */ - actual_size = size; - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) - actual_size -= PAGE_SIZE; - if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ @@ -2104,32 +2120,43 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { - int error; + struct vm_area_struct *next; + unsigned long gap_addr; + int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ + /* Guard against exceeding limits of the address space. */ + address &= PAGE_MASK; + if (address >= TASK_SIZE) + return -ENOMEM; + address += PAGE_SIZE; + + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + + /* Guard against overflow */ + if (gap_addr < address || gap_addr > TASK_SIZE) + gap_addr = TASK_SIZE; + + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + /* We must make sure the anon_vma is allocated. 
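vm_start_gap()/vm_end_gap() in the include/linux/mm.h hunk above, and the expand_upwards()/expand_downwards() checks in this file, all perform the same guarded arithmetic: widen a stack VMA's range by stack_guard_gap and clamp the result if the unsigned addition or subtraction wraps. A small standalone model of that clamping (the page size and addresses are made-up example values, not kernel code):

#include <stdio.h>

static unsigned long guard_gap = 256UL * 4096;  /* 256 pages of 4 KiB */

/* Start of the guarded range for a stack growing down. */
static unsigned long start_with_gap(unsigned long vm_start)
{
        unsigned long s = vm_start - guard_gap;

        return s > vm_start ? 0 : s;    /* subtraction wrapped: clamp */
}

/* End of the guarded range for a stack growing up. */
static unsigned long end_with_gap(unsigned long vm_end)
{
        unsigned long e = vm_end + guard_gap;

        return e < vm_end ? ~0UL : e;   /* addition wrapped: clamp */
}

int main(void)
{
        printf("%#lx %#lx\n", start_with_gap(0x1000), start_with_gap(0x400000));
        printf("%#lx\n", end_with_gap(~0UL - 4096));
        return 0;
}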
*/ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; - vma_lock_anon_vma(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. - * Also guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else { - vma_unlock_anon_vma(vma); - return -ENOMEM; - } - error = 0; + vma_lock_anon_vma(vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { @@ -2160,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - vma->vm_mm->highest_vm_end = address; + vma->vm_mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&vma->vm_mm->page_table_lock); perf_event_mmap(vma); @@ -2180,27 +2207,36 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) int expand_downwards(struct vm_area_struct *vma, unsigned long address) { + struct vm_area_struct *prev; + unsigned long gap_addr; int error; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - address &= PAGE_MASK; error = security_mmap_addr(address); if (error) return error; - vma_lock_anon_vma(vma); + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ + vma_lock_anon_vma(vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { @@ -2242,28 +2278,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. 
*/ +unsigned long stack_guard_gap = 256UL<rc); goto free_and_error; } + if (rsize < count) { + pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); + count = rsize; + } p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); diff --git a/net/can/af_can.c b/net/can/af_can.c index d3668c55b08..34064aa88f0 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -425,6 +425,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, * @func: callback function on filter match * @data: returned parameter for callback function * @ident: string for calling module indentification + * @sk: socket pointer (might be NULL) * * Description: * Invokes the callback function with the received sk_buff and the given @@ -448,7 +449,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, */ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data, - char *ident) + char *ident, struct sock *sk) { struct receiver *r; struct hlist_head *rl; @@ -476,6 +477,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, r->func = func; r->data = data; r->ident = ident; + r->sk = sk; hlist_add_head_rcu(&r->list, rl); d->entries++; @@ -500,8 +502,11 @@ EXPORT_SYMBOL(can_rx_register); static void can_rx_delete_receiver(struct rcu_head *rp) { struct receiver *r = container_of(rp, struct receiver, rcu); + struct sock *sk = r->sk; kmem_cache_free(rcv_cache, r); + if (sk) + sock_put(sk); } /** @@ -576,8 +581,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, spin_unlock(&can_rcvlists_lock); /* schedule the receiver item for deletion */ - if (r) + if (r) { + if (r->sk) + sock_hold(r->sk); call_rcu(&r->rcu, can_rx_delete_receiver); + } } EXPORT_SYMBOL(can_rx_unregister); diff --git a/net/can/af_can.h b/net/can/af_can.h index 1dccb4c3389..0e95be42358 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -50,13 +50,14 @@ struct receiver { struct hlist_node list; - struct rcu_head rcu; canid_t can_id; canid_t mask; unsigned long matches; void (*func)(struct sk_buff *, void *); void *data; char *ident; + struct sock *sk; + struct rcu_head rcu; }; enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; diff --git a/net/can/bcm.c b/net/can/bcm.c index dd0781c49eb..6e0a88d9c55 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -706,14 +706,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id, static void bcm_remove_op(struct bcm_op *op) { - hrtimer_cancel(&op->timer); - hrtimer_cancel(&op->thrtimer); - - if (op->tsklet.func) - tasklet_kill(&op->tsklet); + if (op->tsklet.func) { + while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || + test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || + hrtimer_active(&op->timer)) { + hrtimer_cancel(&op->timer); + tasklet_kill(&op->tsklet); + } + } - if (op->thrtsklet.func) - tasklet_kill(&op->thrtsklet); + if (op->thrtsklet.func) { + while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || + test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || + hrtimer_active(&op->thrtimer)) { + hrtimer_cancel(&op->thrtimer); + tasklet_kill(&op->thrtsklet); + } + } if ((op->frames) && (op->frames != &op->sframe)) kfree(op->frames); @@ -1169,7 +1178,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, err = can_rx_register(dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, - "bcm"); + "bcm", sk); op->rx_reg_dev = dev; dev_put(dev); @@ -1178,7 +1187,7 @@ static int bcm_rx_setup(struct 
bcm_msg_head *msg_head, struct msghdr *msg, } else err = can_rx_register(NULL, op->can_id, REGMASK(op->can_id), - bcm_rx_handler, op, "bcm"); + bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ list_del(&op->list); diff --git a/net/can/gw.c b/net/can/gw.c index de25455b4e3..2ad8aa4f9f0 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -435,7 +435,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj) { return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, gwj->ccgw.filter.can_mask, can_can_gw_rcv, - gwj, "gw"); + gwj, "gw", NULL); } static inline void cgw_unregister_filter(struct cgw_job *gwj) diff --git a/net/can/raw.c b/net/can/raw.c index 1085e65f848..602be0e07a0 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -168,7 +168,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk, for (i = 0; i < count; i++) { err = can_rx_register(dev, filter[i].can_id, filter[i].can_mask, - raw_rcv, sk, "raw"); + raw_rcv, sk, "raw", sk); if (err) { /* clean up successfully registered filters */ while (--i >= 0) @@ -189,7 +189,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, if (err_mask) err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, - raw_rcv, sk, "raw"); + raw_rcv, sk, "raw", sk); return err; } @@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen % sizeof(struct can_filter) != 0) return -EINVAL; + if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter)) + return -EINVAL; + count = optlen / sizeof(struct can_filter); if (count > 1) { diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 025ced8fbb5..c99cfde87bd 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -472,11 +473,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) { struct sockaddr_storage *paddr = &con->peer_addr.in_addr; struct socket *sock; + unsigned int noio_flag; int ret; BUG_ON(con->sock); + + /* sock_create_kern() allocates with GFP_KERNEL */ + noio_flag = memalloc_noio_save(); ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock); + memalloc_noio_restore(noio_flag); if (ret) return ret; sock->sk->sk_allocation = GFP_NOFS; @@ -1969,6 +1975,19 @@ static int process_connect(struct ceph_connection *con) dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + if (con->auth_reply_buf) { + /* + * Any connection that defines ->get_authorizer() + * should also define ->verify_authorizer_reply(). + * See get_connect_authorizer(). 
+ */ + ret = con->ops->verify_authorizer_reply(con, 0); + if (ret < 0) { + con->error_msg = "bad authorize reply"; + return ret; + } + } + switch (con->in_reply.tag) { case CEPH_MSGR_TAG_FEATURES: pr_err("%s%lld %s feature set mismatch," diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index c1de8d404c4..26e2235356c 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -870,7 +870,6 @@ static int decode_new_up_state_weight(void **p, void *end, if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && (xorstate & CEPH_OSD_EXISTS)) { pr_info("osd%d does not exist\n", osd); - map->osd_weight[osd] = CEPH_OSD_IN; memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); map->osd_state[osd] = 0; } else { diff --git a/net/core/dev.c b/net/core/dev.c index 5c5caa3bef8..cddd3344814 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1559,37 +1559,59 @@ EXPORT_SYMBOL(call_netdevice_notifiers); static struct static_key netstamp_needed __read_mostly; #ifdef HAVE_JUMP_LABEL -/* We are not allowed to call static_key_slow_dec() from irq context - * If net_disable_timestamp() is called from irq context, defer the - * static_key_slow_dec() calls. - */ static atomic_t netstamp_needed_deferred; +static atomic_t netstamp_wanted; +static void netstamp_clear(struct work_struct *work) +{ + int deferred = atomic_xchg(&netstamp_needed_deferred, 0); + int wanted; + + wanted = atomic_add_return(deferred, &netstamp_wanted); + if (wanted > 0) + static_key_enable(&netstamp_needed); + else + static_key_disable(&netstamp_needed); +} +static DECLARE_WORK(netstamp_work, netstamp_clear); #endif void net_enable_timestamp(void) { #ifdef HAVE_JUMP_LABEL - int deferred = atomic_xchg(&netstamp_needed_deferred, 0); + int wanted; - if (deferred) { - while (--deferred) - static_key_slow_dec(&netstamp_needed); - return; + while (1) { + wanted = atomic_read(&netstamp_wanted); + if (wanted <= 0) + break; + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) + return; } -#endif + atomic_inc(&netstamp_needed_deferred); + schedule_work(&netstamp_work); +#else static_key_slow_inc(&netstamp_needed); +#endif } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL - if (in_interrupt()) { - atomic_inc(&netstamp_needed_deferred); - return; + int wanted; + + while (1) { + wanted = atomic_read(&netstamp_wanted); + if (wanted <= 1) + break; + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) + return; } -#endif + atomic_dec(&netstamp_needed_deferred); + schedule_work(&netstamp_work); +#else static_key_slow_dec(&netstamp_needed); +#endif } EXPORT_SYMBOL(net_disable_timestamp); @@ -2461,9 +2483,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; - } else if (illegal_highdma(dev, skb)) { - features &= ~NETIF_F_SG; } + if (illegal_highdma(dev, skb)) + features &= ~NETIF_F_SG; return features; } @@ -3891,7 +3913,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb) pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); - NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, + skb_frag_size(frag0), + skb->end - skb->tail); } } diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index a974dfec4bf..55bb6909edc 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -80,6 +80,7 @@ static struct sk_buff 
*reset_per_cpu_data(struct per_cpu_dm_data *data) struct nlattr *nla; struct sk_buff *skb; unsigned long flags; + void *msg_header; al = sizeof(struct net_dm_alert_msg); al += dm_hit_limit * sizeof(struct net_dm_drop_point); @@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) skb = genlmsg_new(al, GFP_KERNEL); - if (skb) { - genlmsg_put(skb, 0, 0, &net_drop_monitor_family, - 0, NET_DM_CMD_ALERT); - nla = nla_reserve(skb, NLA_UNSPEC, - sizeof(struct net_dm_alert_msg)); - msg = nla_data(nla); - memset(msg, 0, al); - } else { - mod_timer(&data->send_timer, jiffies + HZ / 10); + if (!skb) + goto err; + + msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family, + 0, NET_DM_CMD_ALERT); + if (!msg_header) { + nlmsg_free(skb); + skb = NULL; + goto err; + } + nla = nla_reserve(skb, NLA_UNSPEC, + sizeof(struct net_dm_alert_msg)); + if (!nla) { + nlmsg_free(skb); + skb = NULL; + goto err; } + msg = nla_data(nla); + memset(msg, 0, al); + goto out; +err: + mod_timer(&data->send_timer, jiffies + HZ / 10); +out: spin_lock_irqsave(&data->lock, flags); swap(data->skb, skb); spin_unlock_irqrestore(&data->lock, flags); + if (skb) { + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh); + + genlmsg_end(skb, genlmsg_data(gnlh)); + } + return skb; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 2bc6c111340..f96970f9483 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -872,7 +872,8 @@ static void neigh_probe(struct neighbour *neigh) if (skb) skb = skb_copy(skb, GFP_ATOMIC); write_unlock(&neigh->lock); - neigh->ops->solicit(neigh, skb); + if (neigh->ops->solicit) + neigh->ops->solicit(neigh, skb); atomic_inc(&neigh->probes); kfree_skb(skb); } diff --git a/net/core/sock.c b/net/core/sock.c index e3cb45411f3..96e12591932 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1403,6 +1403,11 @@ static void __sk_free(struct sock *sk) pr_debug("%s: optmem leakage (%d bytes) detected\n", __func__, atomic_read(&sk->sk_omem_alloc)); + if (sk->sk_frag.page) { + put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + } + if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); @@ -2556,11 +2561,6 @@ void sk_common_release(struct sock *sk) sk_refcnt_debug_release(sk); - if (sk->sk_frag.page) { - put_page(sk->sk_frag.page); - sk->sk_frag.page = NULL; - } - sock_put(sk); } EXPORT_SYMBOL(sk_common_release); diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index f053198e730..5e3a7302f77 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) for (i = 0; i < hc->tx_seqbufc; i++) kfree(hc->tx_seqbuf[i]); hc->tx_seqbufc = 0; + dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); } static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 294c642fbeb..3bb5ff9e14a 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -263,7 +263,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) switch (type) { case ICMP_REDIRECT: - dccp_do_redirect(skb, sk); + if (!sock_owned_by_user(sk)) + dccp_do_redirect(skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. 
*/ diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 736fdedf9c8..c3ae00de174 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -132,10 +132,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, np = inet6_sk(sk); if (type == NDISC_REDIRECT) { - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + if (!sock_owned_by_user(sk)) { + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); - if (dst) - dst->ops->redirect(dst, sk, skb); + if (dst) + dst->ops->redirect(dst, sk, skb); + } goto out; } diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 662071b249c..e47b15dd9b3 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -140,6 +140,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, /* It is still raw copy of parent, so invalidate * destructor and make plain sk_free() */ newsk->sk_destruct = NULL; + bh_unlock_sock(newsk); sk_free(newsk); return NULL; } diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index ca118e8cb14..9d06b37acc4 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb, hc06_ptr += 3; } else { /* compress nothing */ - memcpy(hc06_ptr, &hdr, 4); + memcpy(hc06_ptr, hdr, 4); /* replace the top byte with new ECN | DSCP format */ *hc06_ptr = tmp; hc06_ptr += 4; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 667c1d4ca98..4322372dddb 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1649,6 +1649,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) goto validate_return_locked; } + if (opt_iter + 1 == opt_len) { + err_offset = opt_iter; + goto validate_return_locked; + } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4a7cf03a192..fdd555129fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -958,7 +958,8 @@ static void nl_fib_input(struct sk_buff *skb) net = sock_net(skb->sk); nlh = nlmsg_hdr(skb); - if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || + if (skb->len < nlmsg_total_size(sizeof(*frn)) || + skb->len < nlh->nlmsg_len || nlmsg_len(nlh) < sizeof(*frn)) return; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 155adf8729c..4572ee7c71f 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -196,9 +196,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay) static void igmp_gq_start_timer(struct in_device *in_dev) { int tv = net_random() % in_dev->mr_maxdelay; + unsigned long exp = jiffies + tv + 2; + + if (in_dev->mr_gq_running && + time_after_eq(exp, (in_dev->mr_gq_timer).expires)) + return; in_dev->mr_gq_running = 1; - if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2)) + if (!mod_timer(&in_dev->mr_gq_timer, exp)) in_dev_hold(in_dev); } @@ -1874,7 +1879,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) rtnl_lock(); in_dev = ip_mc_find_dev(net, imr); - if (!in_dev) { + if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) { ret = -ENODEV; goto out; } @@ -1895,8 +1900,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) *imlp = iml->next_rcu; - ip_mc_dec_group(in_dev, group); + if (in_dev) + ip_mc_dec_group(in_dev, group); rtnl_unlock(); + /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 
6dfec2f1821..ca872fec1db 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -690,7 +690,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; newsk->sk_write_space = sk_stream_write_space; - newsk->sk_mark = inet_rsk(req)->ir_mark; + inet_sk(newsk)->mc_list = NULL; newicsk->icsk_retransmits = 0; newicsk->icsk_backoff = 0; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f6603142cb3..3d009e17416 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1042,7 +1042,14 @@ void ipv4_pktinfo_prepare(struct sk_buff *skb) pktinfo->ipi_ifindex = 0; pktinfo->ipi_spec_dst.s_addr = 0; } - skb_dst_drop(skb); + /* We need to keep the dst for __ip_options_echo() + * We could restrict the test to opt.ts_needtime || opt.srr, + * but the following is good enough as IP options are not often used. + */ + if (unlikely(IPCB(skb)->opt.optlen)) + skb_dst_force(skb); + else + skb_dst_drop(skb); } int ip_setsockopt(struct sock *sk, int level, diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 4ec34275160..eadafac6f46 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -582,7 +582,6 @@ static void vti_tunnel_setup(struct net_device *dev) dev->type = ARPHRD_TUNNEL; dev->destructor = vti_dev_free; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN; dev->flags = IFF_NOARP; dev->iflink = 0; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 76d40302ce3..3217acd4e33 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1314,8 +1314,8 @@ static int translate_compat_table(struct xt_table_info **pinfo, newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries[raw_smp_processor_id()]; pos = entry1; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index f8c8f60ad7e..b4a590c6b75 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -150,7 +150,9 @@ void ping_hash(struct sock *sk) void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); - pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); + + pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); + write_lock_bh(&ping_table.lock); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); @@ -639,6 +641,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, { struct sk_buff *skb = skb_peek(&sk->sk_write_queue); + if (!skb) + return 0; pfh->wcheck = csum_partial((char *)&pfh->icmph, sizeof(struct icmphdr), pfh->wcheck); pfh->icmph.checksum = csum_fold(pfh->wcheck); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c19519d2605..3283060a8cc 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1801,6 +1801,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, { int res; + tos &= IPTOS_RT_MASK; rcu_read_lock(); /* Multicast recognition logic is moved from route cache to here. 
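The tcp_v4_mtu_reduced() and TCP timer hunks that follow guard their work with tests of the form (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN): each state is a small integer, so shifting 1 by it lets a whole set of states be tested with a single AND. A minimal standalone illustration of that idiom (stand-in state numbers, not the kernel's definitions):

#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };

#define STF(state)      (1 << (state))

/* Returns nonzero when the state is in the "skip" set. */
static int should_skip(int state)
{
        return (1 << state) & (STF(ST_CLOSE) | STF(ST_LISTEN));
}

int main(void)
{
        printf("%d %d %d\n", should_skip(ST_ESTABLISHED) != 0,
               should_skip(ST_CLOSE) != 0, should_skip(ST_LISTEN) != 0);
        /* prints: 0 1 1 */
        return 0;
}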
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c9a5aa8e39c..d57f9dd2a7d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5329,6 +5329,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) struct inet_connection_sock *icsk = inet_csk(sk); tcp_set_state(sk, TCP_ESTABLISHED); + icsk->icsk_ack.lrcvtime = tcp_time_stamp; if (skb != NULL) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); @@ -5529,7 +5530,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); - icsk->icsk_ack.lrcvtime = tcp_time_stamp; tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3db3e4578d0..c127c361882 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -270,10 +270,13 @@ EXPORT_SYMBOL(tcp_v4_connect); */ void tcp_v4_mtu_reduced(struct sock *sk) { - struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); - u32 mtu = tcp_sk(sk)->mtu_info; + struct dst_entry *dst; + u32 mtu; + if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) + return; + mtu = tcp_sk(sk)->mtu_info; dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -389,7 +392,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) switch (type) { case ICMP_REDIRECT: - do_redirect(icmp_skb, sk); + if (!sock_owned_by_user(sk)) + do_redirect(icmp_skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ @@ -1422,6 +1426,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, * scaled. So correct it appropriately. */ tp->snd_wnd = ntohs(tcp_hdr(skb)->window); + tp->max_window = tp->snd_wnd; /* Activate the retrans timer so that SYNACK can be retransmitted. 
 	 * The request socket is not added to the SYN table of the parent
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0f017882725..914a55db803 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -405,6 +405,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->srtt = 0;
 		newtp->mdev = TCP_TIMEOUT_INIT;
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 181b3054e47..4d219d0101f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2151,9 +2151,11 @@ u32 __tcp_select_window(struct sock *sk)
 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
 	int window;
 
-	if (mss > full_space)
+	if (unlikely(mss > full_space)) {
 		mss = full_space;
-
+		if (mss <= 0)
+			return 0;
+	}
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0dbb96172fd..8c61887277c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -237,7 +237,8 @@ void tcp_delack_timer_handler(struct sock *sk)
 
 	sk_mem_reclaim_partial(sk);
 
-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
 
 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -516,7 +517,8 @@ void tcp_write_timer_handler(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !icsk->icsk_pending)
 		goto out;
 
 	if (time_after(icsk->icsk_timeout, jiffies)) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2346370808c..4d9cb4f44c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4813,8 +4813,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 	struct net_device *dev;
 	struct inet6_dev *idev;
 
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
+	for_each_netdev(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4823,7 +4822,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 			dev_disable_change(idev);
 		}
 	}
-	rcu_read_unlock();
 }
 
 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 603f251b6ca..ae88e17f5c7 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include
 
 static bool log_ecn_error = true;
 
@@ -365,35 +366,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		u8 type, u8 code, int offset, __be32 info)
+		       u8 type, u8 code, int offset, __be32 info)
 {
-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-	__be16 *p = (__be16 *)(skb->data + offset);
-	int grehlen = offset + 4;
+	const struct gre_base_hdr *greh;
+	const struct ipv6hdr *ipv6h;
+	int grehlen = sizeof(*greh);
 	struct ip6_tnl *t;
+	int key_off = 0;
 	__be16 flags;
+	__be32 key;
 
-	flags = p[0];
-	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-		if (flags&(GRE_VERSION|GRE_ROUTING))
-			return;
-		if (flags&GRE_KEY) {
-			grehlen += 4;
-			if (flags&GRE_CSUM)
-				grehlen += 4;
-		}
+	if (!pskb_may_pull(skb, offset + grehlen))
+		return;
+	greh = (const struct gre_base_hdr
*)(skb->data + offset); + flags = greh->flags; + if (flags & (GRE_VERSION | GRE_ROUTING)) + return; + if (flags & GRE_CSUM) + grehlen += 4; + if (flags & GRE_KEY) { + key_off = grehlen + offset; + grehlen += 4; } - /* If only 8 bytes returned, keyed message will be dropped here */ - if (!pskb_may_pull(skb, grehlen)) + if (!pskb_may_pull(skb, offset + grehlen)) return; ipv6h = (const struct ipv6hdr *)skb->data; - p = (__be16 *)(skb->data + offset); + greh = (const struct gre_base_hdr *)(skb->data + offset); + key = key_off ? *(__be32 *)(skb->data + key_off) : 0; t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, - flags & GRE_KEY ? - *(((__be32 *)p) + (grehlen / 4) - 1) : 0, - p[1]); + key, greh->protocol); if (t == NULL) return; diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 71b766ee821..88a2e8827ef 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -174,6 +174,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { __pskb_pull(skb, skb_gro_offset(skb)); + skb_gro_frag0_invalidate(skb); proto = ipv6_gso_pull_exthdrs(skb, proto); skb_gro_pull(skb, -skb_transport_offset(skb)); skb_reset_transport_header(skb); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1de6d24dd30..9266d5b770c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -741,7 +741,6 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * Fragment the datagram. */ - *prevhdr = NEXTHDR_FRAGMENT; hroom = LL_RESERVED_SPACE(rt->dst.dev); troom = rt->dst.dev->needed_tailroom; @@ -749,6 +748,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * Keep copying data until we run out. */ while(left > 0) { + u8 *fragnexthdr_offset; + len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) @@ -795,6 +796,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); + fragnexthdr_offset = skb_network_header(frag); + fragnexthdr_offset += prevhdr - skb_network_header(skb); + *fragnexthdr_offset = NEXTHDR_FRAGMENT; + /* * Build fragment header. 
*/ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 12984e6794b..efc77acbe9e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -103,16 +103,25 @@ struct ip6_tnl_net { static struct net_device_stats *ip6_get_stats(struct net_device *dev) { - struct pcpu_tstats sum = { 0 }; + struct pcpu_tstats tmp, sum = { 0 }; int i; for_each_possible_cpu(i) { + unsigned int start; const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + tmp.rx_packets = tstats->rx_packets; + tmp.rx_bytes = tstats->rx_bytes; + tmp.tx_packets = tstats->tx_packets; + tmp.tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + sum.rx_packets += tmp.rx_packets; + sum.rx_bytes += tmp.rx_bytes; + sum.tx_packets += tmp.tx_packets; + sum.tx_bytes += tmp.tx_bytes; } dev->stats.rx_packets = sum.rx_packets; dev->stats.rx_bytes = sum.rx_bytes; @@ -394,18 +403,19 @@ ip6_tnl_dev_uninit(struct net_device *dev) __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) { - const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; - __u8 nexthdr = ipv6h->nexthdr; - __u16 off = sizeof (*ipv6h); + const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw; + unsigned int nhoff = raw - skb->data; + unsigned int off = nhoff + sizeof(*ipv6h); + u8 next, nexthdr = ipv6h->nexthdr; while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { - __u16 optlen = 0; struct ipv6_opt_hdr *hdr; - if (raw + off + sizeof (*hdr) > skb->data && - !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) + u16 optlen; + + if (!pskb_may_pull(skb, off + sizeof(*hdr))) break; - hdr = (struct ipv6_opt_hdr *) (raw + off); + hdr = (struct ipv6_opt_hdr *)(skb->data + off); if (nexthdr == NEXTHDR_FRAGMENT) { struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; if (frag_hdr->frag_off) @@ -416,20 +426,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) } else { optlen = ipv6_optlen(hdr); } + /* cache hdr->nexthdr, since pskb_may_pull() might + * invalidate hdr + */ + next = hdr->nexthdr; if (nexthdr == NEXTHDR_DEST) { - __u16 i = off + 2; + u16 i = 2; + + /* Remember : hdr is no longer valid at this point. 
*/ + if (!pskb_may_pull(skb, off + optlen)) + break; + while (1) { struct ipv6_tlv_tnl_enc_lim *tel; /* No more room for encapsulation limit */ - if (i + sizeof (*tel) > off + optlen) + if (i + sizeof(*tel) > optlen) break; - tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; + tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i); /* return index of option if found and valid */ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && tel->length == 1) - return i; + return i + off - nhoff; /* else jump to next option */ if (tel->type) i += tel->length + 2; @@ -437,7 +456,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) i++; } } - nexthdr = hdr->nexthdr; + nexthdr = next; off += optlen; } return 0; @@ -822,8 +841,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, } tstats = this_cpu_ptr(t->dev->tstats); + u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); netif_rx(skb); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 4147716a9d5..13d47e17766 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -777,7 +777,8 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) * Delete a VIF entry */ -static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) +static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, + struct list_head *head) { struct mif_device *v; struct net_device *dev; @@ -823,7 +824,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) dev->ifindex, &in6_dev->cnf); } - if (v->flags & MIFF_REGISTER) + if ((v->flags & MIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); dev_put(dev); @@ -1333,7 +1334,6 @@ static int ip6mr_device_event(struct notifier_block *this, struct mr6_table *mrt; struct mif_device *v; int ct; - LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; @@ -1342,10 +1342,9 @@ static int ip6mr_device_event(struct notifier_block *this, v = &mrt->vif6_table[0]; for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) - mif6_delete(mrt, ct, &list); + mif6_delete(mrt, ct, 1, NULL); } } - unregister_netdevice_many(&list); return NOTIFY_DONE; } @@ -1550,7 +1549,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all) for (i = 0; i < mrt->maxvif; i++) { if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) continue; - mif6_delete(mrt, i, &list); + mif6_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); @@ -1703,7 +1702,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (copy_from_user(&mifi, optval, sizeof(mifi_t))) return -EFAULT; rtnl_lock(); - ret = mif6_delete(mrt, mifi, NULL); + ret = mif6_delete(mrt, mifi, 0, NULL); rtnl_unlock(); return ret; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 27e974fdf2b..5bf00c6b1db 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -578,8 +578,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, } offset += skb_transport_offset(skb); - if (skb_copy_bits(skb, offset, &csum, 2)) - BUG(); + err = skb_copy_bits(skb, offset, &csum, 2); + if (err < 0) { + ip6_flush_pending_frames(sk); + goto out; + } /* in case cksum was not initialized */ if (unlikely(csum)) @@ -1135,7 +1138,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) - amount = skb->tail - skb->transport_header; + amount = skb->len; 
spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2f34be9f178..0f68d4db7d3 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1672,6 +1672,8 @@ static int ip6_route_del(struct fib6_config *cfg) continue; if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) continue; + if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) + continue; dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 02898b68d74..184b67566cb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -385,10 +385,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, np = inet6_sk(sk); if (type == NDISC_REDIRECT) { - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + if (!sock_owned_by_user(sk)) { + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); - if (dst) - dst->ops->redirect(dst, sk, skb); + if (dst) + dst->ops->redirect(dst, sk, skb); + } goto out; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 797ff373e48..787ac0ef182 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -280,7 +280,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn } EXPORT_SYMBOL_GPL(l2tp_session_find); -struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref) { int hash; struct l2tp_session *session; @@ -290,6 +291,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { if (++count > nth) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); read_unlock_bh(&tunnel->hlist_lock); return session; } @@ -300,7 +304,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_nth); +EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 2f89d43877d..54f89f38386 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -236,7 +236,8 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); extern void l2tp_tunnel_sock_put(struct sock *sk); extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); -extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); +extern struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref); extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); @@ -256,6 +257,7 @@ extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); /* Session reference counts. 
Incremented when code obtains a reference * to a session. diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 072d7202e18..c6bd783cfb1 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -237,10 +237,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v) } /* Show the tunnel or session context */ - if (pd->session == NULL) + if (!pd->session) { l2tp_dfs_seq_tunnel_show(m, pd->tunnel); - else + } else { l2tp_dfs_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 07f8b97f9ae..1f65095c321 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -381,7 +382,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) drop: IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); - return -1; + return 0; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP @@ -554,6 +555,30 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m return err ? err : copied; } +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) +{ + struct sk_buff *skb; + int amount; + + switch (cmd) { + case SIOCOUTQ: + amount = sk_wmem_alloc_get(sk); + break; + case SIOCINQ: + spin_lock_bh(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + amount = skb ? 
skb->len : 0; + spin_unlock_bh(&sk->sk_receive_queue.lock); + break; + + default: + return -ENOIOCTLCMD; + } + + return put_user(amount, (int __user *)arg); +} +EXPORT_SYMBOL(l2tp_ioctl); + static struct proto l2tp_ip_prot = { .name = "L2TP/IP", .owner = THIS_MODULE, @@ -562,7 +587,7 @@ static struct proto l2tp_ip_prot = { .bind = l2tp_ip_bind, .connect = l2tp_ip_connect, .disconnect = l2tp_ip_disconnect, - .ioctl = udp_ioctl, + .ioctl = l2tp_ioctl, .destroy = l2tp_ip_destroy_sock, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index eadfb3031ed..feb0e71bd4e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -720,7 +720,7 @@ static struct proto l2tp_ip6_prot = { .bind = l2tp_ip6_bind, .connect = l2tp_ip6_connect, .disconnect = l2tp_ip6_disconnect, - .ioctl = udp_ioctl, + .ioctl = l2tp_ioctl, .destroy = l2tp_ip6_destroy_sock, .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 0825ff26e11..490024eaece 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -719,7 +719,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback goto out; } - session = l2tp_session_find_nth(tunnel, si); + session = l2tp_session_get_nth(tunnel, si, false); if (session == NULL) { ti++; tunnel = NULL; @@ -729,8 +729,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - session) <= 0) + session) <= 0) { + l2tp_session_dec_refcount(session); break; + } + l2tp_session_dec_refcount(session); si++; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index c3ae2411650..c06c7ed47b6 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1576,7 +1576,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -1703,10 +1703,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) /* Show the tunnel or session context. 
*/ - if (pd->session == NULL) + if (!pd->session) { pppol2tp_seq_tunnel_show(m, pd->tunnel); - else + } else { pppol2tp_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f8765cc84e4..ddc63f92fa2 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -345,7 +345,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, /* fast-forward to vendor IEs */ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); - if (offset) { + if (offset < ifmsh->ie_len) { len = ifmsh->ie_len - offset; data = ifmsh->ie + offset; if (skb_tailroom(skb) < len) diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index b70010076b9..3a13538712d 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -114,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) break; } + flush_delayed_work(&sdata->dec_tailroom_needed_wk); drv_remove_interface(local, sdata); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f23dd126cc1..73c8441a7a8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1257,6 +1257,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po) f->arr[f->num_members] = sk; smp_wmb(); f->num_members++; + if (f->num_members == 1) + dev_add_pack(&f->prot_hook); spin_unlock(&f->lock); } @@ -1273,6 +1275,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) BUG_ON(i >= f->num_members); f->arr[i] = f->arr[f->num_members - 1]; f->num_members--; + if (f->num_members == 0) + __dev_remove_pack(&f->prot_hook); spin_unlock(&f->lock); } @@ -1304,13 +1308,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) return -EINVAL; } + mutex_lock(&fanout_mutex); + + err = -EINVAL; if (!po->running) - return -EINVAL; + goto out; + err = -EALREADY; if (po->fanout) - return -EALREADY; + goto out; - mutex_lock(&fanout_mutex); match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && @@ -1340,7 +1347,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; - dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } err = -EINVAL; @@ -1361,24 +1367,29 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) return err; } -static void fanout_release(struct sock *sk) +/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes + * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 
+ * It is the responsibility of the caller to call fanout_release_data() and + * free the returned packet_fanout (after synchronize_net()) + */ +static struct packet_fanout *fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; - f = po->fanout; - if (!f) - return; - mutex_lock(&fanout_mutex); - po->fanout = NULL; + f = po->fanout; + if (f) { + po->fanout = NULL; - if (atomic_dec_and_test(&f->sk_ref)) { - list_del(&f->list); - dev_remove_pack(&f->prot_hook); - kfree(f); + if (atomic_dec_and_test(&f->sk_ref)) + list_del(&f->list); + else + f = NULL; } mutex_unlock(&fanout_mutex); + + return f; } static const struct proto_ops packet_ops; @@ -2231,7 +2242,7 @@ static int packet_snd(struct socket *sock, int vnet_hdr_len; struct packet_sock *po = pkt_sk(sk); unsigned short gso_type = 0; - int hlen, tlen; + int hlen, tlen, linear; int extra_len = 0; /* @@ -2325,7 +2336,9 @@ static int packet_snd(struct socket *sock, err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; - skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, + linear = vnet_hdr.hdr_len; + linear = max(linear, min_t(int, len, dev->hard_header_len)); + skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; @@ -2428,6 +2441,7 @@ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; + struct packet_fanout *f; struct net *net; union tpacket_req_u req_u; @@ -2467,9 +2481,13 @@ static int packet_release(struct socket *sock) packet_set_ring(sk, &req_u, 1, 1); } - fanout_release(sk); + f = fanout_release(sk); synchronize_net(); + + if (f) { + kfree(f); + } /* * Now the socket is dead. No more input will appear. */ @@ -2540,7 +2558,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; - char name[15]; + char name[sizeof(uaddr->sa_data) + 1]; struct net_device *dev; int err = -ENODEV; @@ -2550,7 +2568,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, if (addr_len != sizeof(struct sockaddr)) return -EINVAL; - strlcpy(name, uaddr->sa_data, sizeof(name)); + /* uaddr->sa_data comes from the userspace, it's not guaranteed to be + * zero-terminated. 
+ */ + memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); + name[sizeof(uaddr->sa_data)] = 0; dev = dev_get_by_name(sock_net(sk), name); if (dev) @@ -3384,7 +3406,6 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); - fanout_release(sk); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); @@ -3672,7 +3693,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, */ if (!tx_ring) init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); - break; + break; default: break; } diff --git a/net/rds/cong.c b/net/rds/cong.c index e5b65acd650..cec4c4e6d90 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c @@ -285,7 +285,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port) i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; - __set_bit_le(off, (void *)map->m_page_addrs[i]); + set_bit_le(off, (void *)map->m_page_addrs[i]); } void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) @@ -299,7 +299,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; - __clear_bit_le(off, (void *)map->m_page_addrs[i]); + clear_bit_le(off, (void *)map->m_page_addrs[i]); } static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 15d46b9166d..0a31f2c51e9 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -814,10 +814,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, goto out_module_put; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); - if (err < 0) + if (err <= 0) goto out_module_put; - if (err == 0) - goto noflush_out; nla_nest_end(skb, nest); @@ -835,7 +833,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, out_module_put: module_put(a->ops->owner); err_out: -noflush_out: kfree_skb(skb); kfree(a); return err; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2ea40d1877a..042e5d83962 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -136,12 +136,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) unsigned long cl; unsigned long fh; int err; - int tp_created = 0; + int tp_created; if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; replay: + tp_created = 0; + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); if (err < 0) return err; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 7c3de6ffa51..eba9d1e49fa 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -176,11 +176,12 @@ META_COLLECTOR(int_vlan_tag) { unsigned short tag; - tag = vlan_tx_tag_get(skb); - if (!tag && __vlan_get_tag(skb, &tag)) - *err = -1; - else + if (vlan_tx_tag_present(skb)) + dst->value = vlan_tx_tag_get(skb); + else if (!__vlan_get_tag(skb, &tag)) dst->value = tag; + else + *err = -1; } diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6360a14edea..59ab0c40e15 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1301,82 +1301,111 @@ void sctp_assoc_update(struct sctp_association *asoc, } /* Update the retran path for sending a retransmitted packet. - * Round-robin through the active transports, else round-robin - * through the inactive transports as this is the next best thing - * we can try. + * See also RFC4960, 6.4. 
Multi-Homed SCTP Endpoints: + * + * When there is outbound data to send and the primary path + * becomes inactive (e.g., due to failures), or where the + * SCTP user explicitly requests to send data to an + * inactive destination transport address, before reporting + * an error to its ULP, the SCTP endpoint should try to send + * the data to an alternate active destination transport + * address if one exists. + * + * When retransmitting data that timed out, if the endpoint + * is multihomed, it should consider each source-destination + * address pair in its retransmission selection policy. + * When retransmitting timed-out data, the endpoint should + * attempt to pick the most divergent source-destination + * pair from the original source-destination pair to which + * the packet was transmitted. + * + * Note: Rules for picking the most divergent source-destination + * pair are an implementation decision and are not specified + * within this document. + * + * Our basic strategy is to round-robin transports in priorities + * according to sctp_state_prio_map[] e.g., if no such + * transport with state SCTP_ACTIVE exists, round-robin through + * SCTP_UNKNOWN, etc. You get the picture. */ -void sctp_assoc_update_retran_path(struct sctp_association *asoc) +static const u8 sctp_trans_state_to_prio_map[] = { + [SCTP_ACTIVE] = 3, /* best case */ + [SCTP_UNKNOWN] = 2, + [SCTP_PF] = 1, + [SCTP_INACTIVE] = 0, /* worst case */ +}; + +static u8 sctp_trans_score(const struct sctp_transport *trans) { - struct sctp_transport *t, *next; - struct list_head *head = &asoc->peer.transport_addr_list; - struct list_head *pos; + return sctp_trans_state_to_prio_map[trans->state]; +} - if (asoc->peer.transport_count == 1) - return; +static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, + struct sctp_transport *best) +{ + if (best == NULL) + return curr; - /* Find the next transport in a round-robin fashion. */ - t = asoc->peer.retran_path; - pos = &t->transports; - next = NULL; + return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; +} - while (1) { - /* Skip the head. */ - if (pos->next == head) - pos = head->next; - else - pos = pos->next; +void sctp_assoc_update_retran_path(struct sctp_association *asoc) +{ + struct sctp_transport *trans = asoc->peer.retran_path; + struct sctp_transport *trans_next = NULL; - t = list_entry(pos, struct sctp_transport, transports); + /* We're done as we only have the one and only path. */ + if (asoc->peer.transport_count == 1) + return; + /* If active_path and retran_path are the same and active, + * then this is the only active path. Use it. + */ + if (asoc->peer.active_path == asoc->peer.retran_path && + asoc->peer.active_path->state == SCTP_ACTIVE) + return; - /* We have exhausted the list, but didn't find any - * other active transports. If so, use the next - * transport. - */ - if (t == asoc->peer.retran_path) { - t = next; + /* Iterate from retran_path's successor back to retran_path. */ + for (trans = list_next_entry(trans, transports); 1; + trans = list_next_entry(trans, transports)) { + /* Manually skip the head element. */ + if (&trans->transports == &asoc->peer.transport_addr_list) + continue; + if (trans->state == SCTP_UNCONFIRMED) + continue; + trans_next = sctp_trans_elect_best(trans, trans_next); + /* Active is good enough for immediate return. */ + if (trans_next->state == SCTP_ACTIVE) break; - } - - /* Try to find an active transport. 
*/ - - if ((t->state == SCTP_ACTIVE) || - (t->state == SCTP_UNKNOWN)) { + /* We've reached the end, time to update path. */ + if (trans == asoc->peer.retran_path) break; - } else { - /* Keep track of the next transport in case - * we don't find any active transport. - */ - if (t->state != SCTP_UNCONFIRMED && !next) - next = t; - } } - if (t) - asoc->peer.retran_path = t; - else - t = asoc->peer.retran_path; + if (trans_next != NULL) + asoc->peer.retran_path = trans_next; SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" - " %p addr: ", + " %p updated new path to addr: ", " port: %d\n", asoc, - (&t->ipaddr), - ntohs(t->ipaddr.v4.sin_port)); + (&asoc->peer.retran_path->ipaddr), + ntohs(asoc->peer.retran_path->ipaddr.v4.sin_port)); } -/* Choose the transport for sending retransmit packet. */ -struct sctp_transport *sctp_assoc_choose_alter_transport( - struct sctp_association *asoc, struct sctp_transport *last_sent_to) +struct sctp_transport * +sctp_assoc_choose_alter_transport(struct sctp_association *asoc, + struct sctp_transport *last_sent_to) { /* If this is the first time packet is sent, use the active path, * else use the retran path. If the last packet was sent over the * retran path, update the retran path and use it. */ - if (!last_sent_to) + if (last_sent_to == NULL) { return asoc->peer.active_path; - else { + } else { if (last_sent_to == asoc->peer.retran_path) sctp_assoc_update_retran_path(asoc); + return asoc->peer.retran_path; } } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ede7c540ea2..531c305c94a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -6175,6 +6175,9 @@ int sctp_inet_listen(struct socket *sock, int backlog) if (sock->state != SS_UNCONNECTED) goto out; + if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) + goto out; + /* If backlog is zero, disable listening. 
*/ if (!backlog) { if (sctp_sstate(sk, CLOSED)) diff --git a/net/socket.c b/net/socket.c index 00d0157b74b..25759c92b26 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2366,8 +2366,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, return err; err = sock_error(sock->sk); - if (err) + if (err) { + datagrams = err; goto out_put; + } entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index e0062c544ac..a9ca70579eb 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr, if (!oa->data) return -ENOMEM; - creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); + creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL); if (!creds) { kfree(oa->data); return -ENOMEM; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 62663a08ffb..e625efe0e03 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1518,7 +1518,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) case RPC_GSS_PROC_DESTROY: if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) goto auth_err; - rsci->h.expiry_time = get_seconds(); + rsci->h.expiry_time = seconds_since_boot(); set_bit(CACHE_NEGATIVE, &rsci->h.flags); if (resv->iov_len + 4 > PAGE_SIZE) goto drop; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 76ec699ebd1..62ccc9e1706 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -978,6 +978,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) unsigned int hash; struct unix_address *addr; struct hlist_head *list; + struct path path = { NULL, NULL }; err = -EINVAL; if (sunaddr->sun_family != AF_UNIX) @@ -993,9 +994,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; addr_len = err; + if (sun_path[0]) { + umode_t mode = S_IFSOCK | + (SOCK_INODE(sock)->i_mode & ~current_umask()); + err = unix_mknod(sun_path, mode, &path); + if (err) { + if (err == -EEXIST) + err = -EADDRINUSE; + goto out; + } + } + err = mutex_lock_interruptible(&u->readlock); if (err) - goto out; + goto out_put; err = -EINVAL; if (u->addr) @@ -1012,16 +1024,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) atomic_set(&addr->refcnt, 1); if (sun_path[0]) { - struct path path = {0}; - umode_t mode = S_IFSOCK | - (SOCK_INODE(sock)->i_mode & ~current_umask()); - err = unix_mknod(sun_path, mode, &path); - if (err) { - if (err == -EEXIST) - err = -EADDRINUSE; - unix_release_addr(addr); - goto out_up; - } addr->hash = UNIX_HASH_SIZE; hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1); spin_lock(&unix_table_lock); @@ -1048,6 +1050,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) spin_unlock(&unix_table_lock); out_up: mutex_unlock(&u->readlock); +out_put: + if (err) + path_put(&path); out: return err; } diff --git a/net/unix/garbage.c b/net/unix/garbage.c index a72182d6750..58ba0e5f147 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -152,6 +152,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp) if (s) { struct unix_sock *u = unix_sk(s); + BUG_ON(!atomic_long_read(&u->inflight)); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); @@ -358,6 +359,14 @@ void unix_gc(void) } list_del(&cursor); + /* Now gc_candidates contains 
only garbage. Restore original + * inflight counters for these as well, and remove the skbuffs + * which are creating the cycle(s). + */ + skb_queue_head_init(&hitlist); + list_for_each_entry(u, &gc_candidates, link) + scan_children(&u->sk, inc_inflight, &hitlist); + /* * not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. @@ -368,15 +377,6 @@ void unix_gc(void) list_move_tail(&u->link, &gc_inflight_list); } - /* - * Now gc_candidates contains only garbage. Restore original - * inflight counters for these as well, and remove the skbuffs - * which are creating the cycle(s). - */ - skb_queue_head_init(&hitlist); - list_for_each_entry(u, &gc_candidates, link) - scan_children(&u->sk, inc_inflight, &hitlist); - spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h index 38ee70f3cd5..1d8de9edd85 100644 --- a/samples/seccomp/bpf-helper.h +++ b/samples/seccomp/bpf-helper.h @@ -138,7 +138,7 @@ union arg64 { #define ARG_32(idx) \ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)) -/* Loads hi into A and lo in X */ +/* Loads lo into M[0] and hi into M[1] and A */ #define ARG_64(idx) \ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \ BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \ @@ -153,88 +153,107 @@ union arg64 { BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \ jt -/* Checks the lo, then swaps to check the hi. A=lo,X=hi */ +#define JA32(value, jt) \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ + jt + +#define JGE32(value, jt) \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ + jt + +#define JGT32(value, jt) \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ + jt + +#define JLE32(value, jt) \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ + jt + +#define JLT32(value, jt) \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ + jt + +/* + * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both + * A and M[1]. This invariant is kept by restoring A if necessary. 
+ */ #define JEQ64(lo, hi, jt) \ + /* if (hi != arg.hi) goto NOMATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + /* if (lo != arg.lo) goto NOMATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ + BPF_STMT(BPF_LD+BPF_MEM, 1) #define JNE64(lo, hi, jt) \ - BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + /* if (hi != arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo != arg.lo) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ - -#define JA32(value, jt) \ - BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ - jt + BPF_STMT(BPF_LD+BPF_MEM, 1) #define JA64(lo, hi, jt) \ + /* if (hi & arg.hi) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo & arg.lo) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ + BPF_STMT(BPF_LD+BPF_MEM, 1) -#define JGE32(value, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ - jt - -#define JLT32(value, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ - jt - -/* Shortcut checking if hi > arg.hi. */ #define JGE64(lo, hi, jt) \ + /* if (hi > arg.hi) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ + /* if (hi != arg.hi) goto NOMATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo >= arg.lo) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ - jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ - -#define JLT64(lo, hi, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ - BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ + BPF_STMT(BPF_LD+BPF_MEM, 1) -#define JGT32(value, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ - jt - -#define JLE32(value, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ - jt - -/* Check hi > args.hi first, then do the GE checking */ #define JGT64(lo, hi, jt) \ + /* if (hi > arg.hi) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ + /* if (hi != arg.hi) goto NOMATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo > arg.lo) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ + BPF_STMT(BPF_LD+BPF_MEM, 1) #define JLE64(lo, hi, jt) \ - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \ - 
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ + /* if (hi < arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ + /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo <= arg.lo) goto MATCH; */ \ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ + BPF_STMT(BPF_LD+BPF_MEM, 1) + +#define JLT64(lo, hi, jt) \ + /* if (hi < arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ + /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), \ + /* if (lo < arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \ + BPF_STMT(BPF_LD+BPF_MEM, 1), \ jt, \ - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ + BPF_STMT(BPF_LD+BPF_MEM, 1) #define LOAD_SYSCALL_NR \ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \ diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 3ae28db5a64..c8031c1c52c 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -212,7 +212,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, if (sa->aad->type == AUDIT_APPARMOR_KILL) (void)send_sig_info(SIGKILL, NULL, - sa->aad->tsk ? sa->aad->tsk : current); + sa->type == LSM_AUDIT_DATA_TASK && sa->aad->tsk ? + sa->aad->tsk : current); if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) return complain_error(sa->aad->error); diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 9aaa4e72cc1..15d172e39cf 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -441,7 +441,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) new_profile = aa_get_profile(ns->unconfined); info = "ux fallback"; } else { - error = -ENOENT; + error = -EACCES; info = "profile not found"; } } diff --git a/security/apparmor/file.c b/security/apparmor/file.c index fdaa50cb187..a4f7f1a5a79 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, int type = AUDIT_APPARMOR_AUTO; struct common_audit_data sa; struct apparmor_audit_data aad = {0,}; - sa.type = LSM_AUDIT_DATA_NONE; + sa.type = LSM_AUDIT_DATA_TASK; + sa.u.tsk = NULL; sa.aad = &aad; aad.op = op, aad.fs.request = request; diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h index 775843e7f98..b5029c77c3e 100644 --- a/security/apparmor/include/match.h +++ b/security/apparmor/include/match.h @@ -57,6 +57,7 @@ struct table_set_header { #define YYTD_ID_ACCEPT2 6 #define YYTD_ID_NXT 7 #define YYTD_ID_TSIZE 8 +#define YYTD_ID_MAX 8 #define YYTD_DATA8 1 #define YYTD_DATA16 2 diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index bda4569fdd8..0c9d121f15d 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -313,6 +313,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile) return profile->audit; } +bool policy_view_capable(void); +bool policy_admin_capable(void); bool aa_may_manage_policy(int op); #endif /* __AA_POLICY_H */ diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index b21830eced4..6eeaab80865 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -759,51 +759,49 @@ __setup("apparmor=", apparmor_enabled_setup); /* set global flag turning off 
the ability to load policy */ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; - if (aa_g_lock_policy) - return -EACCES; return param_set_bool(val, kp); } static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_view_capable()) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aabool(const char *val, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; return param_set_bool(val, kp); } static int param_get_aabool(char *buffer, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_view_capable()) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aauint(const char *val, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; return param_set_uint(val, kp); } static int param_get_aauint(char *buffer, const struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_view_capable()) return -EPERM; return param_get_uint(buffer, kp); } static int param_get_audit(char *buffer, struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_view_capable()) return -EPERM; if (!apparmor_enabled) @@ -815,7 +813,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp) static int param_set_audit(const char *val, struct kernel_param *kp) { int i; - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; if (!apparmor_enabled) @@ -836,7 +834,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp) static int param_get_mode(char *buffer, struct kernel_param *kp) { - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; if (!apparmor_enabled) @@ -848,7 +846,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp) static int param_set_mode(const char *val, struct kernel_param *kp) { int i; - if (!capable(CAP_MAC_ADMIN)) + if (!policy_admin_capable()) return -EPERM; if (!apparmor_enabled) diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 90971a8c378..704b0eb2580 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -45,6 +45,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize) * it every time we use td_id as an index */ th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; + if (th.td_id > YYTD_ID_MAX) + goto out; th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); blob += sizeof(struct table_header); @@ -59,7 +61,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) table = kvmalloc(tsize); if (table) { - *table = th; + table->td_id = th.td_id; + table->td_flags = th.td_flags; + table->td_lolen = th.td_lolen; if (th.td_flags == YYTD_DATA8) UNPACK_ARRAY(table->td_data, blob, th.td_lolen, u8, byte_to_byte); @@ -71,14 +75,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize) u32, be32_to_cpu); else goto fail; + /* if table was vmalloced make sure the page tables are synced + * before it is used, as it goes live to all cpus. + */ + if (is_vmalloc_addr(table)) + vm_unmap_aliases(); } out: - /* if table was vmalloced make sure the page tables are synced - * before it is used, as it goes live to all cpus. 
- */ - if (is_vmalloc_addr(table)) - vm_unmap_aliases(); return table; fail: kvfree(table); diff --git a/security/apparmor/path.c b/security/apparmor/path.c index e91ffee8016..07bf2ac1ef6 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -25,7 +25,6 @@ #include "include/path.h" #include "include/policy.h" - /* modified from dcache.c */ static int prepend(char **buffer, int buflen, const char *str, int namelen) { @@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) +/* If the path is not connected to the expected root, + * check if it is a sysctl and handle specially else remove any + * leading / that __d_path may have returned. + * Unless + * specifically directed to connect the path, + * OR + * if in a chroot and doing chroot relative paths and the path + * resolves to the namespace root (would be connected outside + * of chroot) and specifically directed to connect paths to + * namespace root. + */ +static int disconnect(const struct path *path, char *buf, char **name, + int flags) +{ + int error = 0; + + if (!(flags & PATH_CONNECT_PATH) && + !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && + our_mnt(path->mnt))) { + /* disconnected path, don't return pathname starting + * with '/' + */ + error = -EACCES; + if (**name == '/') + *name = *name + 1; + } else if (**name != '/') + /* CONNECT_PATH with missing root */ + error = prepend(name, *name - buf, "/", 1); + + return error; +} + /** * d_namespace_path - lookup a name associated with a given path * @path: path to lookup (NOT NULL) @@ -74,7 +105,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, * control instead of hard coded /proc */ return prepend(name, *name - buf, "/proc", 5); - } + } else + return disconnect(path, buf, name, flags); return 0; } @@ -120,29 +152,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, goto out; } - /* If the path is not connected to the expected root, - * check if it is a sysctl and handle specially else remove any - * leading / that __d_path may have returned. - * Unless - * specifically directed to connect the path, - * OR - * if in a chroot and doing chroot relative paths and the path - * resolves to the namespace root (would be connected outside - * of chroot) and specifically directed to connect paths to - * namespace root. 
- */ - if (!connected) { - if (!(flags & PATH_CONNECT_PATH) && - !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && - our_mnt(path->mnt))) { - /* disconnected path, don't return pathname starting - * with '/' - */ - error = -EACCES; - if (*res == '/') - *name = res + 1; - } - } + if (!connected) + error = disconnect(path, buf, name, flags); out: return error; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 813200384d9..c4780e108d7 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1002,6 +1002,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info, &sa, NULL); } +bool policy_view_capable(void) +{ + struct user_namespace *user_ns = current_user_ns(); + bool response = false; + + if (ns_capable(user_ns, CAP_MAC_ADMIN)) + response = true; + + return response; +} + +bool policy_admin_capable(void) +{ + return policy_view_capable() && !aa_g_lock_policy; +} + /** * aa_may_manage_policy - can the current task manage policy * @op: the policy manipulation operation being done @@ -1016,7 +1032,7 @@ bool aa_may_manage_policy(int op) return 0; } - if (!capable(CAP_MAC_ADMIN)) { + if (!policy_admin_capable()) { audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); return 0; } diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 329b1fd3074..55ff3eecd36 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -571,6 +571,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) error = PTR_ERR(profile->policy.dfa); profile->policy.dfa = NULL; goto fail; + } else if (!profile->policy.dfa) { + error = -EPROTO; + goto fail; } if (!unpack_u32(e, &profile->policy.start[0], "start")) /* default start state */ @@ -652,7 +655,7 @@ static bool verify_xindex(int xindex, int table_size) int index, xtype; xtype = xindex & AA_X_TYPE_MASK; index = xindex & AA_X_INDEX_MASK; - if (xtype == AA_X_TABLE && index > table_size) + if (xtype == AA_X_TABLE && index >= table_size) return 0; return 1; } diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index b980a6ce5c7..3db2bf1f0a6 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "evm.h" int evm_initialized; @@ -128,7 +129,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, xattr_value_len, calc.digest); if (rc) break; - rc = memcmp(xattr_data->digest, calc.digest, + rc = crypto_memneq(xattr_data->digest, calc.digest, sizeof(calc.digest)); if (rc) rc = -EINVAL; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 971fd058570..81c0a5f248b 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -5623,7 +5623,7 @@ static int selinux_setprocattr(struct task_struct *p, return error; /* Obtain a SID for the context, if one was specified. 
*/ - if (size && str[1] && str[1] != '\n') { + if (size && str[0] && str[0] != '\n') { if (str[size-1] == '\n') { str[size-1] = 0; size--; diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 08865dcbf5f..d449dde1bf5 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1909,6 +1909,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, info.output_pool != client->pool->size)) { if (snd_seq_write_pool_allocated(client)) { /* remove all existing cells */ + snd_seq_pool_mark_closing(client->pool); snd_seq_queue_client_leave_cells(client->number); snd_seq_pool_done(client->pool); } diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 0d75afa786b..490b697e83f 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo) return; *fifo = NULL; + if (f->pool) + snd_seq_pool_mark_closing(f->pool); + snd_seq_fifo_clear(f); /* wake up clients if any */ @@ -137,6 +140,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, f->tail = cell; if (f->head == NULL) f->head = cell; + cell->next = NULL; f->cells++; spin_unlock_irqrestore(&f->lock, flags); @@ -216,6 +220,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, spin_lock_irqsave(&f->lock, flags); cell->next = f->head; f->head = cell; + if (!f->tail) + f->tail = cell; f->cells++; spin_unlock_irqrestore(&f->lock, flags); } @@ -261,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) /* NOTE: overflow flag is not cleared */ spin_unlock_irqrestore(&f->lock, flags); + /* close the old pool and wait until all users are gone */ + snd_seq_pool_mark_closing(oldpool); + snd_use_lock_sync(&f->use_lock); + /* release cells in old pool */ for (cell = oldhead; cell; cell = next) { next = cell->next; diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c index 2cfe50c71a9..8a6b7baafa3 100644 --- a/sound/core/seq/seq_lock.c +++ b/sound/core/seq/seq_lock.c @@ -28,19 +28,16 @@ /* wait until all locks are released */ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) { - int max_count = 5 * HZ; + int warn_count = 5 * HZ; if (atomic_read(lockp) < 0) { printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line); return; } while (atomic_read(lockp) > 0) { - if (max_count == 0) { - snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line); - break; - } + if (warn_count-- == 0) + pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line); schedule_timeout_uninterruptible(1); - max_count--; } } diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index f478f770bf5..8c510781558 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -411,32 +411,33 @@ int snd_seq_pool_init(struct snd_seq_pool *pool) return 0; } +/* refuse the further insertion to the pool */ +void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) +{ + unsigned long flags; + + if (snd_BUG_ON(!pool)) + return; + spin_lock_irqsave(&pool->lock, flags); + pool->closing = 1; + spin_unlock_irqrestore(&pool->lock, flags); +} + /* remove events */ int snd_seq_pool_done(struct snd_seq_pool *pool) { unsigned long flags; struct snd_seq_event_cell *ptr; - int max_count = 5 * HZ; if (snd_BUG_ON(!pool)) return -EINVAL; /* wait for closing all threads */ - spin_lock_irqsave(&pool->lock, flags); - pool->closing = 1; - 
spin_unlock_irqrestore(&pool->lock, flags); - if (waitqueue_active(&pool->output_sleep)) wake_up(&pool->output_sleep); - while (atomic_read(&pool->counter) > 0) { - if (max_count == 0) { - snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); - break; - } + while (atomic_read(&pool->counter) > 0) schedule_timeout_uninterruptible(1); - max_count--; - } /* release all resources */ spin_lock_irqsave(&pool->lock, flags); @@ -490,6 +491,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool) *ppool = NULL; if (pool == NULL) return 0; + snd_seq_pool_mark_closing(pool); snd_seq_pool_done(pool); kfree(pool); return 0; diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h index 4a2ec779b8a..32f959c1778 100644 --- a/sound/core/seq/seq_memory.h +++ b/sound/core/seq/seq_memory.h @@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool) int snd_seq_pool_init(struct snd_seq_pool *pool); /* done pool - free events */ +void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); int snd_seq_pool_done(struct snd_seq_pool *pool); /* create pool */ diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 4c9aa462de9..17fe04d892f 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -183,6 +183,8 @@ void __exit snd_seq_queues_delete(void) } } +static void queue_use(struct snd_seq_queue *queue, int client, int use); + /* allocate a new queue - * return queue index value or negative value for error */ @@ -194,11 +196,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) if (q == NULL) return -ENOMEM; q->info_flags = info_flags; + queue_use(q, client, 1); if (queue_list_add(q) < 0) { queue_delete(q); return -ENOMEM; } - snd_seq_queue_use(q->queue, client, 1); /* use this queue */ return q->queue; } @@ -504,19 +506,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client, return result; } - -/* use or unuse this queue - - * if it is the first client, starts the timer. - * if it is not longer used by any clients, stop the timer. - */ -int snd_seq_queue_use(int queueid, int client, int use) +/* use or unuse this queue */ +static void queue_use(struct snd_seq_queue *queue, int client, int use) { - struct snd_seq_queue *queue; - - queue = queueptr(queueid); - if (queue == NULL) - return -EINVAL; - mutex_lock(&queue->timer_mutex); if (use) { if (!test_and_set_bit(client, queue->clients_bitmap)) queue->clients++; @@ -531,6 +523,21 @@ int snd_seq_queue_use(int queueid, int client, int use) } else { snd_seq_timer_close(queue); } +} + +/* use or unuse this queue - + * if it is the first client, starts the timer. + * if it is not longer used by any clients, stop the timer. 
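/*
 * Sketch of the shutdown ordering the pool hunks above introduce: mark the
 * pool as closing first so no new cells can be handed out, then wait until
 * every outstanding cell has been returned before freeing anything. The
 * kernel serializes these fields with pool->lock and sleeps in the wait loop;
 * this single-threaded stand-in only illustrates the ordering, and every name
 * below is made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct pool {
	bool closing;	/* set first: refuse any further insertions */
	int  counter;	/* cells currently handed out to users */
};

static bool pool_get(struct pool *p)
{
	if (p->closing)
		return false;	/* shutting down: no new cells */
	p->counter++;
	return true;
}

static void pool_put(struct pool *p)
{
	p->counter--;
}

static void pool_mark_closing(struct pool *p)
{
	p->closing = true;
}

static void pool_done(struct pool *p)
{
	/* the kernel sleeps here until real users return their cells */
	while (p->counter > 0)
		pool_put(p);	/* stand-in for waiting on those users */
}

int main(void)
{
	struct pool p = { false, 0 };

	pool_get(&p);
	pool_mark_closing(&p);
	printf("get after closing: %d\n", pool_get(&p));	/* prints 0 */
	pool_done(&p);
	return 0;
}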
+ */ +int snd_seq_queue_use(int queueid, int client, int use) +{ + struct snd_seq_queue *queue; + + queue = queueptr(queueid); + if (queue == NULL) + return -EINVAL; + mutex_lock(&queue->timer_mutex); + queue_use(queue, client, use); mutex_unlock(&queue->timer_mutex); queuefree(queue); return 0; diff --git a/sound/core/timer.c b/sound/core/timer.c index 749857a889e..98904d8e51d 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1659,9 +1659,21 @@ static int snd_timer_user_params(struct file *file, return -EBADFD; if (copy_from_user(¶ms, _params, sizeof(params))) return -EFAULT; - if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { - err = -EINVAL; - goto _end; + if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) { + u64 resolution; + + if (params.ticks < 1) { + err = -EINVAL; + goto _end; + } + + /* Don't allow resolution less than 1ms */ + resolution = snd_timer_resolution(tu->timeri); + resolution *= params.ticks; + if (resolution < 1000000) { + err = -EINVAL; + goto _end; + } } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c index 6ac40beb49d..7f414b05644 100644 --- a/sound/pci/ctxfi/cthw20k1.c +++ b/sound/pci/ctxfi/cthw20k1.c @@ -27,12 +27,6 @@ #include "cthw20k1.h" #include "ct20k1reg.h" -#if BITS_PER_LONG == 32 -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ -#else -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ -#endif - struct hw20k1 { struct hw hw; spinlock_t reg_20k1_lock; @@ -1903,19 +1897,18 @@ static int hw_card_start(struct hw *hw) { int err; struct pci_dev *pci = hw->pci; + const unsigned int dma_bits = BITS_PER_LONG; err = pci_enable_device(pci); if (err < 0) return err; /* Set DMA transfer mask */ - if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 || - pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) { - printk(KERN_ERR "architecture does not support PCI " - "busmaster DMA with mask 0x%llx\n", - CT_XFI_DMA_MASK); - err = -ENXIO; - goto error1; + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); } if (!hw->io_base) { diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c index b1438861d38..5828a3ec58b 100644 --- a/sound/pci/ctxfi/cthw20k2.c +++ b/sound/pci/ctxfi/cthw20k2.c @@ -26,12 +26,6 @@ #include "cthw20k2.h" #include "ct20k2reg.h" -#if BITS_PER_LONG == 32 -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ -#else -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ -#endif - struct hw20k2 { struct hw hw; /* for i2c */ @@ -2026,18 +2020,18 @@ static int hw_card_start(struct hw *hw) int err = 0; struct pci_dev *pci = hw->pci; unsigned int gctl; + const unsigned int dma_bits = BITS_PER_LONG; err = pci_enable_device(pci); if (err < 0) return err; /* Set DMA transfer mask */ - if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 || - pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) { - printk(KERN_ERR "ctxfi: architecture does not support PCI " - "busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK); - err = -ENXIO; - goto error1; + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); } if (!hw->io_base) { diff --git a/sound/pci/hda/patch_realtek.c 
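/*
 * Sketch of the arithmetic behind the snd_timer_user_params hunk above: the
 * requested period is the per-tick resolution in nanoseconds multiplied by
 * the tick count, computed in 64 bits so a huge tick count cannot overflow,
 * and anything finer than 1 ms is refused. check_period() is an invented
 * helper standing in for the real snd_timer_resolution() based check.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_PERIOD_NS 1000000ULL	/* 1 ms */

/* resolution_ns: length of one tick, ticks: requested period in ticks.
 * Returns 0 if the resulting period is acceptable, -1 otherwise. */
static int check_period(uint32_t resolution_ns, uint32_t ticks)
{
	uint64_t period;

	if (ticks < 1)
		return -1;
	period = (uint64_t)resolution_ns * ticks;
	if (period < MIN_PERIOD_NS)
		return -1;	/* finer than 1 ms: refuse, like the patch does */
	return 0;
}

int main(void)
{
	printf("%d\n", check_period(10000, 1));		/* 10 us period: rejected */
	printf("%d\n", check_period(10000, 200));	/* 2 ms period: accepted */
	return 0;
}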
b/sound/pci/hda/patch_realtek.c index babbf238a64..af27d67efa8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2185,6 +2185,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), diff --git a/sound/usb/card.c b/sound/usb/card.c index 36737a07806..90341c70a73 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -205,7 +205,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int if (! snd_usb_parse_audio_interface(chip, interface)) { usb_set_interface(dev, interface, 0); /* reset the current interface */ usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); - return -EINVAL; } return 0; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index ab9ae07454e..7e93d60973a 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ case USB_ID(0x046d, 0x0991): + case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */ /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384. - * Proboly there is some logitech magic behind this number --fishor + * Probably there is some logitech magic behind this number --fishor */ if (!strcmp(kctl->id.name, "Mic Capture Volume")) { snd_printk(KERN_INFO diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ab3ed4af146..9f2afbd3370 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -330,7 +330,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) { if (!trace->duration_filter) { - trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout); + trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, stdout); printf("%-70s\n", ttrace->entry_str); } } else @@ -364,7 +364,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, } else if (trace->duration_filter) goto out; - trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout); + trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, stdout); if (ttrace->entry_pending) { printf("%-70s", ttrace->entry_str); diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 8715a1006d0..ae061a45fa0 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -90,7 +90,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) if (err) die("error registering py script extension"); - scripting_context = malloc(sizeof(struct scripting_context)); + if (scripting_context == NULL) + scripting_context = malloc(sizeof(*scripting_context)); } #ifdef NO_LIBPYTHON @@ -153,7 +154,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) if (err) die("error registering pl script 
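/*
 * Sketch of the trace-event-scripting fix above: the context block is shared
 * by the Perl and Python backends, so the registration hook may only allocate
 * it when it does not exist yet, otherwise the second registration would leak
 * the first allocation. Userspace illustration; register_backend() is an
 * invented stand-in for the two register_*_scripting() helpers.
 */
#include <stdio.h>
#include <stdlib.h>

struct scripting_context {
	void *event_data;
};

static struct scripting_context *scripting_context;

/* Called once per registered scripting backend; only the first call
 * allocates, later calls reuse the existing context. */
static void register_backend(void)
{
	if (scripting_context == NULL)
		scripting_context = malloc(sizeof(*scripting_context));
}

int main(void)
{
	register_backend();
	struct scripting_context *first = scripting_context;

	register_backend();	/* second backend: no new allocation, no leak */
	printf("same context reused: %d\n", first == scripting_context);

	free(scripting_context);
	return 0;
}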
extension"); - scripting_context = malloc(sizeof(struct scripting_context)); + if (scripting_context == NULL) + scripting_context = malloc(sizeof(*scripting_context)); } #ifdef NO_LIBPERL diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 0d7fd8b5154..a0a8314df4b 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2375,7 +2375,7 @@ sub do_run_test { } waitpid $child_pid, 0; - $child_exit = $?; + $child_exit = $? >> 8; if (!$bug && $in_bisect) { if (defined($bisect_ret_good)) { diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 64ee720b75c..362908c5f6c 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -753,6 +753,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) if (ret < 0) goto unlock_fail; + kvm->buses[bus_idx]->ioeventfd_count++; list_add_tail(&p->list, &kvm->ioeventfds); mutex_unlock(&kvm->slots_lock); @@ -798,6 +799,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); + if (kvm->buses[bus_idx]) + kvm->buses[bus_idx]->ioeventfd_count--; ioeventfd_release(p); ret = 0; break; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f71c4ad425c..0715673b696 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -607,8 +607,11 @@ static void kvm_destroy_vm(struct kvm *kvm) list_del(&kvm->vm_list); raw_spin_unlock(&kvm_lock); kvm_free_irq_routing(kvm); - for (i = 0; i < KVM_NR_BUSES; i++) - kvm_io_bus_destroy(kvm->buses[i]); + for (i = 0; i < KVM_NR_BUSES; i++) { + if (kvm->buses[i]) + kvm_io_bus_destroy(kvm->buses[i]); + kvm->buses[i] = NULL; + } kvm_coalesced_mmio_free(kvm); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); @@ -2885,6 +2888,8 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, }; bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); + if (!bus) + return -ENOMEM; idx = kvm_io_bus_get_first_dev(bus, addr, len); if (idx < 0) return -EOPNOTSUPP; @@ -2913,6 +2918,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, }; bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); + if (!bus) + return -ENOMEM; idx = kvm_io_bus_get_first_dev(bus, addr, len); if (idx < 0) return -EOPNOTSUPP; @@ -2934,7 +2941,11 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, struct kvm_io_bus *new_bus, *bus; bus = kvm->buses[bus_idx]; - if (bus->dev_count > NR_IOBUS_DEVS - 1) + if (!bus) + return -ENOMEM; + + /* exclude ioeventfd which is limited by maximum fd */ + if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) return -ENOSPC; new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * @@ -2952,37 +2963,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, } /* Caller must hold slots_lock. 
 */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			      struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			       struct kvm_io_device *dev)
 {
-	int i, r;
+	int i;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	r = -ENOENT;
+	if (!bus)
+		return;
+
 	for (i = 0; i < bus->dev_count; i++)
 		if (bus->range[i].dev == dev) {
-			r = 0;
 			break;
 		}
 
-	if (r)
-		return r;
+	if (i == bus->dev_count)
+		return;
 
 	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
+	if (!new_bus) {
+		pr_err("kvm: failed to shrink bus, removing it completely\n");
+		goto broken;
+	}
 
 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
 	new_bus->dev_count--;
 	memcpy(new_bus->range + i, bus->range + i + 1,
 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
-	return r;
+	return;
 }
 
 static struct notifier_block kvm_cpu_notifier = {
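/*
 * Sketch of the copy pattern used when shrinking the I/O bus above: allocate
 * an array one entry shorter, copy the entries in front of the victim, then
 * the entries behind it, and the entry at index i simply disappears.
 * Userspace illustration with invented names and plain ints instead of
 * struct kvm_io_range.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of src[0..count-1] with src[victim] dropped. */
static int *copy_without(const int *src, size_t count, size_t victim)
{
	int *dst = malloc((count - 1) * sizeof(*dst));

	if (!dst)
		return NULL;
	memcpy(dst, src, victim * sizeof(*dst));
	memcpy(dst + victim, src + victim + 1,
	       (count - 1 - victim) * sizeof(*dst));
	return dst;
}

int main(void)
{
	int bus[] = { 10, 20, 30, 40 };
	int *smaller = copy_without(bus, 4, 1);	/* drop the entry at index 1 */

	if (!smaller)
		return 1;
	for (size_t i = 0; i < 3; i++)
		printf("%d\n", smaller[i]);	/* prints 10 30 40 */
	free(smaller);
	return 0;
}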