diff --git a/init_rebase.sh b/init_rebase.sh
index 59a93f4..2d67c76 100755
--- a/init_rebase.sh
+++ b/init_rebase.sh
@@ -1,5 +1,5 @@
 KERNEL_MAJOR_VERSION=4.9
-KERNEL_VERSION=4.9.124
+KERNEL_VERSION=4.9.125
 
 echo "Setting up rebase directory..."
 rm -rf rebase
diff --git a/kernel/patch-4.9.125.xz b/kernel/patch-4.9.125.xz
new file mode 100644
index 0000000..ceb4373
Binary files /dev/null and b/kernel/patch-4.9.125.xz differ
diff --git a/make_release.sh b/make_release.sh
index 99856a3..981643c 100755
--- a/make_release.sh
+++ b/make_release.sh
@@ -1,6 +1,6 @@
 DATE=`date +%Y-%m-%d`
 KERNEL_MAJOR_VERSION=4.9
-KERNEL_VERSION=4.9.124
+KERNEL_VERSION=4.9.125
 
 echo "Setting up release directory..."
 mkdir release
diff --git a/omitted-patches/included-patches/include-4.9.125.patch b/omitted-patches/included-patches/include-4.9.125.patch
new file mode 100644
index 0000000..e523bb9
--- /dev/null
+++ b/omitted-patches/included-patches/include-4.9.125.patch
@@ -0,0 +1,2932 @@
+diff --git a/Makefile b/Makefile
+index 53d57acfc17e..aef09ca7a924 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 124
++SUBLEVEL = 125
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index d5da2115d78a..03d6bb0f4e13 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -17,8 +17,11 @@
+ #ifndef __ASM_ARC_UDELAY_H
+ #define __ASM_ARC_UDELAY_H
+ 
++#include <asm-generic/types.h>
+ #include <asm/param.h>		/* HZ */
+ 
++extern unsigned long loops_per_jiffy;
++
+ static inline void __delay(unsigned long loops)
+ {
+ 	__asm__ __volatile__(
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index bbdfeb31dee6..fefe357c3d31 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -840,7 +840,7 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+ 	unsigned long pfn)
+ {
+-	unsigned int paddr = pfn << PAGE_SHIFT;
++	phys_addr_t paddr = pfn << PAGE_SHIFT;
+ 
+ 	u_vaddr &= PAGE_MASK;
+ 
+@@ -860,8 +860,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ 	unsigned long u_vaddr)
+ {
+ 	/* TBD: do we really need to clear the kernel mapping */
+-	__flush_dcache_page(page_address(page), u_vaddr);
+-	__flush_dcache_page(page_address(page), page_address(page));
++	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
++	__flush_dcache_page((phys_addr_t)page_address(page),
++			    (phys_addr_t)page_address(page));
+ 
+ }
+ 
+diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
+index 9d6718c1a199..3c401ce0351e 100644
+--- a/arch/arc/plat-eznps/include/plat/ctop.h
++++ b/arch/arc/plat-eznps/include/plat/ctop.h
+@@ -21,6 +21,7 @@
+ #error "Incorrect ctop.h include"
+ #endif
+ 
++#include <linux/types.h>
+ #include <soc/nps/common.h>
+ 
+ /* core auxiliary registers */
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index f5077ea7af6d..30bcae0aef2a 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -274,7 +274,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+ 		break;
+ 	case KPROBE_HIT_SS:
+ 	case KPROBE_REENTER:
+-		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
++		pr_warn("Unrecoverable kprobe detected.\n");
+ 		dump_kprobe(p);
+ 		BUG();
+ 		break;
+diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
+index 8c9cbf13d32a..6054d49e608e 100644
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -212,12 +212,6 @@
static int __init bcm47xx_cpu_fixes(void) + */ + if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) + cpu_wait = NULL; +- +- /* +- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" +- * Enable ExternalSync for sync instruction to take effect +- */ +- set_c0_config7(MIPS_CONF7_ES); + break; + #endif + } +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h +index 22a6782f84f5..df78b2ca70eb 100644 +--- a/arch/mips/include/asm/mipsregs.h ++++ b/arch/mips/include/asm/mipsregs.h +@@ -663,8 +663,6 @@ + #define MIPS_CONF7_WII (_ULCAST_(1) << 31) + + #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +-/* ExternalSync */ +-#define MIPS_CONF7_ES (_ULCAST_(1) << 8) + + #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) + #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +@@ -2643,7 +2641,6 @@ __BUILD_SET_C0(status) + __BUILD_SET_C0(cause) + __BUILD_SET_C0(config) + __BUILD_SET_C0(config5) +-__BUILD_SET_C0(config7) + __BUILD_SET_C0(intcontrol) + __BUILD_SET_C0(intctl) + __BUILD_SET_C0(srsmap) +diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h +index 0d36c87acbe2..ad6f019ff776 100644 +--- a/arch/mips/include/asm/processor.h ++++ b/arch/mips/include/asm/processor.h +@@ -141,7 +141,7 @@ struct mips_fpu_struct { + + #define NUM_DSP_REGS 6 + +-typedef __u32 dspreg_t; ++typedef unsigned long dspreg_t; + + struct mips_dsp_state { + dspreg_t dspr[NUM_DSP_REGS]; +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 4f64913b4b4c..b702ba3a0df3 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -876,7 +876,7 @@ long arch_ptrace(struct task_struct *child, long request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c +index b1e945738138..4840af169683 100644 +--- a/arch/mips/kernel/ptrace32.c ++++ b/arch/mips/kernel/ptrace32.c +@@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c +index 111ad475aa0c..4c2483f410c2 100644 +--- a/arch/mips/lib/multi3.c ++++ b/arch/mips/lib/multi3.c +@@ -4,12 +4,12 @@ + #include "libgcc.h" + + /* +- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that +- * specific case only we'll implement it here. ++ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for ++ * that specific case only we implement that intrinsic here. 
+ * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) ++#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8) + + /* multiply 64-bit values, low 64-bits returned */ + static inline long long notrace dmulu(long long a, long long b) +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c +index c0e817f35e69..bdbbc320b006 100644 +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -326,6 +326,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, + u64 imm64; + u8 *func; + u32 true_cond; ++ u32 tmp_idx; + + /* + * addrs[] maps a BPF bytecode address into a real offset from +@@ -685,11 +686,7 @@ emit_clear: + case BPF_STX | BPF_XADD | BPF_W: + /* Get EA into TMP_REG_1 */ + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not word-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); ++ tmp_idx = ctx->idx * 4; + /* load value from memory into TMP_REG_2 */ + PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + /* add value from src_reg into this */ +@@ -697,32 +694,16 @@ emit_clear: + /* store result back */ + PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); + /* we're done if this succeeded */ +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); +- /* otherwise, let's try once more */ +- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- /* exit if the store was not successful */ +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + /* *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not doubleword-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4)); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); +- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); ++ tmp_idx = ctx->idx * 4; + PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); + PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + + /* +diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h +index 998b61cd0e56..4b39ba700d32 100644 +--- a/arch/s390/include/asm/qdio.h ++++ b/arch/s390/include/asm/qdio.h +@@ -261,7 +261,6 @@ struct qdio_outbuf_state { + void *user; + }; + +-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00 + #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 + + #define CHSC_AC1_INITIATE_INPUTQ 0x80 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index 661d9fe63c43..ba2f21873cbd 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -462,6 +462,8 @@ retry: + /* No reason to continue if interrupted by SIGKILL. 
*/ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { + fault = VM_FAULT_SIGNAL; ++ if (flags & FAULT_FLAG_RETRY_NOWAIT) ++ goto out_up; + goto out; + } + if (unlikely(fault & VM_FAULT_ERROR)) +diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c +index f576f1073378..0dac2640c3a7 100644 +--- a/arch/s390/numa/numa.c ++++ b/arch/s390/numa/numa.c +@@ -133,26 +133,14 @@ void __init numa_setup(void) + { + pr_info("NUMA mode: %s\n", mode->name); + nodes_clear(node_possible_map); ++ /* Initially attach all possible CPUs to node 0. */ ++ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); + if (mode->setup) + mode->setup(); + numa_setup_memory(); + memblock_dump_all(); + } + +-/* +- * numa_init_early() - Initialization initcall +- * +- * This runs when only one CPU is online and before the first +- * topology update is called for by the scheduler. +- */ +-static int __init numa_init_early(void) +-{ +- /* Attach all possible CPUs to node 0 for now. */ +- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); +- return 0; +-} +-early_initcall(numa_init_early); +- + /* + * numa_init_late() - Initialization initcall + * +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c +index 03a1d5976ff5..87574110394d 100644 +--- a/arch/s390/pci/pci.c ++++ b/arch/s390/pci/pci.c +@@ -407,6 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + hwirq = 0; + for_each_pci_msi_entry(msi, pdev) { + rc = -EIO; ++ if (hwirq >= msi_vecs) ++ break; + irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ + if (irq < 0) + goto out_msi; +diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c +index 24384e1dc33d..a7aeb036b070 100644 +--- a/arch/sparc/kernel/pcic.c ++++ b/arch/sparc/kernel/pcic.c +@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) + { + struct pci_dev *dev; + int i, has_io, has_mem; +- unsigned int cmd; ++ unsigned int cmd = 0; + struct linux_pcic *pcic; + /* struct linux_pbm_info* pbm = &pcic->pbm; */ + int node; +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 4669b3a931ed..cda8e14bd72a 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -101,9 +101,13 @@ define cmd_check_data_rel + done + endef + ++# We need to run two commands under "if_changed", so merge them into a ++# single invocation. 
++quiet_cmd_check-and-link-vmlinux = LD      $@
++      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
++
+ $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+-	$(call if_changed,check_data_rel)
+-	$(call if_changed,ld)
++	$(call if_changed,check-and-link-vmlinux)
+ 
+ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+ $(obj)/vmlinux.bin: vmlinux FORCE
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index b26ee32f73e8..fd4484ae3ffc 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -578,7 +578,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
+ {
+ 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
+ 	struct perf_event *event = pcpu->event;
+-	struct hw_perf_event *hwc = &event->hw;
++	struct hw_perf_event *hwc;
+ 	struct perf_sample_data data;
+ 	struct perf_raw_record raw;
+ 	struct pt_regs regs;
+@@ -601,6 +601,10 @@ fail:
+ 		return 0;
+ 	}
+ 
++	if (WARN_ON_ONCE(!event))
++		goto fail;
++
++	hwc = &event->hw;
+ 	msr = hwc->config_base;
+ 	buf = ibs_data.regs;
+ 	rdmsrl(msr, *buf);
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index 5b1177f5a963..508a062e6cf1 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -32,7 +32,8 @@ extern inline unsigned long native_save_fl(void)
+ 	return flags;
+ }
+ 
+-static inline void native_restore_fl(unsigned long flags)
++extern inline void native_restore_fl(unsigned long flags);
++extern inline void native_restore_fl(unsigned long flags)
+ {
+ 	asm volatile("push %0 ; popf"
+ 		     : /* no output */
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 85f854b98a9d..3576ece9ef88 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -15,6 +15,7 @@
+ #include <linux/bug.h>
+ #include <linux/nmi.h>
+ #include <linux/sysfs.h>
++#include <linux/kasan.h>
+ 
+ #include <asm/stacktrace.h>
+ #include <asm/unwind.h>
+@@ -229,7 +230,10 @@
+ 	 * We're not going to return, but we might be on an IST stack or
+ 	 * have very little stack space left. Rewind the stack and kill
+ 	 * the task.
++	 * Before we rewind the stack, we have to tell KASAN that we're going to
++	 * reuse the task stack and that existing poisons are invalid.
+ */ ++ kasan_unpoison_task_stack(current); + rewind_stack_do_exit(signr); + } + NOKPROBE_SYMBOL(oops_end); +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index dffe81d3c261..a2661814bde0 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -360,6 +360,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + start_thread_common(regs, new_ip, new_sp, + __USER_CS, __USER_DS, 0); + } ++EXPORT_SYMBOL_GPL(start_thread); + + #ifdef CONFIG_COMPAT + void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) +diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c +index 8e2e4757adcb..5a42ae4078c2 100644 +--- a/drivers/base/power/clock_ops.c ++++ b/drivers/base/power/clock_ops.c +@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); + int of_pm_clk_add_clks(struct device *dev) + { + struct clk **clks; +- unsigned int i, count; ++ int i, count; + int ret; + + if (!dev || !dev->of_node) +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index 07b77fb102a1..987e8f503522 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -2536,7 +2536,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, + if (!CDROM_CAN(CDC_SELECT_DISC) || + (arg == CDSL_CURRENT || arg == CDSL_NONE)) + return cdi->ops->drive_status(cdi, CDSL_CURRENT); +- if (((int)arg >= cdi->capacity)) ++ if (arg >= cdi->capacity) + return -EINVAL; + return cdrom_slot_status(cdi, arg); + } +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c +index 8387c7a40bda..05671c03efe2 100644 +--- a/drivers/clk/rockchip/clk-rk3399.c ++++ b/drivers/clk/rockchip/clk-rk3399.c +@@ -629,7 +629,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { + MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(31), 0, 2, MFLAGS), + COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT, +- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS, ++ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS, + RK3399_CLKGATE_CON(8), 12, GFLAGS), + + /* uart */ +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +index a68f94daf9b6..32ab5c32834b 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +@@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work) + else + status = connector_status_disconnected; + ++ /* ++ * The bridge resets its registers on unplug. So when we get a plug ++ * event and we're already supposed to be powered, cycle the bridge to ++ * restore its state. 
++ */ ++ if (status == connector_status_connected && ++ adv7511->connector.status == connector_status_disconnected && ++ adv7511->powered) { ++ regcache_mark_dirty(adv7511->regmap); ++ adv7511_power_on(adv7511); ++ } ++ + if (adv7511->connector.status != status) { + adv7511->connector.status = status; + drm_kms_helper_hotplug_event(adv7511->connector.dev); +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c +index 3ce391c239b0..67881e5517fb 100644 +--- a/drivers/gpu/drm/imx/imx-ldb.c ++++ b/drivers/gpu/drm/imx/imx-ldb.c +@@ -634,6 +634,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + return PTR_ERR(imx_ldb->regmap); + } + ++ /* disable LDB by resetting the control register to POR default */ ++ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); ++ + imx_ldb->dev = dev; + + if (of_id) +@@ -675,14 +678,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + if (ret || i < 0 || i > 1) + return -EINVAL; + ++ if (!of_device_is_available(child)) ++ continue; ++ + if (dual && i > 0) { + dev_warn(dev, "dual-channel mode, ignoring second output\n"); + continue; + } + +- if (!of_device_is_available(child)) +- continue; +- + channel = &imx_ldb->channel[i]; + channel->ldb = imx_ldb; + channel->chno = i; +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index 39d0fdcb17d2..6a7994a79f55 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -217,7 +217,7 @@ static int udl_fb_open(struct fb_info *info, int user) + + struct fb_deferred_io *fbdefio; + +- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); ++ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + + if (fbdefio) { + fbdefio->delay = DL_DEFIO_WRITE_DELAY; +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c +index 873f010d9616..10e2c198ad72 100644 +--- a/drivers/gpu/drm/udl/udl_main.c ++++ b/drivers/gpu/drm/udl/udl_main.c +@@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev) + struct list_head *node; + struct urb_node *unode; + struct urb *urb; +- int ret; + unsigned long flags; + + DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); + + /* keep waiting and freeing, until we've got 'em all */ + while (count--) { +- +- /* Getting interrupted means a leak, but ok at shutdown*/ +- ret = down_interruptible(&udl->urbs.limit_sem); +- if (ret) +- break; ++ down(&udl->urbs.limit_sem); + + spin_lock_irqsave(&udl->urbs.lock, flags); + +@@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev) + static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + { + struct udl_device *udl = dev->dev_private; +- int i = 0; + struct urb *urb; + struct urb_node *unode; + char *buf; ++ size_t wanted_size = count * size; + + spin_lock_init(&udl->urbs.lock); + ++retry: + udl->urbs.size = size; + INIT_LIST_HEAD(&udl->urbs.list); + +- while (i < count) { ++ sema_init(&udl->urbs.limit_sem, 0); ++ udl->urbs.count = 0; ++ udl->urbs.available = 0; ++ ++ while (udl->urbs.count * size < wanted_size) { + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); + if (!unode) + break; +@@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + } + unode->urb = urb; + +- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL, ++ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, + &urb->transfer_dma); + if (!buf) { + kfree(unode); + usb_free_urb(urb); ++ if (size > PAGE_SIZE) 
{ ++ size /= 2; ++ udl_free_urb_list(dev); ++ goto retry; ++ } + break; + } + +@@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + + list_add_tail(&unode->entry, &udl->urbs.list); + +- i++; ++ up(&udl->urbs.limit_sem); ++ udl->urbs.count++; ++ udl->urbs.available++; + } + +- sema_init(&udl->urbs.limit_sem, i); +- udl->urbs.count = i; +- udl->urbs.available = i; +- +- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size); ++ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size); + +- return i; ++ return udl->urbs.count; + } + + struct urb *udl_get_urb(struct drm_device *dev) +diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c +index 9e7ef5cf5d49..b2d8b63176db 100644 +--- a/drivers/i2c/busses/i2c-davinci.c ++++ b/drivers/i2c/busses/i2c-davinci.c +@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) + /* + * It's not always possible to have 1 to 2 ratio when d=7, so fall back + * to minimal possible clkh in this case. ++ * ++ * Note: ++ * CLKH is not allowed to be 0, in this case I2C clock is not generated ++ * at all + */ +- if (clk >= clkl + d) { ++ if (clk > clkl + d) { + clkh = clk - clkl - d; + clkl -= d; + } else { +- clkh = 0; ++ clkh = 1; + clkl = clk - (d << 1); + } + +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 60f5a8ded8dd..8904491dfda4 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -304,7 +304,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, + goto out; + } + +- *offset = 0; + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); + if (!cb) { + rets = -ENOMEM; +diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c +index c7427bdd3a4b..2949a381a94d 100644 +--- a/drivers/net/can/mscan/mpc5xxx_can.c ++++ b/drivers/net/can/mscan/mpc5xxx_can.c +@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, + return 0; + } + cdm = of_iomap(np_cdm, 0); ++ if (!cdm) { ++ of_node_put(np_cdm); ++ dev_err(&ofdev->dev, "can't map clock node!\n"); ++ return 0; ++ } + + if (in_8(&cdm->ipb_clk_sel) & 0x1) + freq *= 2; +diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig +index 5b7658bcf020..5c3ef9fc8207 100644 +--- a/drivers/net/ethernet/3com/Kconfig ++++ b/drivers/net/ethernet/3com/Kconfig +@@ -32,7 +32,7 @@ config EL3 + + config 3C515 + tristate "3c515 ISA \"Fast EtherLink\"" +- depends on ISA && ISA_DMA_API ++ depends on ISA && ISA_DMA_API && !PPC32 + ---help--- + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet + network card, say Y here. +diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig +index 0038709fd317..ec59425fdbff 100644 +--- a/drivers/net/ethernet/amd/Kconfig ++++ b/drivers/net/ethernet/amd/Kconfig +@@ -44,7 +44,7 @@ config AMD8111_ETH + + config LANCE + tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. + Some LinkSys cards are of this type. +@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN + + config NI65 + tristate "NI6510 support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. 
+ +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +index a3200ea6d765..85e7177c479f 100644 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -1678,6 +1678,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) + skb = build_skb(page_address(page) + adapter->rx_page_offset, + adapter->rx_frag_size); + if (likely(skb)) { ++ skb_reserve(skb, NET_SKB_PAD); + adapter->rx_page_offset += adapter->rx_frag_size; + if (adapter->rx_page_offset >= PAGE_SIZE) + adapter->rx_page = NULL; +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +index 5f19427c7b27..8aecd8ef6542 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +@@ -3367,14 +3367,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } else if ((info->flow_type == UDP_V6_FLOW) && + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } + return 0; + +@@ -3488,7 +3492,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + } + +- return bnx2x_config_rss_eth(bp, false); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_config_rss_eth(bp, false); ++ ++ return 0; + } + + /** +diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig +index 5ab912937aff..ec0b545197e2 100644 +--- a/drivers/net/ethernet/cirrus/Kconfig ++++ b/drivers/net/ethernet/cirrus/Kconfig +@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS + config CS89x0 + tristate "CS89x0 support" + depends on ISA || EISA || ARM ++ depends on !PPC32 + ---help--- + Support for CS89x0 chipset based Ethernet cards. 
If you have a + network (Ethernet) card of this type, say Y and read the file +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 2e9bab45d419..f7e7b79c6050 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -1842,10 +1842,32 @@ static int enic_stop(struct net_device *netdev) + return 0; + } + ++static int _enic_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ bool running = netif_running(netdev); ++ int err = 0; ++ ++ ASSERT_RTNL(); ++ if (running) { ++ err = enic_stop(netdev); ++ if (err) ++ return err; ++ } ++ ++ netdev->mtu = new_mtu; ++ ++ if (running) { ++ err = enic_open(netdev); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ + static int enic_change_mtu(struct net_device *netdev, int new_mtu) + { + struct enic *enic = netdev_priv(netdev); +- int running = netif_running(netdev); + + if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) + return -EINVAL; +@@ -1853,20 +1875,12 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + return -EOPNOTSUPP; + +- if (running) +- enic_stop(netdev); +- +- netdev->mtu = new_mtu; +- + if (netdev->mtu > enic->port_mtu) + netdev_warn(netdev, +- "interface MTU (%d) set higher than port MTU (%d)\n", +- netdev->mtu, enic->port_mtu); ++ "interface MTU (%d) set higher than port MTU (%d)\n", ++ netdev->mtu, enic->port_mtu); + +- if (running) +- enic_open(netdev); +- +- return 0; ++ return _enic_change_mtu(netdev, new_mtu); + } + + static void enic_change_mtu_work(struct work_struct *work) +@@ -1874,47 +1888,9 @@ static void enic_change_mtu_work(struct work_struct *work) + struct enic *enic = container_of(work, struct enic, change_mtu_work); + struct net_device *netdev = enic->netdev; + int new_mtu = vnic_dev_mtu(enic->vdev); +- int err; +- unsigned int i; +- +- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); + + rtnl_lock(); +- +- /* Stop RQ */ +- del_timer_sync(&enic->notify_timer); +- +- for (i = 0; i < enic->rq_count; i++) +- napi_disable(&enic->napi[i]); +- +- vnic_intr_mask(&enic->intr[0]); +- enic_synchronize_irqs(enic); +- err = vnic_rq_disable(&enic->rq[0]); +- if (err) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to disable RQ.\n"); +- return; +- } +- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); +- vnic_cq_clean(&enic->cq[0]); +- vnic_intr_clean(&enic->intr[0]); +- +- /* Fill RQ with new_mtu-sized buffers */ +- netdev->mtu = new_mtu; +- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); +- /* Need at least one buffer on ring to get going */ +- if (vnic_rq_desc_used(&enic->rq[0]) == 0) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to alloc receive buffers.\n"); +- return; +- } +- +- /* Start RQ */ +- vnic_rq_enable(&enic->rq[0]); +- napi_enable(&enic->napi[0]); +- vnic_intr_unmask(&enic->intr[0]); +- enic_notify_timer_start(enic); +- ++ (void)_enic_change_mtu(netdev, new_mtu); + rtnl_unlock(); + + netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c +index ddd410a91e13..715776e2cfe5 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c +@@ -313,7 +313,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { +- u32 *p_bins = (u32 *)p_params->bins; ++ u32 
*p_bins = p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); + } +@@ -1182,8 +1182,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) + { +- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct vport_update_ramrod_data *p_ramrod = NULL; ++ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u8 abs_vport_id = 0; +@@ -1219,26 +1219,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); +- memset(bins, 0, sizeof(unsigned long) * +- ETH_MULTICAST_MAC_BINS_IN_REGS); ++ memset(bins, 0, sizeof(bins)); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { +- u32 bit; ++ u32 bit, nbits; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, bins); ++ nbits = sizeof(u32) * BITS_PER_BYTE; ++ bins[bit / nbits] |= 1 << (bit % nbits); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; +- u32 *p_bins = (u32 *)bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; +- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); ++ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h +index e495d62fcc03..14d00173cad0 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h +@@ -156,7 +156,7 @@ struct qed_sp_vport_update_params { + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; +- unsigned long bins[8]; ++ u32 bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +index 8b7d2f963ee1..eaa242df4131 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +@@ -613,6 +613,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + break; + default: + p_link->speed = 0; ++ p_link->link_up = 0; + } + + if (p_link->link_up && p_link->speed) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +index 48bc5c151336..6379bfedc9f0 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +@@ -2157,7 +2157,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index 0645124a887b..faf8215872de 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -786,7 +786,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + resp_size += sizeof(struct pfvf_def_resp_tlv); + + memcpy(p_mcast_tlv->bins, p_params->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ 
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; +@@ -972,7 +972,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, sp_params.bins); ++ sp_params.bins[bit / 32] |= 1 << (bit % 32); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h +index 35db7a28aa13..b962ef8e98ef 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h +@@ -336,7 +336,12 @@ struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + +- u64 bins[8]; ++ /* There are only 256 approx bins, and in HSI they're divided into ++ * 32-bit values. As old VFs used to set-bit to the values on its side, ++ * the upper half of the array is never expected to contain any data. ++ */ ++ u64 bins[4]; ++ u64 obsolete_bins[4]; + }; + + struct vfpf_vport_update_accept_param_tlv { +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +index 63307ea97846..9beea13e2e1f 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +@@ -217,6 +217,7 @@ issue: + ret = of_mdiobus_register(bus, np1); + if (ret) { + mdiobus_free(bus); ++ lp->mii_bus = NULL; + return ret; + } + return 0; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 31a6d87b61b2..0d4440f28f6b 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -946,7 +946,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ +- {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ +diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c +index 299140c04556..04b60ed59ea0 100644 +--- a/drivers/net/wan/lmc/lmc_main.c ++++ b/drivers/net/wan/lmc/lmc_main.c +@@ -1372,7 +1372,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ + case 0x001: + printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); + break; +- case 0x010: ++ case 0x002: + printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); + break; + default: +diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c +index cb987c2ecc6b..87131f663292 100644 +--- a/drivers/net/wireless/broadcom/b43/leds.c ++++ b/drivers/net/wireless/broadcom/b43/leds.c +@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + led->wl = dev->wl; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + atomic_set(&led->state, 0); + + led->led_dev.name = led->name; +diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c +index fd4565389c77..bc922118b6ac 100644 +--- 
a/drivers/net/wireless/broadcom/b43legacy/leds.c ++++ b/drivers/net/wireless/broadcom/b43legacy/leds.c +@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev, + led->dev = dev; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index a4e9f430d452..e2cca91fd266 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, + const char *name; + int i, ret; + +- if (group > info->ngroups) ++ if (group >= info->ngroups) + return; + + seq_puts(s, "\n"); +diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c +index edb36bf781b0..f627b39f64bf 100644 +--- a/drivers/power/supply/generic-adc-battery.c ++++ b/drivers/power/supply/generic-adc-battery.c +@@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev) + struct power_supply_desc *psy_desc; + struct power_supply_config psy_cfg = {}; + struct gab_platform_data *pdata = pdev->dev.platform_data; +- enum power_supply_property *properties; + int ret = 0; + int chan; +- int index = 0; ++ int index = ARRAY_SIZE(gab_props); ++ bool any = false; + + adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL); + if (!adc_bat) { +@@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev) + } + + memcpy(psy_desc->properties, gab_props, sizeof(gab_props)); +- properties = (enum power_supply_property *) +- ((char *)psy_desc->properties + sizeof(gab_props)); + + /* + * getting channel from iio and copying the battery properties +@@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev) + adc_bat->channel[chan] = NULL; + } else { + /* copying properties for supported channels only */ +- memcpy(properties + sizeof(*(psy_desc->properties)) * index, +- &gab_dyn_props[chan], +- sizeof(gab_dyn_props[chan])); +- index++; ++ int index2; ++ ++ for (index2 = 0; index2 < index; index2++) { ++ if (psy_desc->properties[index2] == ++ gab_dyn_props[chan]) ++ break; /* already known */ ++ } ++ if (index2 == index) /* really new */ ++ psy_desc->properties[index++] = ++ gab_dyn_props[chan]; ++ any = true; + } + } + + /* none of the channels are supported so let's bail out */ +- if (index == 0) { ++ if (!any) { + ret = -ENODEV; + goto second_mem_fail; + } +@@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev) + * as come channels may be not be supported by the device.So + * we need to take care of that. 
+ */ +- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index; ++ psy_desc->num_properties = index; + + adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg); + if (IS_ERR(adc_bat->psy)) { +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c +index 66e9bb053629..18ab84e9c6b2 100644 +--- a/drivers/s390/cio/qdio_main.c ++++ b/drivers/s390/cio/qdio_main.c +@@ -640,21 +640,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + unsigned long phys_aob = 0; + + if (!q->use_cq) +- goto out; ++ return 0; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { +- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + WARN_ON_ONCE(phys_aob & 0xFF); + } + +-out: ++ q->sbal_state[bufnr].flags = 0; + return phys_aob; + } + +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index dcf36537a767..cc3994d4e7bc 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -755,9 +755,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + case ELS_LOGO: + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) +- return -EINVAL; ++ goto drop; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) +- return -EINVAL; ++ goto drop; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c +index 97aeaddd600d..e3ffd244603e 100644 +--- a/drivers/scsi/libfc/fc_rport.c ++++ b/drivers/scsi/libfc/fc_rport.c +@@ -1935,6 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + ++ rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_STOP); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index c2b682916337..cc8f2a7c2463 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -283,11 +283,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] " + "rejected.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + /* +@@ -296,10 +296,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (conn->session->fast_abort) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] fast abort.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + break; +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 3a6f557ec128..56b65b85b121 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -709,8 +709,24 @@ static ssize_t + sdev_store_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- if (device_remove_file_self(dev, attr)) +- scsi_remove_device(to_scsi_device(dev)); ++ struct kernfs_node *kn; ++ ++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); 
++	WARN_ON_ONCE(!kn);
++	/*
++	 * Concurrent writes into the "delete" sysfs attribute may trigger
++	 * concurrent calls to device_remove_file() and scsi_remove_device().
++	 * device_remove_file() handles concurrent removal calls by
++	 * serializing these and by ignoring the second and later removal
++	 * attempts. Concurrent calls of scsi_remove_device() are
++	 * serialized. The second and later calls of scsi_remove_device() are
++	 * ignored because the first call of that function changes the device
++	 * state into SDEV_DEL.
++	 */
++	device_remove_file(dev, attr);
++	scsi_remove_device(to_scsi_device(dev));
++	if (kn)
++		sysfs_unbreak_active_protection(kn);
+ 	return count;
+ };
+ static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 15ca09cd16f3..874e9f085326 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -564,9 +564,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ 	    (btstat == BTSTAT_SUCCESS ||
+ 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
+ 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
+-		cmd->result = (DID_OK << 16) | sdstat;
+-		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
+-			cmd->result |= (DRIVER_SENSE << 24);
++		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
++			cmd->result = (DID_RESET << 16);
++		} else {
++			cmd->result = (DID_OK << 16) | sdstat;
++			if (sdstat == SAM_STAT_CHECK_CONDITION &&
++			    cmd->sense_buffer)
++				cmd->result |= (DRIVER_SENSE << 24);
++		}
+ 	} else
+ 		switch (btstat) {
+ 		case BTSTAT_SUCCESS:
+diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
+index 2b700e8455c6..e3596855a703 100644
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -128,11 +128,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	{
+ 		struct ion_handle *handle;
+ 
+-		handle = ion_handle_get_by_id(client, data.handle.handle);
+-		if (IS_ERR(handle))
++		mutex_lock(&client->lock);
++		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
++		if (IS_ERR(handle)) {
++			mutex_unlock(&client->lock);
+ 			return PTR_ERR(handle);
+-		data.fd.fd = ion_share_dma_buf_fd(client, handle);
+-		ion_handle_put(handle);
++		}
++		data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
++		ion_handle_put_nolock(handle);
++		mutex_unlock(&client->lock);
+ 		if (data.fd.fd < 0)
+ 			ret = data.fd.fd;
+ 		break;
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index 6f9974cb0e15..806e9b30b9dc 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -15,6 +15,7 @@
+  *
+  */
+ 
++#include <linux/atomic.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/file.h>
+@@ -305,6 +306,16 @@ static void ion_handle_get(struct ion_handle *handle)
+ 	kref_get(&handle->ref);
+ }
+ 
++/* Must hold the client lock */
++static struct ion_handle *ion_handle_get_check_overflow(
++					struct ion_handle *handle)
++{
++	if (atomic_read(&handle->ref.refcount) + 1 == 0)
++		return ERR_PTR(-EOVERFLOW);
++	ion_handle_get(handle);
++	return handle;
++}
++
+ int ion_handle_put_nolock(struct ion_handle *handle)
+ {
+ 	return kref_put(&handle->ref, ion_handle_destroy);
+@@ -347,21 +358,9 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ 
+ 	handle = idr_find(&client->idr, id);
+ 	if (handle)
+-		ion_handle_get(handle);
+-
+-	return handle ?
handle : ERR_PTR(-EINVAL); +-} +- +-struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +- int id) +-{ +- struct ion_handle *handle; ++ return ion_handle_get_check_overflow(handle); + +- mutex_lock(&client->lock); +- handle = ion_handle_get_by_id_nolock(client, id); +- mutex_unlock(&client->lock); +- +- return handle; ++ return ERR_PTR(-EINVAL); + } + + static bool ion_handle_validate(struct ion_client *client, +@@ -1029,24 +1028,28 @@ static struct dma_buf_ops dma_buf_ops = { + .kunmap = ion_dma_buf_kunmap, + }; + +-struct dma_buf *ion_share_dma_buf(struct ion_client *client, +- struct ion_handle *handle) ++static struct dma_buf *__ion_share_dma_buf(struct ion_client *client, ++ struct ion_handle *handle, ++ bool lock_client) + { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct ion_buffer *buffer; + struct dma_buf *dmabuf; + bool valid_handle; + +- mutex_lock(&client->lock); ++ if (lock_client) ++ mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); +- mutex_unlock(&client->lock); ++ if (lock_client) ++ mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + ion_buffer_get(buffer); +- mutex_unlock(&client->lock); ++ if (lock_client) ++ mutex_unlock(&client->lock); + + exp_info.ops = &dma_buf_ops; + exp_info.size = buffer->size; +@@ -1061,14 +1064,21 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, + + return dmabuf; + } ++ ++struct dma_buf *ion_share_dma_buf(struct ion_client *client, ++ struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf(client, handle, true); ++} + EXPORT_SYMBOL(ion_share_dma_buf); + +-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) ++static int __ion_share_dma_buf_fd(struct ion_client *client, ++ struct ion_handle *handle, bool lock_client) + { + struct dma_buf *dmabuf; + int fd; + +- dmabuf = ion_share_dma_buf(client, handle); ++ dmabuf = __ion_share_dma_buf(client, handle, lock_client); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + +@@ -1078,8 +1088,19 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) + + return fd; + } ++ ++int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf_fd(client, handle, true); ++} + EXPORT_SYMBOL(ion_share_dma_buf_fd); + ++int ion_share_dma_buf_fd_nolock(struct ion_client *client, ++ struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf_fd(client, handle, false); ++} ++ + struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf) + { +@@ -1100,7 +1121,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { +- ion_handle_get(handle); ++ handle = ion_handle_get_check_overflow(handle); + mutex_unlock(&client->lock); + goto end; + } +diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h +index 3c3b3245275d..760e41885448 100644 +--- a/drivers/staging/android/ion/ion_priv.h ++++ b/drivers/staging/android/ion/ion_priv.h +@@ -463,11 +463,11 @@ void ion_free_nolock(struct ion_client *client, struct ion_handle *handle); + + int ion_handle_put_nolock(struct ion_handle *handle); + +-struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +- int id); +- + int ion_handle_put(struct ion_handle *handle); + + 
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
+ 
++int ion_share_dma_buf_fd_nolock(struct ion_client *client,
++				struct ion_handle *handle);
++
+ #endif /* _ION_PRIV_H */
+diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
+index c16927ac8eb0..395c7a2244ff 100644
+--- a/drivers/staging/media/omap4iss/iss_video.c
++++ b/drivers/staging/media/omap4iss/iss_video.c
+@@ -11,7 +11,6 @@
+  * (at your option) any later version.
+  */
+ 
+-#include <asm/cacheflush.h>
+ #include <linux/clk.h>
+ #include <linux/mm.h>
+ #include <linux/pagemap.h>
+@@ -24,6 +23,8 @@
+ #include <media/v4l2-ioctl.h>
+ #include <media/v4l2-mc.h>
+ 
++#include <asm/cacheflush.h>
++
+ #include "iss_video.h"
+ #include "iss.h"
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 9ccd5da8f204..d2f82aaf6a85 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -333,8 +333,7 @@ static int iscsi_login_zero_tsih_s1(
+ 		pr_err("idr_alloc() for sess_idr failed\n");
+ 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+-		kfree(sess);
+-		return -ENOMEM;
++		goto free_sess;
+ 	}
+ 
+ 	sess->creation_time = get_jiffies_64();
+@@ -350,20 +349,28 @@ static int iscsi_login_zero_tsih_s1(
+ 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ 		pr_err("Unable to allocate memory for"
+ 				" struct iscsi_sess_ops.\n");
+-		kfree(sess);
+-		return -ENOMEM;
++		goto remove_idr;
+ 	}
+ 
+ 	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ 	if (IS_ERR(sess->se_sess)) {
+ 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+-		kfree(sess->sess_ops);
+-		kfree(sess);
+-		return -ENOMEM;
++		goto free_ops;
+ 	}
+ 
+ 	return 0;
++
++free_ops:
++	kfree(sess->sess_ops);
++remove_idr:
++	spin_lock_bh(&sess_idr_lock);
++	idr_remove(&sess_idr, sess->session_index);
++	spin_unlock_bh(&sess_idr_lock);
++free_sess:
++	kfree(sess);
++	conn->sess = NULL;
++	return -ENOMEM;
+ }
+ 
+ static int iscsi_login_zero_tsih_s2(
+@@ -1152,13 +1159,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
+ 					   ISCSI_LOGIN_STATUS_INIT_ERR);
+ 	if (!zero_tsih || !conn->sess)
+ 		goto old_sess_out;
+-	if (conn->sess->se_sess)
+-		transport_free_session(conn->sess->se_sess);
+-	if (conn->sess->session_index != 0) {
+-		spin_lock_bh(&sess_idr_lock);
+-		idr_remove(&sess_idr, conn->sess->session_index);
+-		spin_unlock_bh(&sess_idr_lock);
+-	}
++
++	transport_free_session(conn->sess->se_sess);
++
++	spin_lock_bh(&sess_idr_lock);
++	idr_remove(&sess_idr, conn->sess->session_index);
++	spin_unlock_bh(&sess_idr_lock);
++
+ 	kfree(conn->sess->sess_ops);
+ 	kfree(conn->sess);
+ 	conn->sess = NULL;
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 5474b5187be0..f4bd08cfac11 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -929,14 +929,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
+ };
+ 
+ struct cntrl_cur_lay3 {
+-	__u32	dCUR;
++	__le32	dCUR;
+ };
+ 
+ struct cntrl_range_lay3 {
+-	__u16	wNumSubRanges;
+-	__u32	dMIN;
+-	__u32	dMAX;
+-	__u32	dRES;
++	__le16	wNumSubRanges;
++	__le32	dMIN;
++	__le32	dMAX;
++	__le32	dRES;
+ } __packed;
+ 
+ static inline void
+@@ -1285,9 +1285,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
+ 		memset(&c, 0, sizeof(struct cntrl_cur_lay3));
+ 
+ 		if (entity_id == USB_IN_CLK_ID)
+-			c.dCUR = p_srate;
++			c.dCUR = cpu_to_le32(p_srate);
+ 		else if (entity_id == USB_OUT_CLK_ID)
+-			c.dCUR = c_srate;
++			c.dCUR =
cpu_to_le32(c_srate); + + value = min_t(unsigned, w_length, sizeof c); + memcpy(req->buf, &c, value); +@@ -1325,15 +1325,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr) + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + if (entity_id == USB_IN_CLK_ID) +- r.dMIN = p_srate; ++ r.dMIN = cpu_to_le32(p_srate); + else if (entity_id == USB_OUT_CLK_ID) +- r.dMIN = c_srate; ++ r.dMIN = cpu_to_le32(c_srate); + else + return -EOPNOTSUPP; + + r.dMAX = r.dMIN; + r.dRES = 0; +- r.wNumSubRanges = 1; ++ r.wNumSubRanges = cpu_to_le16(1); + + value = min_t(unsigned, w_length, sizeof r); + memcpy(req->buf, &r, value); +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c +index f2c8862093a2..230e3248f386 100644 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c +@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597) + + r8a66597_bset(r8a66597, XCKE, SYSCFG0); + +- msleep(3); ++ mdelay(3); + + r8a66597_bset(r8a66597, PLLC, SYSCFG0); + +- msleep(1); ++ mdelay(1); + + r8a66597_bset(r8a66597, SCKE, SYSCFG0); + +@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock) + r8a66597->ep0_req->length = 2; + /* AV: what happens if we get called again before that gets through? */ + spin_unlock(&r8a66597->lock); +- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); ++ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC); + spin_lock(&r8a66597->lock); + } + +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c +index 94eb2923afed..85d031ce85c1 100644 +--- a/drivers/usb/phy/phy-fsl-usb.c ++++ b/drivers/usb/phy/phy-fsl-usb.c +@@ -879,6 +879,7 @@ int usb_otg_start(struct platform_device *pdev) + if (pdata->init && pdata->init(pdev) != 0) + return -EINVAL; + ++#ifdef CONFIG_PPC32 + if (pdata->big_endian_mmio) { + _fsl_readl = _fsl_readl_be; + _fsl_writel = _fsl_writel_be; +@@ -886,6 +887,7 @@ int usb_otg_start(struct platform_device *pdev) + _fsl_readl = _fsl_readl_le; + _fsl_writel = _fsl_writel_le; + } ++#endif + + /* request irq */ + p_otg->irq = platform_get_irq(pdev, 0); +@@ -976,7 +978,7 @@ int usb_otg_start(struct platform_device *pdev) + /* + * state file in sysfs + */ +-static int show_fsl_usb2_otg_state(struct device *dev, ++static ssize_t show_fsl_usb2_otg_state(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct otg_fsm *fsm = &fsl_otg_dev->fsm; +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index 41df8a27d7eb..2026885702a2 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -195,7 +195,6 @@ wait_for_old_object: + pr_err("\n"); + pr_err("Error: Unexpected object collision\n"); + cachefiles_printk_object(object, xobject); +- BUG(); + } + atomic_inc(&xobject->usage); + write_unlock(&cache->active_lock); +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index afbdc418966d..5e3bc9de7a16 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, + struct cachefiles_one_read *monitor = + container_of(wait, struct cachefiles_one_read, monitor); + struct cachefiles_object *object; ++ struct fscache_retrieval *op = monitor->op; + struct wait_bit_key *key = _key; + struct page *page = wait->private; + +@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, + list_del(&wait->task_list); + + /* move onto the action list and queue for FS-Cache thread pool */ +- 
ASSERT(monitor->op); ++ ASSERT(op); + +- object = container_of(monitor->op->op.object, +- struct cachefiles_object, fscache); ++ /* We need to temporarily bump the usage count as we don't own a ref ++ * here otherwise cachefiles_read_copier() may free the op between the ++ * monitor being enqueued on the op->to_do list and the op getting ++ * enqueued on the work queue. ++ */ ++ fscache_get_retrieval(op); + ++ object = container_of(op->op.object, struct cachefiles_object, fscache); + spin_lock(&object->work_lock); +- list_add_tail(&monitor->op_link, &monitor->op->to_do); ++ list_add_tail(&monitor->op_link, &op->to_do); + spin_unlock(&object->work_lock); + +- fscache_enqueue_retrieval(monitor->op); ++ fscache_enqueue_retrieval(op); ++ fscache_put_retrieval(op); + return 0; + } + +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index 3d03e48a9213..ad8bd96093f7 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) + seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); + seq_printf(m, "Features:"); + #ifdef CONFIG_CIFS_DFS_UPCALL +- seq_printf(m, " dfs"); ++ seq_printf(m, " DFS"); + #endif + #ifdef CONFIG_CIFS_FSCACHE +- seq_printf(m, " fscache"); ++ seq_printf(m, ",FSCACHE"); ++#endif ++#ifdef CONFIG_CIFS_SMB_DIRECT ++ seq_printf(m, ",SMB_DIRECT"); ++#endif ++#ifdef CONFIG_CIFS_STATS2 ++ seq_printf(m, ",STATS2"); ++#elif defined(CONFIG_CIFS_STATS) ++ seq_printf(m, ",STATS"); ++#endif ++#ifdef CONFIG_CIFS_DEBUG2 ++ seq_printf(m, ",DEBUG2"); ++#elif defined(CONFIG_CIFS_DEBUG) ++ seq_printf(m, ",DEBUG"); ++#endif ++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY ++ seq_printf(m, ",ALLOW_INSECURE_LEGACY"); + #endif + #ifdef CONFIG_CIFS_WEAK_PW_HASH +- seq_printf(m, " lanman"); ++ seq_printf(m, ",WEAK_PW_HASH"); + #endif + #ifdef CONFIG_CIFS_POSIX +- seq_printf(m, " posix"); ++ seq_printf(m, ",CIFS_POSIX"); + #endif + #ifdef CONFIG_CIFS_UPCALL +- seq_printf(m, " spnego"); ++ seq_printf(m, ",UPCALL(SPNEGO)"); + #endif + #ifdef CONFIG_CIFS_XATTR +- seq_printf(m, " xattr"); ++ seq_printf(m, ",XATTR"); + #endif + #ifdef CONFIG_CIFS_ACL +- seq_printf(m, " acl"); ++ seq_printf(m, ",ACL"); + #endif + seq_putc(m, '\n'); + seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 24c19eb94fa3..a012f70bba5c 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -1116,6 +1116,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, + if (!server->ops->set_file_info) + return -ENOSYS; + ++ info_buf.Pad = 0; ++ + if (attrs->ia_valid & ATTR_ATIME) { + set_time = true; + info_buf.LastAccessTime = +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index d031af8d3d4d..38d26cbcad07 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -419,7 +419,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int buf_type = CIFS_NO_BUFFER; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_II; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct smb2_file_all_info *pfile_info = NULL; + + oparms.tcon = tcon; +@@ -481,7 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int create_options = CREATE_NOT_DIR; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct kvec iov[2]; + + if (backup_cred(cifs_sb)) +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c +index 
c3db2a882aee..bb208076cb71 100644 +--- a/fs/cifs/sess.c ++++ b/fs/cifs/sess.c +@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, + goto setup_ntlmv2_ret; + } + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL); ++ if (!*pbuffer) { ++ rc = -ENOMEM; ++ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); ++ *buflen = 0; ++ goto setup_ntlmv2_ret; ++ } + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; + + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c +index 1238cd3552f9..0267d8cbc996 100644 +--- a/fs/cifs/smb2inode.c ++++ b/fs/cifs/smb2inode.c +@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path, + int rc; + + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && +- (buf->LastWriteTime == 0) && (buf->ChangeTime) && ++ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && + (buf->Attributes == 0)) + return 0; /* would be a no op, no sense sending this */ + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 812e4884c392..68622f1e706b 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -894,6 +894,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, + + } + ++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ ++#define GMT_TOKEN_SIZE 50 ++ ++/* ++ * Input buffer contains (empty) struct smb_snapshot array with size filled in ++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 ++ */ + static int + smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + struct cifsFileInfo *cfile, void __user *ioc_buf) +@@ -922,14 +929,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + kfree(retbuf); + return rc; + } +- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { +- rc = -ERANGE; +- kfree(retbuf); +- return rc; +- } + +- if (ret_data_len > snapshot_in.snapshot_array_size) +- ret_data_len = snapshot_in.snapshot_array_size; ++ /* ++ * Check for min size, ie not large enough to fit even one GMT ++ * token (snapshot). On the first ioctl some users may pass in ++ * smaller size (or zero) to simply get the size of the array ++ * so the user space caller can allocate sufficient memory ++ * and retry the ioctl again with larger array size sufficient ++ * to hold all of the snapshot GMT tokens on the second try. 
++ */ ++ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) ++ ret_data_len = sizeof(struct smb_snapshot_array); ++ ++ /* ++ * We return struct SRV_SNAPSHOT_ARRAY, followed by ++ * the snapshot array (of 50 byte GMT tokens) each ++ * representing an available previous version of the data ++ */ ++ if (ret_data_len > (snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array))) ++ ret_data_len = snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array); + + if (copy_to_user(ioc_buf, retbuf, ret_data_len)) + rc = -EFAULT; +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 248c43b63f13..a225a21d04ad 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1415,6 +1415,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, + goto cleanup_and_exit; + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " + "falling back\n")); ++ ret = NULL; + } + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); + if (!nblocks) { +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c +index 5dc655e410b4..54942d60e72a 100644 +--- a/fs/ext4/sysfs.c ++++ b/fs/ext4/sysfs.c +@@ -277,8 +277,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj, + case attr_pointer_ui: + if (!ptr) + return 0; +- return snprintf(buf, PAGE_SIZE, "%u\n", +- *((unsigned int *) ptr)); ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ le32_to_cpup(ptr)); ++ else ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ *((unsigned int *) ptr)); + case attr_pointer_atomic: + if (!ptr) + return 0; +@@ -311,7 +315,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj, + ret = kstrtoul(skip_spaces(buf), 0, &t); + if (ret) + return ret; +- *((unsigned int *) ptr) = t; ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ *((__le32 *) ptr) = cpu_to_le32(t); ++ else ++ *((unsigned int *) ptr) = t; + return len; + case attr_inode_readahead: + return inode_readahead_blks_store(a, sbi, buf, len); +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 3fadfabcac39..fdcbe0f2814f 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -184,6 +184,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end, + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); + if ((void *)next >= end) + return -EFSCORRUPTED; ++ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) ++ return -EFSCORRUPTED; + e = next; + } + +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index de67745e1cd7..77946d6f617d 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERT(op->processor != NULL); + ASSERT(fscache_object_is_available(op->object)); + ASSERTCMP(atomic_read(&op->usage), >, 0); +- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); ++ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, ++ op->state, ==, FSCACHE_OP_ST_CANCELLED); + + fscache_stat(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { +@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op) + struct fscache_cache *cache; + + _enter("{OBJ%x OP%x,%d}", +- op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ++ op->object ? 
op->object->debug_id : 0, ++ op->debug_id, atomic_read(&op->usage)); + + ASSERTCMP(atomic_read(&op->usage), >, 0); + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index f11792672977..c94bab6103f5 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -130,6 +130,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) + return !fc->initialized || (for_background && fc->blocked); + } + ++static void fuse_drop_waiting(struct fuse_conn *fc) ++{ ++ if (fc->connected) { ++ atomic_dec(&fc->num_waiting); ++ } else if (atomic_dec_and_test(&fc->num_waiting)) { ++ /* wake up aborters */ ++ wake_up_all(&fc->blocked_waitq); ++ } ++} ++ + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + bool for_background) + { +@@ -170,7 +180,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + return req; + + out: +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + return ERR_PTR(err); + } + +@@ -277,7 +287,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) + + if (test_bit(FR_WAITING, &req->flags)) { + __clear_bit(FR_WAITING, &req->flags); +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + } + + if (req->stolen_file) +@@ -363,7 +373,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + struct fuse_iqueue *fiq = &fc->iq; + + if (test_and_set_bit(FR_FINISHED, &req->flags)) +- return; ++ goto put_request; + + spin_lock(&fiq->waitq.lock); + list_del_init(&req->intr_entry); +@@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + wake_up(&req->waitq); + if (req->end) + req->end(fc, req); ++put_request: + fuse_put_request(fc, req); + } + +@@ -1935,11 +1946,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + if (!fud) + return -EPERM; + ++ pipe_lock(pipe); ++ + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); +- if (!bufs) ++ if (!bufs) { ++ pipe_unlock(pipe); + return -ENOMEM; ++ } + +- pipe_lock(pipe); + nbuf = 0; + rem = 0; + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) +@@ -2094,6 +2108,7 @@ void fuse_abort_conn(struct fuse_conn *fc) + set_bit(FR_ABORTED, &req->flags); + if (!test_bit(FR_LOCKED, &req->flags)) { + set_bit(FR_PRIVATE, &req->flags); ++ __fuse_get_request(req); + list_move(&req->list, &to_end1); + } + spin_unlock(&req->waitq.lock); +@@ -2120,7 +2135,6 @@ void fuse_abort_conn(struct fuse_conn *fc) + + while (!list_empty(&to_end1)) { + req = list_first_entry(&to_end1, struct fuse_req, list); +- __fuse_get_request(req); + list_del_init(&req->list); + request_end(fc, req); + } +@@ -2131,6 +2145,11 @@ void fuse_abort_conn(struct fuse_conn *fc) + } + EXPORT_SYMBOL_GPL(fuse_abort_conn); + ++void fuse_wait_aborted(struct fuse_conn *fc) ++{ ++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); ++} ++ + int fuse_dev_release(struct inode *inode, struct file *file) + { + struct fuse_dev *fud = fuse_get_dev(file); +@@ -2138,9 +2157,15 @@ int fuse_dev_release(struct inode *inode, struct file *file) + if (fud) { + struct fuse_conn *fc = fud->fc; + struct fuse_pqueue *fpq = &fud->pq; ++ LIST_HEAD(to_end); + ++ spin_lock(&fpq->lock); + WARN_ON(!list_empty(&fpq->io)); +- end_requests(fc, &fpq->processing); ++ list_splice_init(&fpq->processing, &to_end); ++ spin_unlock(&fpq->lock); ++ ++ end_requests(fc, &to_end); ++ + /* Are we the last open device? 
*/ + if (atomic_dec_and_test(&fc->dev_count)) { + WARN_ON(fc->iq.fasync != NULL); +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index cca8dd3bda09..60dd2bc10776 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, + struct inode *inode; + struct dentry *newent; + bool outarg_valid = true; ++ bool locked; + +- fuse_lock_inode(dir); ++ locked = fuse_lock_inode(dir); + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, + &outarg, &inode); +- fuse_unlock_inode(dir); ++ fuse_unlock_inode(dir, locked); + if (err == -ENOENT) { + outarg_valid = false; + err = 0; +@@ -1336,6 +1337,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + u64 attr_version = 0; ++ bool locked; + + if (is_bad_inode(inode)) + return -EIO; +@@ -1363,9 +1365,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + FUSE_READDIR); + } +- fuse_lock_inode(inode); ++ locked = fuse_lock_inode(inode); + fuse_request_send(fc, req); +- fuse_unlock_inode(inode); ++ fuse_unlock_inode(inode, locked); + nbytes = req->out.args[0].size; + err = req->out.h.error; + fuse_put_request(fc, req); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 996aa23c409e..4408abf6675b 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -868,6 +868,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) + } + + if (WARN_ON(req->num_pages >= req->max_pages)) { ++ unlock_page(page); + fuse_put_request(fc, req); + return -EIO; + } +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index 91307940c8ac..1c905c7666de 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -854,6 +854,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc, + + /* Abort all requests */ + void fuse_abort_conn(struct fuse_conn *fc); ++void fuse_wait_aborted(struct fuse_conn *fc); + + /** + * Invalidate inode attributes +@@ -967,8 +968,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + + void fuse_set_initialized(struct fuse_conn *fc); + +-void fuse_unlock_inode(struct inode *inode); +-void fuse_lock_inode(struct inode *inode); ++void fuse_unlock_inode(struct inode *inode, bool locked); ++bool fuse_lock_inode(struct inode *inode); + + int fuse_setxattr(struct inode *inode, const char *name, const void *value, + size_t size, int flags); +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index f95e1d49b048..7a9b1069d267 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -356,15 +356,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, + return 0; + } + +-void fuse_lock_inode(struct inode *inode) ++bool fuse_lock_inode(struct inode *inode) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ bool locked = false; ++ ++ if (!get_fuse_conn(inode)->parallel_dirops) { + mutex_lock(&get_fuse_inode(inode)->mutex); ++ locked = true; ++ } ++ ++ return locked; + } + +-void fuse_unlock_inode(struct inode *inode) ++void fuse_unlock_inode(struct inode *inode, bool locked) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ if (locked) + mutex_unlock(&get_fuse_inode(inode)->mutex); + } + +@@ -396,9 +402,6 @@ static void fuse_put_super(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + +- fuse_send_destroy(fc); +- +- fuse_abort_conn(fc); + mutex_lock(&fuse_mutex); + list_del(&fc->entry); + fuse_ctl_remove_conn(fc); +@@ -1198,16 +1201,25 
@@ static struct dentry *fuse_mount(struct file_system_type *fs_type, + return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); + } + +-static void fuse_kill_sb_anon(struct super_block *sb) ++static void fuse_sb_destroy(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + + if (fc) { ++ fuse_send_destroy(fc); ++ ++ fuse_abort_conn(fc); ++ fuse_wait_aborted(fc); ++ + down_write(&fc->killsb); + fc->sb = NULL; + up_write(&fc->killsb); + } ++} + ++static void fuse_kill_sb_anon(struct super_block *sb) ++{ ++ fuse_sb_destroy(sb); + kill_anon_super(sb); + } + +@@ -1230,14 +1242,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, + + static void fuse_kill_sb_blk(struct super_block *sb) + { +- struct fuse_conn *fc = get_fuse_conn_super(sb); +- +- if (fc) { +- down_write(&fc->killsb); +- fc->sb = NULL; +- up_write(&fc->killsb); +- } +- ++ fuse_sb_destroy(sb); + kill_block_super(sb); + } + +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index fcff2e0487fe..f1c1430ae721 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) + return squashfs_block_size(size); + } + ++void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) ++{ ++ int copied; ++ void *pageaddr; ++ ++ pageaddr = kmap_atomic(page); ++ copied = squashfs_copy_data(pageaddr, buffer, offset, avail); ++ memset(pageaddr + copied, 0, PAGE_SIZE - copied); ++ kunmap_atomic(pageaddr); ++ ++ flush_dcache_page(page); ++ if (copied == avail) ++ SetPageUptodate(page); ++ else ++ SetPageError(page); ++} ++ + /* Copy data into page cache */ + void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + int bytes, int offset) + { + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- void *pageaddr; + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; + int start_index = page->index & ~mask, end_index = start_index | mask; + +@@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + if (PageUptodate(push_page)) + goto skip_page; + +- pageaddr = kmap_atomic(push_page); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(push_page); +- SetPageUptodate(push_page); ++ squashfs_fill_page(push_page, buffer, offset, avail); + skip_page: + unlock_page(push_page); + if (i != page->index) +@@ -420,10 +431,9 @@ skip_page: + } + + /* Read datablock stored packed inside a fragment (tail-end packed block) */ +-static int squashfs_readpage_fragment(struct page *page) ++static int squashfs_readpage_fragment(struct page *page, int expected) + { + struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); +@@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page) + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + else +- squashfs_copy_cache(page, buffer, i_size_read(inode) & +- (msblk->block_size - 1), ++ squashfs_copy_cache(page, buffer, expected, + squashfs_i(inode)->fragment_offset); + + squashfs_cache_put(buffer); + return res; + } + +-static int squashfs_readpage_sparse(struct page *page, int index, 
int file_end) ++static int squashfs_readpage_sparse(struct page *page, int expected) + { +- struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- int bytes = index == file_end ? +- (i_size_read(inode) & (msblk->block_size - 1)) : +- msblk->block_size; +- +- squashfs_copy_cache(page, NULL, bytes, 0); ++ squashfs_copy_cache(page, NULL, expected, 0); + return 0; + } + +@@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page) + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int file_end = i_size_read(inode) >> msblk->block_log; ++ int expected = index == file_end ? ++ (i_size_read(inode) & (msblk->block_size - 1)) : ++ msblk->block_size; + int res; + void *pageaddr; + +@@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page) + goto error_out; + + if (bsize == 0) +- res = squashfs_readpage_sparse(page, index, file_end); ++ res = squashfs_readpage_sparse(page, expected); + else +- res = squashfs_readpage_block(page, block, bsize); ++ res = squashfs_readpage_block(page, block, bsize, expected); + } else +- res = squashfs_readpage_fragment(page); ++ res = squashfs_readpage_fragment(page, expected); + + if (!res) + return 0; +diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c +index f2310d2a2019..a9ba8d96776a 100644 +--- a/fs/squashfs/file_cache.c ++++ b/fs/squashfs/file_cache.c +@@ -20,7 +20,7 @@ + #include "squashfs.h" + + /* Read separately compressed datablock and memcopy into page cache */ +-int squashfs_readpage_block(struct page *page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) + { + struct inode *i = page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, +@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize) + ERROR("Unable to read page, block %llx, size %x\n", block, + bsize); + else +- squashfs_copy_cache(page, buffer, buffer->length, 0); ++ squashfs_copy_cache(page, buffer, expected, 0); + + squashfs_cache_put(buffer); + return res; +diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c +index cb485d8e0e91..80db1b86a27c 100644 +--- a/fs/squashfs/file_direct.c ++++ b/fs/squashfs/file_direct.c +@@ -21,10 +21,11 @@ + #include "page_actor.h" + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page); ++ int pages, struct page **page, int bytes); + + /* Read separately compressed datablock directly into page cache */ +-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, ++ int expected) + + { + struct inode *inode = target_page->mapping->host; +@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + * using an intermediate buffer. 
+ */ + res = squashfs_read_cache(target_page, block, bsize, pages, +- page); ++ page, expected); + if (res < 0) + goto mark_errored; + +@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + if (res < 0) + goto mark_errored; + ++ if (res != expected) { ++ res = -EIO; ++ goto mark_errored; ++ } ++ + /* Last page may have trailing bytes not filled */ + bytes = res % PAGE_SIZE; + if (bytes) { +@@ -138,13 +144,12 @@ out: + + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page) ++ int pages, struct page **page, int bytes) + { + struct inode *i = target_page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, + block, bsize); +- int bytes = buffer->length, res = buffer->error, n, offset = 0; +- void *pageaddr; ++ int res = buffer->error, n, offset = 0; + + if (res) { + ERROR("Unable to read page, block %llx, size %x\n", block, +@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, + if (page[n] == NULL) + continue; + +- pageaddr = kmap_atomic(page[n]); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(page[n]); +- SetPageUptodate(page[n]); ++ squashfs_fill_page(page[n], buffer, offset, avail); + unlock_page(page[n]); + if (page[n] != target_page) + put_page(page[n]); +diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h +index 887d6d270080..f89f8a74c6ce 100644 +--- a/fs/squashfs/squashfs.h ++++ b/fs/squashfs/squashfs.h +@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, + u64, u64, unsigned int); + + /* file.c */ ++void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); + void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, + int); + + /* file_xxx.c */ +-extern int squashfs_readpage_block(struct page *, u64, int); ++extern int squashfs_readpage_block(struct page *, u64, int, int); + + /* id.c */ + extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index 39c75a86c67f..666986b95c5d 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, + } + EXPORT_SYMBOL_GPL(sysfs_chmod_file); + ++/** ++ * sysfs_break_active_protection - break "active" protection ++ * @kobj: The kernel object @attr is associated with. ++ * @attr: The attribute to break the "active" protection for. ++ * ++ * With sysfs, just like kernfs, deletion of an attribute is postponed until ++ * all active .show() and .store() callbacks have finished unless this function ++ * is called. Hence this function is useful in methods that implement self ++ * deletion. ++ */ ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ struct kernfs_node *kn; ++ ++ kobject_get(kobj); ++ kn = kernfs_find_and_get(kobj->sd, attr->name); ++ if (kn) ++ kernfs_break_active_protection(kn); ++ return kn; ++} ++EXPORT_SYMBOL_GPL(sysfs_break_active_protection); ++ ++/** ++ * sysfs_unbreak_active_protection - restore "active" protection ++ * @kn: Pointer returned by sysfs_break_active_protection(). ++ * ++ * Undo the effects of sysfs_break_active_protection(). 
Since this function ++ * calls kernfs_put() on the kernfs node that corresponds to the 'attr' ++ * argument passed to sysfs_break_active_protection() that attribute may have ++ * been removed between the sysfs_break_active_protection() and ++ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after ++ * this function has returned. ++ */ ++void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++ struct kobject *kobj = kn->parent->priv; ++ ++ kernfs_unbreak_active_protection(kn); ++ kernfs_put(kn); ++ kobject_put(kobj); ++} ++EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); ++ + /** + * sysfs_remove_file_ns - remove an object attribute with a custom ns tag + * @kobj: object we're acting for +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h +index 00a1f330f93a..d3c19f8c4564 100644 +--- a/include/linux/sysfs.h ++++ b/include/linux/sysfs.h +@@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, + const struct attribute **attr); + int __must_check sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, umode_t mode); ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr); ++void sysfs_unbreak_active_protection(struct kernfs_node *kn); + void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, + const void *ns); + bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); +@@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, + return 0; + } + ++static inline struct kernfs_node * ++sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ return NULL; ++} ++ ++static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++} ++ + static inline void sysfs_remove_file_ns(struct kobject *kobj, + const struct attribute *attr, + const void *ns) +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 69485183af79..b9e966bcdd20 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -2441,7 +2441,7 @@ static int __init debugfs_kprobe_init(void) + if (!dir) + return -ENOMEM; + +- file = debugfs_create_file("list", 0444, dir, NULL, ++ file = debugfs_create_file("list", 0400, dir, NULL, + &debugfs_kprobes_operations); + if (!file) + goto error; +@@ -2451,7 +2451,7 @@ static int __init debugfs_kprobe_init(void) + if (!file) + goto error; + +- file = debugfs_create_file("blacklist", 0444, dir, NULL, ++ file = debugfs_create_file("blacklist", 0400, dir, NULL, + &debugfs_kprobe_blacklist_ops); + if (!file) + goto error; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 24d603d29512..7df6be31be36 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -345,7 +345,8 @@ static struct ctl_table kern_table[] = { + .data = &sysctl_sched_time_avg, + .maxlen = sizeof(unsigned int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &one, + }, + { + .procname = "sched_shares_window_ns", +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 349f4a8e3c4f..86a6b331b964 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4072,6 +4072,14 @@ static struct cftype mem_cgroup_legacy_files[] = { + + static DEFINE_IDR(mem_cgroup_idr); + ++static void mem_cgroup_id_remove(struct mem_cgroup *memcg) ++{ ++ if (memcg->id.id > 0) { ++ idr_remove(&mem_cgroup_idr, memcg->id.id); ++ memcg->id.id = 0; ++ } ++} ++ + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) + { + 
VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); +@@ -4082,8 +4090,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) + { + VM_BUG_ON(atomic_read(&memcg->id.ref) < n); + if (atomic_sub_and_test(n, &memcg->id.ref)) { +- idr_remove(&mem_cgroup_idr, memcg->id.id); +- memcg->id.id = 0; ++ mem_cgroup_id_remove(memcg); + + /* Memcg ID pins CSS */ + css_put(&memcg->css); +@@ -4208,8 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); + return memcg; + fail: +- if (memcg->id.id > 0) +- idr_remove(&mem_cgroup_idr, memcg->id.id); ++ mem_cgroup_id_remove(memcg); + __mem_cgroup_free(memcg); + return NULL; + } +@@ -4268,6 +4274,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + + return &memcg->css; + fail: ++ mem_cgroup_id_remove(memcg); + mem_cgroup_free(memcg); + return ERR_PTR(-ENOMEM); + } +diff --git a/mm/memory.c b/mm/memory.c +index 88f8d6a2af05..0ff735601654 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3861,6 +3861,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + return -EINVAL; + + maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); ++ if (!maddr) ++ return -ENOMEM; ++ + if (write) + memcpy_toio(maddr + offset, buf, len); + else +diff --git a/mm/zswap.c b/mm/zswap.c +index ded051e3433d..c2b5435fe617 100644 +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -1018,6 +1018,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + ret = -ENOMEM; + goto reject; + } ++ ++ /* A second zswap_is_full() check after ++ * zswap_shrink() to make sure it's now ++ * under the max_pool_percent ++ */ ++ if (zswap_is_full()) { ++ ret = -ENOMEM; ++ goto reject; ++ } + } + + /* allocate entry */ +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c +index d730a0f68f46..a0443d40d677 100644 +--- a/net/caif/caif_dev.c ++++ b/net/caif/caif_dev.c +@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) + caifd = caif_get(skb->dev); + + WARN_ON(caifd == NULL); +- if (caifd == NULL) ++ if (!caifd) { ++ rcu_read_unlock(); + return; ++ } + + caifd_hold(caifd); + rcu_read_unlock(); +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index 972353cd1778..65a15889d432 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) + int taglen; + + for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { +- if (optptr[0] == IPOPT_CIPSO) ++ switch (optptr[0]) { ++ case IPOPT_CIPSO: + return optptr; +- taglen = optptr[1]; ++ case IPOPT_END: ++ return NULL; ++ case IPOPT_NOOP: ++ taglen = 1; ++ break; ++ default: ++ taglen = optptr[1]; ++ } + optlen -= taglen; + optptr += taglen; + } +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index beae93fd66d5..a5aeeb613fac 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + goto tx_err_dst_release; + } + +- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); +- skb_dst_set(skb, dst); +- skb->dev = skb_dst(skb)->dev; +- + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); +@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + htonl(mtu)); + } + +- return -EMSGSIZE; ++ err = -EMSGSIZE; ++ goto tx_err_dst_release; + } + ++ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); ++ skb_dst_set(skb, dst); ++ 
skb->dev = skb_dst(skb)->dev; ++ + err = dst_output(t->net, skb->sk, skb); + if (net_xmit_eval(err) == 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index a2756096b94a..ca7de02e0a6e 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -2061,7 +2061,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) + if (!sta->uploaded) + continue; + +- if (sta->sdata->vif.type != NL80211_IFTYPE_AP) ++ if (sta->sdata->vif.type != NL80211_IFTYPE_AP && ++ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + continue; + + for (state = IEEE80211_STA_NOTEXIST; +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 5b75468b5acd..146d83785b37 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -4058,6 +4058,7 @@ static int parse_station_flags(struct genl_info *info, + params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_MFP) | + BIT(NL80211_STA_FLAG_AUTHORIZED); ++ break; + default: + return -EINVAL; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 5b8fa6832687..1f943d97dc29 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -2354,6 +2354,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) + return make_blackhole(net, dst_orig->ops->family, dst_orig); + ++ if (IS_ERR(dst)) ++ dst_release(dst_orig); ++ + return dst; + } + EXPORT_SYMBOL(xfrm_lookup_route); +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index bb61956c0f9c..6e768093d7c8 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -984,10 +984,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, + { + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); + +- if (nlsk) +- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); +- else +- return -1; ++ if (!nlsk) { ++ kfree_skb(skb); ++ return -EPIPE; ++ } ++ ++ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); + } + + static inline size_t xfrm_spdinfo_msgsize(void) +diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c +index 45fc06c0e0e5..6b504f407079 100644 +--- a/sound/soc/sirf/sirf-usp.c ++++ b/sound/soc/sirf/sirf-usp.c +@@ -367,10 +367,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, usp); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- base = devm_ioremap(&pdev->dev, mem_res->start, +- resource_size(mem_res)); +- if (base == NULL) +- return -ENOMEM; ++ base = devm_ioremap_resource(&pdev->dev, mem_res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + usp->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &sirf_usp_regmap_config); + if (IS_ERR(usp->regmap)) +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 20680a490897..b111ecda6439 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1621,6 +1621,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream) + int i; + + for (i = 0; i < be->num_codecs; i++) { ++ /* ++ * Skip CODECs which don't support the current stream ++ * type. 
See soc_pcm_init_runtime_hw() for more details ++ */ ++ if (!snd_soc_dai_stream_valid(be->codec_dais[i], ++ stream)) ++ continue; ++ + codec_dai_drv = be->codec_dais[i]->driver; + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + codec_stream = &codec_dai_drv->playback; +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c +index 9664b1ff4285..5ec2de8f49b4 100644 +--- a/tools/power/x86/turbostat/turbostat.c ++++ b/tools/power/x86/turbostat/turbostat.c +@@ -733,9 +733,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ + if (!printed || !summary_only) + print_header(); + +- if (topo.num_cpus > 1) +- format_counters(&average.threads, &average.cores, +- &average.packages); ++ format_counters(&average.threads, &average.cores, &average.packages); + + printed = 1; + +@@ -3202,7 +3200,9 @@ void process_cpuid() + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + stepping = fms & 0xf; +- if (family == 6 || family == 0xf) ++ if (family == 0xf) ++ family += (fms >> 20) & 0xff; ++ if (family >= 6) + model += ((fms >> 16) & 0xf) << 4; + + if (debug) { +diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +new file mode 100644 +index 000000000000..3b1f45e13a2e +--- /dev/null ++++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +@@ -0,0 +1,28 @@ ++#!/bin/sh ++# description: Snapshot and tracing setting ++# flags: instance ++ ++[ ! -f snapshot ] && exit_unsupported ++ ++echo "Set tracing off" ++echo 0 > tracing_on ++ ++echo "Allocate and take a snapshot" ++echo 1 > snapshot ++ ++# Since trace buffer is empty, snapshot is also empty, but allocated ++grep -q "Snapshot is allocated" snapshot ++ ++echo "Ensure keep tracing off" ++test `cat tracing_on` -eq 0 ++ ++echo "Set tracing on" ++echo 1 > tracing_on ++ ++echo "Take a snapshot again" ++echo 1 > snapshot ++ ++echo "Ensure keep tracing on" ++test `cat tracing_on` -eq 1 ++ ++exit 0 +diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c +index 88d5e71be044..47dfa0b0fcd7 100644 +--- a/tools/usb/ffs-test.c ++++ b/tools/usb/ffs-test.c +@@ -44,12 +44,25 @@ + + /******************** Little Endian Handling ********************************/ + +-#define cpu_to_le16(x) htole16(x) +-#define cpu_to_le32(x) htole32(x) ++/* ++ * cpu_to_le16/32 are used when initializing structures, a context where a ++ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way ++ * that allows them to be used when initializing structures. 
++ */ ++ ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++#define cpu_to_le16(x) (x) ++#define cpu_to_le32(x) (x) ++#else ++#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) ++#define cpu_to_le32(x) \ ++ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ ++ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) ++#endif ++ + #define le32_to_cpu(x) le32toh(x) + #define le16_to_cpu(x) le16toh(x) + +- + /******************** Messages and Errors ***********************************/ + + static const char argv0[] = "ffs-test"; diff --git a/omitted-patches/omit-4.9.125.patch b/omitted-patches/omit-4.9.125.patch new file mode 100644 index 0000000..14c412e --- /dev/null +++ b/omitted-patches/omit-4.9.125.patch @@ -0,0 +1,307 @@ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index 7f868d9bb5ed..b3d268a79f05 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -894,19 +894,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + pmd = stage2_get_pmd(kvm, cache, addr); + VM_BUG_ON(!pmd); + +- /* +- * Mapping in huge pages should only happen through a fault. If a +- * page is merged into a transparent huge page, the individual +- * subpages of that huge page should be unmapped through MMU +- * notifiers before we get here. +- * +- * Merging of CompoundPages is not supported; they should become +- * splitting first, unmapped, merged, and mapped back in on-demand. +- */ +- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); +- + old_pmd = *pmd; + if (pmd_present(old_pmd)) { ++ /* ++ * Multiple vcpus faulting on the same PMD entry, can ++ * lead to them sequentially updating the PMD with the ++ * same value. Following the break-before-make ++ * (pmd_clear() followed by tlb_flush()) process can ++ * hinder forward progress due to refaults generated ++ * on missing translations. ++ * ++ * Skip updating the page table if the entry is ++ * unchanged. ++ */ ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd)) ++ return 0; ++ ++ /* ++ * Mapping in huge pages should only happen through a ++ * fault. If a page is merged into a transparent huge ++ * page, the individual subpages of that huge page ++ * should be unmapped through MMU notifiers before we ++ * get here. ++ * ++ * Merging of CompoundPages is not supported; they ++ * should become splitting first, unmapped, merged, ++ * and mapped back in on-demand. 
++ */ ++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); ++ + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +@@ -962,6 +978,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + if (pte_present(old_pte)) { ++ /* Skip page table update if there is no change */ ++ if (pte_val(old_pte) == pte_val(*new_pte)) ++ return 0; ++ + kvm_set_pte(pte, __pte(0)); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 9d07b421f090..fa6b2fad7a3d 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -147,7 +147,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) + #ifdef CONFIG_HAVE_ARCH_PFN_VALID + int pfn_valid(unsigned long pfn) + { +- return memblock_is_map_memory(pfn << PAGE_SHIFT); ++ phys_addr_t addr = pfn << PAGE_SHIFT; ++ ++ if ((addr >> PAGE_SHIFT) != pfn) ++ return 0; ++ return memblock_is_map_memory(addr); + } + EXPORT_SYMBOL(pfn_valid); + #endif +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index ac67a76550bd..8103adacbc83 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -651,6 +651,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); + enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; + EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + ++/* ++ * These CPUs all support 44bits physical address space internally in the ++ * cache but CPUID can report a smaller number of physical address bits. ++ * ++ * The L1TF mitigation uses the top most address bit for the inversion of ++ * non present PTEs. When the installed memory reaches into the top most ++ * address bit due to memory holes, which has been observed on machines ++ * which report 36bits physical address bits and have 32G RAM installed, ++ * then the mitigation range check in l1tf_select_mitigation() triggers. ++ * This is a false positive because the mitigation is still possible due to ++ * the fact that the cache uses 44bit internally. Use the cache bits ++ * instead of the reported physical bits and adjust them on the affected ++ * machines to 44bit if the reported bits are less than 44. ++ */ ++static void override_cache_bits(struct cpuinfo_x86 *c) ++{ ++ if (c->x86 != 6) ++ return; ++ ++ switch (c->x86_model) { ++ case INTEL_FAM6_NEHALEM: ++ case INTEL_FAM6_WESTMERE: ++ case INTEL_FAM6_SANDYBRIDGE: ++ case INTEL_FAM6_IVYBRIDGE: ++ case INTEL_FAM6_HASWELL_CORE: ++ case INTEL_FAM6_HASWELL_ULT: ++ case INTEL_FAM6_HASWELL_GT3E: ++ case INTEL_FAM6_BROADWELL_CORE: ++ case INTEL_FAM6_BROADWELL_GT3E: ++ case INTEL_FAM6_SKYLAKE_MOBILE: ++ case INTEL_FAM6_SKYLAKE_DESKTOP: ++ case INTEL_FAM6_KABYLAKE_MOBILE: ++ case INTEL_FAM6_KABYLAKE_DESKTOP: ++ if (c->x86_cache_bits < 44) ++ c->x86_cache_bits = 44; ++ break; ++ } ++} ++ + static void __init l1tf_select_mitigation(void) + { + u64 half_pa; +@@ -658,6 +697,8 @@ static void __init l1tf_select_mitigation(void) + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + ++ override_cache_bits(&boot_cpu_data); ++ + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: +@@ -677,14 +718,13 @@ static void __init l1tf_select_mitigation(void) + return; + #endif + +- /* +- * This is extremely unlikely to happen because almost all +- * systems have far more MAX_PA/2 than RAM can be fit into +- * DIMM slots. 
+- */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) { + pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); ++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", ++ half_pa); ++ pr_info("However, doing so will make a part of your RAM unusable.\n"); ++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); + return; + } + +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 13471b71bec7..dc0850bb74be 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + } + } + #endif ++ c->x86_cache_bits = c->x86_phys_bits; + } + + static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 9ad86c4bf360..cee0fec0d232 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -109,6 +109,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + ++ if (c->x86 != 6) ++ return false; ++ + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index c855080c7a71..5f44d63a9d69 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -4973,8 +4973,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + + clgi(); + +- local_irq_enable(); +- + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there +@@ -4983,6 +4981,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + */ + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + ++ local_irq_enable(); ++ + asm volatile ( + "push %%" _ASM_BP "; \n\t" + "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" +@@ -5105,12 +5105,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + +- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); +- + reload_tss(vcpu); + + local_irq_disable(); + ++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); ++ + vcpu->arch.cr2 = svm->vmcb->save.cr2; + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 12826607a995..8e4ac0a91309 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8670,9 +8670,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. 
+ */ +-#define L1D_CACHE_ORDER 4 +-static void *vmx_l1d_flush_pages; +- + static void vmx_l1d_flush(struct kvm_vcpu *vcpu) + { + int size = PAGE_SIZE << L1D_CACHE_ORDER; +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 5d35b555115a..90801a8f19c9 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -792,7 +792,7 @@ unsigned long max_swapfile_size(void) + + if (boot_cpu_has_bug(X86_BUG_L1TF)) { + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ +- unsigned long l1tf_limit = l1tf_pfn_limit() + 1; ++ unsigned long long l1tf_limit = l1tf_pfn_limit(); + /* + * We encode swap offsets also with 3 bits below those for pfn + * which makes the usable limit higher. +@@ -800,7 +800,7 @@ unsigned long max_swapfile_size(void) + #if CONFIG_PGTABLE_LEVELS > 2 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; + #endif +- pages = min_t(unsigned long, l1tf_limit, pages); ++ pages = min_t(unsigned long long, l1tf_limit, pages); + } + return pages; + } +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 5aad869fa205..74609a957c49 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -138,7 +138,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) + /* If it's real memory always allow */ + if (pfn_valid(pfn)) + return true; +- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) ++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + return false; + return true; + } +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 949a871e9506..8bd25aebf488 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -517,8 +517,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) + /* br %r1 */ + _EMIT2(0x07f1); + } else { +- /* larl %r1,.+14 */ +- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,S390_lowcore.br_r1_tampoline */ + EMIT4_DISP(0x44000000, REG_0, REG_0, + offsetof(struct lowcore, br_r1_trampoline)); +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index d5525a7e119e..ee8c6290c421 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -136,6 +136,8 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + u32 microcode; ++ /* Address space bits used by the cache internally */ ++ u8 x86_cache_bits; + }; + + #define X86_VENDOR_INTEL 0 +@@ -173,9 +175,9 @@ extern const struct seq_operations cpuinfo_op; + + extern void cpu_detect(struct cpuinfo_x86 *c); + +-static inline unsigned long l1tf_pfn_limit(void) ++static inline unsigned long long l1tf_pfn_limit(void) + { +- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; ++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); + } + + extern void early_cpu_init(void); diff --git a/omitted-patches/omit-patches.sh b/omitted-patches/omit-patches.sh index f081e6e..3f1c9da 100755 --- a/omitted-patches/omit-patches.sh +++ b/omitted-patches/omit-patches.sh @@ -4,6 +4,7 @@ # ADD NEWEST FIRST +patch -F 0 -R -p1 < ../../omitted-patches/omit-4.9.125.patch patch -F 0 -R -p1 < ../../omitted-patches/omit-4.9.124.patch patch -F 0 -R -p1 < ../../omitted-patches/omit-4.9.123.patch patch -F 0 -R -p1 < ../../omitted-patches/omit-4.9.122.patch diff --git a/patch-differences/diff-4.9.125.patch b/patch-differences/diff-4.9.125.patch new file mode 100644 index 0000000..d758815 --- /dev/null +++ b/patch-differences/diff-4.9.125.patch @@ -0,0 +1,3239 @@ +diff --git a/Makefile b/Makefile +index 53d57acfc17e..aef09ca7a924 100644 +--- a/Makefile ++++ 
b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 124 ++SUBLEVEL = 125 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h +index d5da2115d78a..03d6bb0f4e13 100644 +--- a/arch/arc/include/asm/delay.h ++++ b/arch/arc/include/asm/delay.h +@@ -17,8 +17,11 @@ + #ifndef __ASM_ARC_UDELAY_H + #define __ASM_ARC_UDELAY_H + ++#include + #include /* HZ */ + ++extern unsigned long loops_per_jiffy; ++ + static inline void __delay(unsigned long loops) + { + __asm__ __volatile__( +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index bbdfeb31dee6..fefe357c3d31 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -840,7 +840,7 @@ void flush_cache_mm(struct mm_struct *mm) + void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, + unsigned long pfn) + { +- unsigned int paddr = pfn << PAGE_SHIFT; ++ phys_addr_t paddr = pfn << PAGE_SHIFT; + + u_vaddr &= PAGE_MASK; + +@@ -860,8 +860,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, + unsigned long u_vaddr) + { + /* TBD: do we really need to clear the kernel mapping */ +- __flush_dcache_page(page_address(page), u_vaddr); +- __flush_dcache_page(page_address(page), page_address(page)); ++ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr); ++ __flush_dcache_page((phys_addr_t)page_address(page), ++ (phys_addr_t)page_address(page)); + + } + +diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h +index 9d6718c1a199..3c401ce0351e 100644 +--- a/arch/arc/plat-eznps/include/plat/ctop.h ++++ b/arch/arc/plat-eznps/include/plat/ctop.h +@@ -21,6 +21,7 @@ + #error "Incorrect ctop.h include" + #endif + ++#include + #include + + /* core auxiliary registers */ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index 7f868d9bb5ed..b3d268a79f05 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -894,19 +894,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + pmd = stage2_get_pmd(kvm, cache, addr); + VM_BUG_ON(!pmd); + +- /* +- * Mapping in huge pages should only happen through a fault. If a +- * page is merged into a transparent huge page, the individual +- * subpages of that huge page should be unmapped through MMU +- * notifiers before we get here. +- * +- * Merging of CompoundPages is not supported; they should become +- * splitting first, unmapped, merged, and mapped back in on-demand. +- */ +- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); +- + old_pmd = *pmd; + if (pmd_present(old_pmd)) { ++ /* ++ * Multiple vcpus faulting on the same PMD entry, can ++ * lead to them sequentially updating the PMD with the ++ * same value. Following the break-before-make ++ * (pmd_clear() followed by tlb_flush()) process can ++ * hinder forward progress due to refaults generated ++ * on missing translations. ++ * ++ * Skip updating the page table if the entry is ++ * unchanged. ++ */ ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd)) ++ return 0; ++ ++ /* ++ * Mapping in huge pages should only happen through a ++ * fault. If a page is merged into a transparent huge ++ * page, the individual subpages of that huge page ++ * should be unmapped through MMU notifiers before we ++ * get here. ++ * ++ * Merging of CompoundPages is not supported; they ++ * should become splitting first, unmapped, merged, ++ * and mapped back in on-demand. 
++ */ ++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); ++ + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +@@ -962,6 +978,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + if (pte_present(old_pte)) { ++ /* Skip page table update if there is no change */ ++ if (pte_val(old_pte) == pte_val(*new_pte)) ++ return 0; ++ + kvm_set_pte(pte, __pte(0)); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c +index f5077ea7af6d..30bcae0aef2a 100644 +--- a/arch/arm64/kernel/probes/kprobes.c ++++ b/arch/arm64/kernel/probes/kprobes.c +@@ -274,7 +274,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: +- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); ++ pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 9d07b421f090..fa6b2fad7a3d 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -147,7 +147,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) + #ifdef CONFIG_HAVE_ARCH_PFN_VALID + int pfn_valid(unsigned long pfn) + { +- return memblock_is_map_memory(pfn << PAGE_SHIFT); ++ phys_addr_t addr = pfn << PAGE_SHIFT; ++ ++ if ((addr >> PAGE_SHIFT) != pfn) ++ return 0; ++ return memblock_is_map_memory(addr); + } + EXPORT_SYMBOL(pfn_valid); + #endif +diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c +index 8c9cbf13d32a..6054d49e608e 100644 +--- a/arch/mips/bcm47xx/setup.c ++++ b/arch/mips/bcm47xx/setup.c +@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void) + */ + if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) + cpu_wait = NULL; +- +- /* +- * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" +- * Enable ExternalSync for sync instruction to take effect +- */ +- set_c0_config7(MIPS_CONF7_ES); + break; + #endif + } +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h +index 22a6782f84f5..df78b2ca70eb 100644 +--- a/arch/mips/include/asm/mipsregs.h ++++ b/arch/mips/include/asm/mipsregs.h +@@ -663,8 +663,6 @@ + #define MIPS_CONF7_WII (_ULCAST_(1) << 31) + + #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +-/* ExternalSync */ +-#define MIPS_CONF7_ES (_ULCAST_(1) << 8) + + #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) + #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +@@ -2643,7 +2641,6 @@ __BUILD_SET_C0(status) + __BUILD_SET_C0(cause) + __BUILD_SET_C0(config) + __BUILD_SET_C0(config5) +-__BUILD_SET_C0(config7) + __BUILD_SET_C0(intcontrol) + __BUILD_SET_C0(intctl) + __BUILD_SET_C0(srsmap) +diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h +index 0d36c87acbe2..ad6f019ff776 100644 +--- a/arch/mips/include/asm/processor.h ++++ b/arch/mips/include/asm/processor.h +@@ -141,7 +141,7 @@ struct mips_fpu_struct { + + #define NUM_DSP_REGS 6 + +-typedef __u32 dspreg_t; ++typedef unsigned long dspreg_t; + + struct mips_dsp_state { + dspreg_t dspr[NUM_DSP_REGS]; +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 4f64913b4b4c..b702ba3a0df3 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -876,7 +876,7 @@ long arch_ptrace(struct task_struct *child, long request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = 
dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c +index b1e945738138..4840af169683 100644 +--- a/arch/mips/kernel/ptrace32.c ++++ b/arch/mips/kernel/ptrace32.c +@@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + goto out; + } + dregs = __get_dsp_regs(child); +- tmp = (unsigned long) (dregs[addr - DSP_BASE]); ++ tmp = dregs[addr - DSP_BASE]; + break; + } + case DSP_CONTROL: +diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c +index 111ad475aa0c..4c2483f410c2 100644 +--- a/arch/mips/lib/multi3.c ++++ b/arch/mips/lib/multi3.c +@@ -4,12 +4,12 @@ + #include "libgcc.h" + + /* +- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that +- * specific case only we'll implement it here. ++ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for ++ * that specific case only we implement that intrinsic here. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) ++#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8) + + /* multiply 64-bit values, low 64-bits returned */ + static inline long long notrace dmulu(long long a, long long b) +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c +index c0e817f35e69..bdbbc320b006 100644 +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -326,6 +326,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, + u64 imm64; + u8 *func; + u32 true_cond; ++ u32 tmp_idx; + + /* + * addrs[] maps a BPF bytecode address into a real offset from +@@ -685,11 +686,7 @@ emit_clear: + case BPF_STX | BPF_XADD | BPF_W: + /* Get EA into TMP_REG_1 */ + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not word-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); ++ tmp_idx = ctx->idx * 4; + /* load value from memory into TMP_REG_2 */ + PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + /* add value from src_reg into this */ +@@ -697,32 +694,16 @@ emit_clear: + /* store result back */ + PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); + /* we're done if this succeeded */ +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); +- /* otherwise, let's try once more */ +- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- /* exit if the store was not successful */ +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + /* *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); +- /* error if EA is not doubleword-aligned */ +- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4)); +- PPC_LI(b2p[BPF_REG_0], 0); +- PPC_JMP(exit_addr); +- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); +- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); +- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); ++ tmp_idx = ctx->idx * 4; + PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); + PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); +- PPC_LI(b2p[BPF_REG_0], 0); +- 
PPC_BCC(COND_NE, exit_addr); ++ PPC_BCC_SHORT(COND_NE, tmp_idx); + break; + + /* +diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h +index 998b61cd0e56..4b39ba700d32 100644 +--- a/arch/s390/include/asm/qdio.h ++++ b/arch/s390/include/asm/qdio.h +@@ -261,7 +261,6 @@ struct qdio_outbuf_state { + void *user; + }; + +-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00 + #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 + + #define CHSC_AC1_INITIATE_INPUTQ 0x80 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c +index 661d9fe63c43..ba2f21873cbd 100644 +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -462,6 +462,8 @@ retry: + /* No reason to continue if interrupted by SIGKILL. */ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { + fault = VM_FAULT_SIGNAL; ++ if (flags & FAULT_FLAG_RETRY_NOWAIT) ++ goto out_up; + goto out; + } + if (unlikely(fault & VM_FAULT_ERROR)) +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 949a871e9506..8bd25aebf488 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -517,8 +517,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) + /* br %r1 */ + _EMIT2(0x07f1); + } else { +- /* larl %r1,.+14 */ +- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,S390_lowcore.br_r1_tampoline */ + EMIT4_DISP(0x44000000, REG_0, REG_0, + offsetof(struct lowcore, br_r1_trampoline)); +diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c +index f576f1073378..0dac2640c3a7 100644 +--- a/arch/s390/numa/numa.c ++++ b/arch/s390/numa/numa.c +@@ -133,26 +133,14 @@ void __init numa_setup(void) + { + pr_info("NUMA mode: %s\n", mode->name); + nodes_clear(node_possible_map); ++ /* Initially attach all possible CPUs to node 0. */ ++ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); + if (mode->setup) + mode->setup(); + numa_setup_memory(); + memblock_dump_all(); + } + +-/* +- * numa_init_early() - Initialization initcall +- * +- * This runs when only one CPU is online and before the first +- * topology update is called for by the scheduler. +- */ +-static int __init numa_init_early(void) +-{ +- /* Attach all possible CPUs to node 0 for now. 
*/ +- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); +- return 0; +-} +-early_initcall(numa_init_early); +- + /* + * numa_init_late() - Initialization initcall + * +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c +index 03a1d5976ff5..87574110394d 100644 +--- a/arch/s390/pci/pci.c ++++ b/arch/s390/pci/pci.c +@@ -407,6 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + hwirq = 0; + for_each_pci_msi_entry(msi, pdev) { + rc = -EIO; ++ if (hwirq >= msi_vecs) ++ break; + irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ + if (irq < 0) + goto out_msi; +diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c +index 24384e1dc33d..a7aeb036b070 100644 +--- a/arch/sparc/kernel/pcic.c ++++ b/arch/sparc/kernel/pcic.c +@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus) + { + struct pci_dev *dev; + int i, has_io, has_mem; +- unsigned int cmd; ++ unsigned int cmd = 0; + struct linux_pcic *pcic; + /* struct linux_pbm_info* pbm = &pcic->pbm; */ + int node; +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 4669b3a931ed..cda8e14bd72a 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -101,9 +101,13 @@ define cmd_check_data_rel + done + endef + ++# We need to run two commands under "if_changed", so merge them into a ++# single invocation. ++quiet_cmd_check-and-link-vmlinux = LD $@ ++ cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) ++ + $(obj)/vmlinux: $(vmlinux-objs-y) FORCE +- $(call if_changed,check_data_rel) +- $(call if_changed,ld) ++ $(call if_changed,check-and-link-vmlinux) + + OBJCOPYFLAGS_vmlinux.bin := -R .comment -S + $(obj)/vmlinux.bin: vmlinux FORCE +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c +index b26ee32f73e8..fd4484ae3ffc 100644 +--- a/arch/x86/events/amd/ibs.c ++++ b/arch/x86/events/amd/ibs.c +@@ -578,7 +578,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) + { + struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); + struct perf_event *event = pcpu->event; +- struct hw_perf_event *hwc = &event->hw; ++ struct hw_perf_event *hwc; + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; +@@ -601,6 +601,10 @@ fail: + return 0; + } + ++ if (WARN_ON_ONCE(!event)) ++ goto fail; ++ ++ hwc = &event->hw; + msr = hwc->config_base; + buf = ibs_data.regs; + rdmsrl(msr, *buf); +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 5b1177f5a963..508a062e6cf1 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -32,7 +32,8 @@ extern inline unsigned long native_save_fl(void) + return flags; + } + +-static inline void native_restore_fl(unsigned long flags) ++extern inline void native_restore_fl(unsigned long flags); ++extern inline void native_restore_fl(unsigned long flags) + { + asm volatile("push %0 ; popf" + : /* no output */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index d5525a7e119e..ee8c6290c421 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -136,6 +136,8 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + u32 microcode; ++ /* Address space bits used by the cache internally */ ++ u8 x86_cache_bits; + }; + + #define X86_VENDOR_INTEL 0 +@@ -173,9 +175,9 @@ extern const struct seq_operations cpuinfo_op; + + extern void cpu_detect(struct cpuinfo_x86 *c); + +-static inline unsigned 
long l1tf_pfn_limit(void) ++static inline unsigned long long l1tf_pfn_limit(void) + { +- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; ++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); + } + + extern void early_cpu_init(void); +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index ac67a76550bd..8103adacbc83 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -651,6 +651,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); + enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; + EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + ++/* ++ * These CPUs all support 44bits physical address space internally in the ++ * cache but CPUID can report a smaller number of physical address bits. ++ * ++ * The L1TF mitigation uses the top most address bit for the inversion of ++ * non present PTEs. When the installed memory reaches into the top most ++ * address bit due to memory holes, which has been observed on machines ++ * which report 36bits physical address bits and have 32G RAM installed, ++ * then the mitigation range check in l1tf_select_mitigation() triggers. ++ * This is a false positive because the mitigation is still possible due to ++ * the fact that the cache uses 44bit internally. Use the cache bits ++ * instead of the reported physical bits and adjust them on the affected ++ * machines to 44bit if the reported bits are less than 44. ++ */ ++static void override_cache_bits(struct cpuinfo_x86 *c) ++{ ++ if (c->x86 != 6) ++ return; ++ ++ switch (c->x86_model) { ++ case INTEL_FAM6_NEHALEM: ++ case INTEL_FAM6_WESTMERE: ++ case INTEL_FAM6_SANDYBRIDGE: ++ case INTEL_FAM6_IVYBRIDGE: ++ case INTEL_FAM6_HASWELL_CORE: ++ case INTEL_FAM6_HASWELL_ULT: ++ case INTEL_FAM6_HASWELL_GT3E: ++ case INTEL_FAM6_BROADWELL_CORE: ++ case INTEL_FAM6_BROADWELL_GT3E: ++ case INTEL_FAM6_SKYLAKE_MOBILE: ++ case INTEL_FAM6_SKYLAKE_DESKTOP: ++ case INTEL_FAM6_KABYLAKE_MOBILE: ++ case INTEL_FAM6_KABYLAKE_DESKTOP: ++ if (c->x86_cache_bits < 44) ++ c->x86_cache_bits = 44; ++ break; ++ } ++} ++ + static void __init l1tf_select_mitigation(void) + { + u64 half_pa; +@@ -658,6 +697,8 @@ static void __init l1tf_select_mitigation(void) + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + ++ override_cache_bits(&boot_cpu_data); ++ + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: +@@ -677,14 +718,13 @@ static void __init l1tf_select_mitigation(void) + return; + #endif + +- /* +- * This is extremely unlikely to happen because almost all +- * systems have far more MAX_PA/2 than RAM can be fit into +- * DIMM slots. +- */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) { + pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); ++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", ++ half_pa); ++ pr_info("However, doing so will make a part of your RAM unusable.\n"); ++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); + return; + } + +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 13471b71bec7..dc0850bb74be 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + } + } + #endif ++ c->x86_cache_bits = c->x86_phys_bits; + } + + static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 9ad86c4bf360..cee0fec0d232 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -109,6 +109,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + ++ if (c->x86 != 6) ++ return false; ++ + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index 85f854b98a9d..3576ece9ef88 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -15,6 +15,7 @@ + #include <linux/bug.h> + #include <linux/nmi.h> + #include <linux/sysfs.h> ++#include <linux/kasan.h> + + #include <asm/stacktrace.h> + #include <asm/unwind.h> +@@ -229,7 +230,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) + * We're not going to return, but we might be on an IST stack or + * have very little stack space left. Rewind the stack and kill + * the task. ++ * Before we rewind the stack, we have to tell KASAN that we're going to ++ * reuse the task stack and that existing poisons are invalid. + */ ++ kasan_unpoison_task_stack(current); + rewind_stack_do_exit(signr); + } + NOKPROBE_SYMBOL(oops_end); +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index dffe81d3c261..a2661814bde0 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -360,6 +360,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + start_thread_common(regs, new_ip, new_sp, + __USER_CS, __USER_DS, 0); + } ++EXPORT_SYMBOL_GPL(start_thread); + + #ifdef CONFIG_COMPAT + void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index c855080c7a71..5f44d63a9d69 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -4973,8 +4973,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + + clgi(); + +- local_irq_enable(); +- + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero.
Since vmentry is serialising on affected CPUs, there +@@ -4983,6 +4981,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + */ + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + ++ local_irq_enable(); ++ + asm volatile ( + "push %%" _ASM_BP "; \n\t" + "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" +@@ -5105,12 +5105,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + +- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); +- + reload_tss(vcpu); + + local_irq_disable(); + ++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); ++ + vcpu->arch.cr2 = svm->vmcb->save.cr2; + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 12826607a995..8e4ac0a91309 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8670,9 +8670,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. + */ +-#define L1D_CACHE_ORDER 4 +-static void *vmx_l1d_flush_pages; +- + static void vmx_l1d_flush(struct kvm_vcpu *vcpu) + { + int size = PAGE_SIZE << L1D_CACHE_ORDER; +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 5d35b555115a..90801a8f19c9 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -792,7 +792,7 @@ unsigned long max_swapfile_size(void) + + if (boot_cpu_has_bug(X86_BUG_L1TF)) { + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ +- unsigned long l1tf_limit = l1tf_pfn_limit() + 1; ++ unsigned long long l1tf_limit = l1tf_pfn_limit(); + /* + * We encode swap offsets also with 3 bits below those for pfn + * which makes the usable limit higher. 
+@@ -800,7 +800,7 @@ unsigned long max_swapfile_size(void) + #if CONFIG_PGTABLE_LEVELS > 2 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; + #endif +- pages = min_t(unsigned long, l1tf_limit, pages); ++ pages = min_t(unsigned long long, l1tf_limit, pages); + } + return pages; + } +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 5aad869fa205..74609a957c49 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -138,7 +138,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) + /* If it's real memory always allow */ + if (pfn_valid(pfn)) + return true; +- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) ++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + return false; + return true; + } +diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c +index 8e2e4757adcb..5a42ae4078c2 100644 +--- a/drivers/base/power/clock_ops.c ++++ b/drivers/base/power/clock_ops.c +@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); + int of_pm_clk_add_clks(struct device *dev) + { + struct clk **clks; +- unsigned int i, count; ++ int i, count; + int ret; + + if (!dev || !dev->of_node) +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index 07b77fb102a1..987e8f503522 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -2536,7 +2536,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, + if (!CDROM_CAN(CDC_SELECT_DISC) || + (arg == CDSL_CURRENT || arg == CDSL_NONE)) + return cdi->ops->drive_status(cdi, CDSL_CURRENT); +- if (((int)arg >= cdi->capacity)) ++ if (arg >= cdi->capacity) + return -EINVAL; + return cdrom_slot_status(cdi, arg); + } +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c +index 8387c7a40bda..05671c03efe2 100644 +--- a/drivers/clk/rockchip/clk-rk3399.c ++++ b/drivers/clk/rockchip/clk-rk3399.c +@@ -629,7 +629,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { + MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, + RK3399_CLKSEL_CON(31), 0, 2, MFLAGS), + COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT, +- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS, ++ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS, + RK3399_CLKGATE_CON(8), 12, GFLAGS), + + /* uart */ +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +index a68f94daf9b6..32ab5c32834b 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +@@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work) + else + status = connector_status_disconnected; + ++ /* ++ * The bridge resets its registers on unplug. So when we get a plug ++ * event and we're already supposed to be powered, cycle the bridge to ++ * restore its state. 
++ */ ++ if (status == connector_status_connected && ++ adv7511->connector.status == connector_status_disconnected && ++ adv7511->powered) { ++ regcache_mark_dirty(adv7511->regmap); ++ adv7511_power_on(adv7511); ++ } ++ + if (adv7511->connector.status != status) { + adv7511->connector.status = status; + drm_kms_helper_hotplug_event(adv7511->connector.dev); +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c +index 3ce391c239b0..67881e5517fb 100644 +--- a/drivers/gpu/drm/imx/imx-ldb.c ++++ b/drivers/gpu/drm/imx/imx-ldb.c +@@ -634,6 +634,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + return PTR_ERR(imx_ldb->regmap); + } + ++ /* disable LDB by resetting the control register to POR default */ ++ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); ++ + imx_ldb->dev = dev; + + if (of_id) +@@ -675,14 +678,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) + if (ret || i < 0 || i > 1) + return -EINVAL; + ++ if (!of_device_is_available(child)) ++ continue; ++ + if (dual && i > 0) { + dev_warn(dev, "dual-channel mode, ignoring second output\n"); + continue; + } + +- if (!of_device_is_available(child)) +- continue; +- + channel = &imx_ldb->channel[i]; + channel->ldb = imx_ldb; + channel->chno = i; +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index 39d0fdcb17d2..6a7994a79f55 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -217,7 +217,7 @@ static int udl_fb_open(struct fb_info *info, int user) + + struct fb_deferred_io *fbdefio; + +- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); ++ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + + if (fbdefio) { + fbdefio->delay = DL_DEFIO_WRITE_DELAY; +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c +index 873f010d9616..10e2c198ad72 100644 +--- a/drivers/gpu/drm/udl/udl_main.c ++++ b/drivers/gpu/drm/udl/udl_main.c +@@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev) + struct list_head *node; + struct urb_node *unode; + struct urb *urb; +- int ret; + unsigned long flags; + + DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); + + /* keep waiting and freeing, until we've got 'em all */ + while (count--) { +- +- /* Getting interrupted means a leak, but ok at shutdown*/ +- ret = down_interruptible(&udl->urbs.limit_sem); +- if (ret) +- break; ++ down(&udl->urbs.limit_sem); + + spin_lock_irqsave(&udl->urbs.lock, flags); + +@@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev) + static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + { + struct udl_device *udl = dev->dev_private; +- int i = 0; + struct urb *urb; + struct urb_node *unode; + char *buf; ++ size_t wanted_size = count * size; + + spin_lock_init(&udl->urbs.lock); + ++retry: + udl->urbs.size = size; + INIT_LIST_HEAD(&udl->urbs.list); + +- while (i < count) { ++ sema_init(&udl->urbs.limit_sem, 0); ++ udl->urbs.count = 0; ++ udl->urbs.available = 0; ++ ++ while (udl->urbs.count * size < wanted_size) { + unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); + if (!unode) + break; +@@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + } + unode->urb = urb; + +- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL, ++ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, + &urb->transfer_dma); + if (!buf) { + kfree(unode); + usb_free_urb(urb); ++ if (size > PAGE_SIZE) 
{ ++ size /= 2; ++ udl_free_urb_list(dev); ++ goto retry; ++ } + break; + } + +@@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) + + list_add_tail(&unode->entry, &udl->urbs.list); + +- i++; ++ up(&udl->urbs.limit_sem); ++ udl->urbs.count++; ++ udl->urbs.available++; + } + +- sema_init(&udl->urbs.limit_sem, i); +- udl->urbs.count = i; +- udl->urbs.available = i; +- +- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size); ++ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size); + +- return i; ++ return udl->urbs.count; + } + + struct urb *udl_get_urb(struct drm_device *dev) +diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c +index 9e7ef5cf5d49..b2d8b63176db 100644 +--- a/drivers/i2c/busses/i2c-davinci.c ++++ b/drivers/i2c/busses/i2c-davinci.c +@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) + /* + * It's not always possible to have 1 to 2 ratio when d=7, so fall back + * to minimal possible clkh in this case. ++ * ++ * Note: ++ * CLKH is not allowed to be 0, in this case I2C clock is not generated ++ * at all + */ +- if (clk >= clkl + d) { ++ if (clk > clkl + d) { + clkh = clk - clkl - d; + clkl -= d; + } else { +- clkh = 0; ++ clkh = 1; + clkl = clk - (d << 1); + } + +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 60f5a8ded8dd..8904491dfda4 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -304,7 +304,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, + goto out; + } + +- *offset = 0; + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); + if (!cb) { + rets = -ENOMEM; +diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c +index c7427bdd3a4b..2949a381a94d 100644 +--- a/drivers/net/can/mscan/mpc5xxx_can.c ++++ b/drivers/net/can/mscan/mpc5xxx_can.c +@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, + return 0; + } + cdm = of_iomap(np_cdm, 0); ++ if (!cdm) { ++ of_node_put(np_cdm); ++ dev_err(&ofdev->dev, "can't map clock node!\n"); ++ return 0; ++ } + + if (in_8(&cdm->ipb_clk_sel) & 0x1) + freq *= 2; +diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig +index 5b7658bcf020..5c3ef9fc8207 100644 +--- a/drivers/net/ethernet/3com/Kconfig ++++ b/drivers/net/ethernet/3com/Kconfig +@@ -32,7 +32,7 @@ config EL3 + + config 3C515 + tristate "3c515 ISA \"Fast EtherLink\"" +- depends on ISA && ISA_DMA_API ++ depends on ISA && ISA_DMA_API && !PPC32 + ---help--- + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet + network card, say Y here. +diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig +index 0038709fd317..ec59425fdbff 100644 +--- a/drivers/net/ethernet/amd/Kconfig ++++ b/drivers/net/ethernet/amd/Kconfig +@@ -44,7 +44,7 @@ config AMD8111_ETH + + config LANCE + tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. + Some LinkSys cards are of this type. +@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN + + config NI65 + tristate "NI6510 support" +- depends on ISA && ISA_DMA_API && !ARM ++ depends on ISA && ISA_DMA_API && !ARM && !PPC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y here. 
+ +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +index a3200ea6d765..85e7177c479f 100644 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -1678,6 +1678,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) + skb = build_skb(page_address(page) + adapter->rx_page_offset, + adapter->rx_frag_size); + if (likely(skb)) { ++ skb_reserve(skb, NET_SKB_PAD); + adapter->rx_page_offset += adapter->rx_frag_size; + if (adapter->rx_page_offset >= PAGE_SIZE) + adapter->rx_page = NULL; +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +index 5f19427c7b27..8aecd8ef6542 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +@@ -3367,14 +3367,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } else if ((info->flow_type == UDP_V6_FLOW) && + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); +- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_rss(bp, &bp->rss_conf_obj, false, ++ true); + } + return 0; + +@@ -3488,7 +3492,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + } + +- return bnx2x_config_rss_eth(bp, false); ++ if (bp->state == BNX2X_STATE_OPEN) ++ return bnx2x_config_rss_eth(bp, false); ++ ++ return 0; + } + + /** +diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig +index 5ab912937aff..ec0b545197e2 100644 +--- a/drivers/net/ethernet/cirrus/Kconfig ++++ b/drivers/net/ethernet/cirrus/Kconfig +@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS + config CS89x0 + tristate "CS89x0 support" + depends on ISA || EISA || ARM ++ depends on !PPC32 + ---help--- + Support for CS89x0 chipset based Ethernet cards. 
If you have a + network (Ethernet) card of this type, say Y and read the file +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 2e9bab45d419..f7e7b79c6050 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -1842,10 +1842,32 @@ static int enic_stop(struct net_device *netdev) + return 0; + } + ++static int _enic_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ bool running = netif_running(netdev); ++ int err = 0; ++ ++ ASSERT_RTNL(); ++ if (running) { ++ err = enic_stop(netdev); ++ if (err) ++ return err; ++ } ++ ++ netdev->mtu = new_mtu; ++ ++ if (running) { ++ err = enic_open(netdev); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ + static int enic_change_mtu(struct net_device *netdev, int new_mtu) + { + struct enic *enic = netdev_priv(netdev); +- int running = netif_running(netdev); + + if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) + return -EINVAL; +@@ -1853,20 +1875,12 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + return -EOPNOTSUPP; + +- if (running) +- enic_stop(netdev); +- +- netdev->mtu = new_mtu; +- + if (netdev->mtu > enic->port_mtu) + netdev_warn(netdev, +- "interface MTU (%d) set higher than port MTU (%d)\n", +- netdev->mtu, enic->port_mtu); ++ "interface MTU (%d) set higher than port MTU (%d)\n", ++ netdev->mtu, enic->port_mtu); + +- if (running) +- enic_open(netdev); +- +- return 0; ++ return _enic_change_mtu(netdev, new_mtu); + } + + static void enic_change_mtu_work(struct work_struct *work) +@@ -1874,47 +1888,9 @@ static void enic_change_mtu_work(struct work_struct *work) + struct enic *enic = container_of(work, struct enic, change_mtu_work); + struct net_device *netdev = enic->netdev; + int new_mtu = vnic_dev_mtu(enic->vdev); +- int err; +- unsigned int i; +- +- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); + + rtnl_lock(); +- +- /* Stop RQ */ +- del_timer_sync(&enic->notify_timer); +- +- for (i = 0; i < enic->rq_count; i++) +- napi_disable(&enic->napi[i]); +- +- vnic_intr_mask(&enic->intr[0]); +- enic_synchronize_irqs(enic); +- err = vnic_rq_disable(&enic->rq[0]); +- if (err) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to disable RQ.\n"); +- return; +- } +- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); +- vnic_cq_clean(&enic->cq[0]); +- vnic_intr_clean(&enic->intr[0]); +- +- /* Fill RQ with new_mtu-sized buffers */ +- netdev->mtu = new_mtu; +- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); +- /* Need at least one buffer on ring to get going */ +- if (vnic_rq_desc_used(&enic->rq[0]) == 0) { +- rtnl_unlock(); +- netdev_err(netdev, "Unable to alloc receive buffers.\n"); +- return; +- } +- +- /* Start RQ */ +- vnic_rq_enable(&enic->rq[0]); +- napi_enable(&enic->napi[0]); +- vnic_intr_unmask(&enic->intr[0]); +- enic_notify_timer_start(enic); +- ++ (void)_enic_change_mtu(netdev, new_mtu); + rtnl_unlock(); + + netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c +index ddd410a91e13..715776e2cfe5 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c +@@ -313,7 +313,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { +- u32 *p_bins = (u32 *)p_params->bins; ++ u32 
*p_bins = p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); + } +@@ -1182,8 +1182,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) + { +- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct vport_update_ramrod_data *p_ramrod = NULL; ++ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u8 abs_vport_id = 0; +@@ -1219,26 +1219,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); +- memset(bins, 0, sizeof(unsigned long) * +- ETH_MULTICAST_MAC_BINS_IN_REGS); ++ memset(bins, 0, sizeof(bins)); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { +- u32 bit; ++ u32 bit, nbits; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, bins); ++ nbits = sizeof(u32) * BITS_PER_BYTE; ++ bins[bit / nbits] |= 1 << (bit % nbits); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; +- u32 *p_bins = (u32 *)bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; +- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); ++ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h +index e495d62fcc03..14d00173cad0 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h +@@ -156,7 +156,7 @@ struct qed_sp_vport_update_params { + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; +- unsigned long bins[8]; ++ u32 bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +index 8b7d2f963ee1..eaa242df4131 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +@@ -613,6 +613,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + break; + default: + p_link->speed = 0; ++ p_link->link_up = 0; + } + + if (p_link->link_up && p_link->speed) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +index 48bc5c151336..6379bfedc9f0 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +@@ -2157,7 +2157,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index 0645124a887b..faf8215872de 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -786,7 +786,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + resp_size += sizeof(struct pfvf_def_resp_tlv); + + memcpy(p_mcast_tlv->bins, p_params->bins, +- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); ++ 
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; +@@ -972,7 +972,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); +- __set_bit(bit, sp_params.bins); ++ sp_params.bins[bit / 32] |= 1 << (bit % 32); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h +index 35db7a28aa13..b962ef8e98ef 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h +@@ -336,7 +336,12 @@ struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + +- u64 bins[8]; ++ /* There are only 256 approx bins, and in HSI they're divided into ++ * 32-bit values. As old VFs used to set-bit to the values on its side, ++ * the upper half of the array is never expected to contain any data. ++ */ ++ u64 bins[4]; ++ u64 obsolete_bins[4]; + }; + + struct vfpf_vport_update_accept_param_tlv { +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +index 63307ea97846..9beea13e2e1f 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +@@ -217,6 +217,7 @@ issue: + ret = of_mdiobus_register(bus, np1); + if (ret) { + mdiobus_free(bus); ++ lp->mii_bus = NULL; + return ret; + } + return 0; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 31a6d87b61b2..0d4440f28f6b 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -946,7 +946,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ +- {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ +diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c +index 299140c04556..04b60ed59ea0 100644 +--- a/drivers/net/wan/lmc/lmc_main.c ++++ b/drivers/net/wan/lmc/lmc_main.c +@@ -1372,7 +1372,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ + case 0x001: + printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); + break; +- case 0x010: ++ case 0x002: + printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); + break; + default: +diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c +index cb987c2ecc6b..87131f663292 100644 +--- a/drivers/net/wireless/broadcom/b43/leds.c ++++ b/drivers/net/wireless/broadcom/b43/leds.c +@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + led->wl = dev->wl; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + atomic_set(&led->state, 0); + + led->led_dev.name = led->name; +diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c +index fd4565389c77..bc922118b6ac 100644 +--- 
a/drivers/net/wireless/broadcom/b43legacy/leds.c ++++ b/drivers/net/wireless/broadcom/b43legacy/leds.c +@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev, + led->dev = dev; + led->index = led_index; + led->activelow = activelow; +- strncpy(led->name, name, sizeof(led->name)); ++ strlcpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index a4e9f430d452..e2cca91fd266 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, + const char *name; + int i, ret; + +- if (group > info->ngroups) ++ if (group >= info->ngroups) + return; + + seq_puts(s, "\n"); +diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c +index edb36bf781b0..f627b39f64bf 100644 +--- a/drivers/power/supply/generic-adc-battery.c ++++ b/drivers/power/supply/generic-adc-battery.c +@@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev) + struct power_supply_desc *psy_desc; + struct power_supply_config psy_cfg = {}; + struct gab_platform_data *pdata = pdev->dev.platform_data; +- enum power_supply_property *properties; + int ret = 0; + int chan; +- int index = 0; ++ int index = ARRAY_SIZE(gab_props); ++ bool any = false; + + adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL); + if (!adc_bat) { +@@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev) + } + + memcpy(psy_desc->properties, gab_props, sizeof(gab_props)); +- properties = (enum power_supply_property *) +- ((char *)psy_desc->properties + sizeof(gab_props)); + + /* + * getting channel from iio and copying the battery properties +@@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev) + adc_bat->channel[chan] = NULL; + } else { + /* copying properties for supported channels only */ +- memcpy(properties + sizeof(*(psy_desc->properties)) * index, +- &gab_dyn_props[chan], +- sizeof(gab_dyn_props[chan])); +- index++; ++ int index2; ++ ++ for (index2 = 0; index2 < index; index2++) { ++ if (psy_desc->properties[index2] == ++ gab_dyn_props[chan]) ++ break; /* already known */ ++ } ++ if (index2 == index) /* really new */ ++ psy_desc->properties[index++] = ++ gab_dyn_props[chan]; ++ any = true; + } + } + + /* none of the channels are supported so let's bail out */ +- if (index == 0) { ++ if (!any) { + ret = -ENODEV; + goto second_mem_fail; + } +@@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev) + * as come channels may be not be supported by the device.So + * we need to take care of that. 
+ */ +- psy_desc->num_properties = ARRAY_SIZE(gab_props) + index; ++ psy_desc->num_properties = index; + + adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg); + if (IS_ERR(adc_bat->psy)) { +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c +index 66e9bb053629..18ab84e9c6b2 100644 +--- a/drivers/s390/cio/qdio_main.c ++++ b/drivers/s390/cio/qdio_main.c +@@ -640,21 +640,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + unsigned long phys_aob = 0; + + if (!q->use_cq) +- goto out; ++ return 0; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { +- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + WARN_ON_ONCE(phys_aob & 0xFF); + } + +-out: ++ q->sbal_state[bufnr].flags = 0; + return phys_aob; + } + +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index dcf36537a767..cc3994d4e7bc 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -755,9 +755,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + case ELS_LOGO: + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) +- return -EINVAL; ++ goto drop; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) +- return -EINVAL; ++ goto drop; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c +index 97aeaddd600d..e3ffd244603e 100644 +--- a/drivers/scsi/libfc/fc_rport.c ++++ b/drivers/scsi/libfc/fc_rport.c +@@ -1935,6 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + ++ rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_STOP); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index c2b682916337..cc8f2a7c2463 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -283,11 +283,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] " + "rejected.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + /* +@@ -296,10 +296,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) + */ + if (conn->session->fast_abort) { + iscsi_conn_printk(KERN_INFO, conn, +- "task [op %x/%x itt " ++ "task [op %x itt " + "0x%x/0x%x] fast abort.\n", +- task->hdr->opcode, opcode, +- task->itt, task->hdr_itt); ++ opcode, task->itt, ++ task->hdr_itt); + return -EACCES; + } + break; +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 3a6f557ec128..56b65b85b121 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -709,8 +709,24 @@ static ssize_t + sdev_store_delete(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +- if (device_remove_file_self(dev, attr)) +- scsi_remove_device(to_scsi_device(dev)); ++ struct kernfs_node *kn; ++ ++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); 
++ WARN_ON_ONCE(!kn); ++ /* ++ * Concurrent writes into the "delete" sysfs attribute may trigger ++ * concurrent calls to device_remove_file() and scsi_remove_device(). ++ * device_remove_file() handles concurrent removal calls by ++ * serializing these and by ignoring the second and later removal ++ * attempts. Concurrent calls of scsi_remove_device() are ++ * serialized. The second and later calls of scsi_remove_device() are ++ * ignored because the first call of that function changes the device ++ * state into SDEV_DEL. ++ */ ++ device_remove_file(dev, attr); ++ scsi_remove_device(to_scsi_device(dev)); ++ if (kn) ++ sysfs_unbreak_active_protection(kn); + return count; + }; + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c +index 15ca09cd16f3..874e9f085326 100644 +--- a/drivers/scsi/vmw_pvscsi.c ++++ b/drivers/scsi/vmw_pvscsi.c +@@ -564,9 +564,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, + (btstat == BTSTAT_SUCCESS || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED || + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { +- cmd->result = (DID_OK << 16) | sdstat; +- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) +- cmd->result |= (DRIVER_SENSE << 24); ++ if (sdstat == SAM_STAT_COMMAND_TERMINATED) { ++ cmd->result = (DID_RESET << 16); ++ } else { ++ cmd->result = (DID_OK << 16) | sdstat; ++ if (sdstat == SAM_STAT_CHECK_CONDITION && ++ cmd->sense_buffer) ++ cmd->result |= (DRIVER_SENSE << 24); ++ } + } else + switch (btstat) { + case BTSTAT_SUCCESS: +diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c +index 2b700e8455c6..e3596855a703 100644 +--- a/drivers/staging/android/ion/ion-ioctl.c ++++ b/drivers/staging/android/ion/ion-ioctl.c +@@ -128,11 +128,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + struct ion_handle *handle; + +- handle = ion_handle_get_by_id(client, data.handle.handle); +- if (IS_ERR(handle)) ++ mutex_lock(&client->lock); ++ handle = ion_handle_get_by_id_nolock(client, data.handle.handle); ++ if (IS_ERR(handle)) { ++ mutex_unlock(&client->lock); + return PTR_ERR(handle); +- data.fd.fd = ion_share_dma_buf_fd(client, handle); +- ion_handle_put(handle); ++ } ++ data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle); ++ ion_handle_put_nolock(handle); ++ mutex_unlock(&client->lock); + if (data.fd.fd < 0) + ret = data.fd.fd; + break; +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c +index 6f9974cb0e15..806e9b30b9dc 100644 +--- a/drivers/staging/android/ion/ion.c ++++ b/drivers/staging/android/ion/ion.c +@@ -15,6 +15,7 @@ + * + */ + ++#include <linux/atomic.h> + #include <linux/device.h> + #include <linux/err.h> + #include <linux/file.h> +@@ -305,6 +306,16 @@ static void ion_handle_get(struct ion_handle *handle) + kref_get(&handle->ref); + } + ++/* Must hold the client lock */ ++static struct ion_handle *ion_handle_get_check_overflow( ++ struct ion_handle *handle) ++{ ++ if (atomic_read(&handle->ref.refcount) + 1 == 0) ++ return ERR_PTR(-EOVERFLOW); ++ ion_handle_get(handle); ++ return handle; ++} ++ + int ion_handle_put_nolock(struct ion_handle *handle) + { + return kref_put(&handle->ref, ion_handle_destroy); +@@ -347,21 +358,9 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, + + handle = idr_find(&client->idr, id); + if (handle) +- ion_handle_get(handle); +- +- return handle ?
handle : ERR_PTR(-EINVAL); +-} +- +-struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +- int id) +-{ +- struct ion_handle *handle; ++ return ion_handle_get_check_overflow(handle); + +- mutex_lock(&client->lock); +- handle = ion_handle_get_by_id_nolock(client, id); +- mutex_unlock(&client->lock); +- +- return handle; ++ return ERR_PTR(-EINVAL); + } + + static bool ion_handle_validate(struct ion_client *client, +@@ -1029,24 +1028,28 @@ static struct dma_buf_ops dma_buf_ops = { + .kunmap = ion_dma_buf_kunmap, + }; + +-struct dma_buf *ion_share_dma_buf(struct ion_client *client, +- struct ion_handle *handle) ++static struct dma_buf *__ion_share_dma_buf(struct ion_client *client, ++ struct ion_handle *handle, ++ bool lock_client) + { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct ion_buffer *buffer; + struct dma_buf *dmabuf; + bool valid_handle; + +- mutex_lock(&client->lock); ++ if (lock_client) ++ mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); +- mutex_unlock(&client->lock); ++ if (lock_client) ++ mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + ion_buffer_get(buffer); +- mutex_unlock(&client->lock); ++ if (lock_client) ++ mutex_unlock(&client->lock); + + exp_info.ops = &dma_buf_ops; + exp_info.size = buffer->size; +@@ -1061,14 +1064,21 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, + + return dmabuf; + } ++ ++struct dma_buf *ion_share_dma_buf(struct ion_client *client, ++ struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf(client, handle, true); ++} + EXPORT_SYMBOL(ion_share_dma_buf); + +-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) ++static int __ion_share_dma_buf_fd(struct ion_client *client, ++ struct ion_handle *handle, bool lock_client) + { + struct dma_buf *dmabuf; + int fd; + +- dmabuf = ion_share_dma_buf(client, handle); ++ dmabuf = __ion_share_dma_buf(client, handle, lock_client); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + +@@ -1078,8 +1088,19 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) + + return fd; + } ++ ++int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf_fd(client, handle, true); ++} + EXPORT_SYMBOL(ion_share_dma_buf_fd); + ++int ion_share_dma_buf_fd_nolock(struct ion_client *client, ++ struct ion_handle *handle) ++{ ++ return __ion_share_dma_buf_fd(client, handle, false); ++} ++ + struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf) + { +@@ -1100,7 +1121,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { +- ion_handle_get(handle); ++ handle = ion_handle_get_check_overflow(handle); + mutex_unlock(&client->lock); + goto end; + } +diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h +index 3c3b3245275d..760e41885448 100644 +--- a/drivers/staging/android/ion/ion_priv.h ++++ b/drivers/staging/android/ion/ion_priv.h +@@ -463,11 +463,11 @@ void ion_free_nolock(struct ion_client *client, struct ion_handle *handle); + + int ion_handle_put_nolock(struct ion_handle *handle); + +-struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +- int id); +- + int ion_handle_put(struct ion_handle *handle); + + 
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query); + ++int ion_share_dma_buf_fd_nolock(struct ion_client *client, ++ struct ion_handle *handle); ++ + #endif /* _ION_PRIV_H */ +diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c +index c16927ac8eb0..395c7a2244ff 100644 +--- a/drivers/staging/media/omap4iss/iss_video.c ++++ b/drivers/staging/media/omap4iss/iss_video.c +@@ -11,7 +11,6 @@ + * (at your option) any later version. + */ + +-#include <asm/cacheflush.h> + #include <linux/clk.h> + #include <linux/mm.h> + #include <linux/pagemap.h> +@@ -24,6 +23,8 @@ + #include <media/v4l2-dev.h> + #include <media/v4l2-ioctl.h> + ++#include <asm/cacheflush.h> ++ + #include "iss_video.h" + #include "iss.h" + +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 9ccd5da8f204..d2f82aaf6a85 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -333,8 +333,7 @@ static int iscsi_login_zero_tsih_s1( + pr_err("idr_alloc() for sess_idr failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); +- kfree(sess); +- return -ENOMEM; ++ goto free_sess; + } + + sess->creation_time = get_jiffies_64(); +@@ -350,20 +349,28 @@ static int iscsi_login_zero_tsih_s1( + ISCSI_LOGIN_STATUS_NO_RESOURCES); + pr_err("Unable to allocate memory for" + " struct iscsi_sess_ops.\n"); +- kfree(sess); +- return -ENOMEM; ++ goto remove_idr; + } + + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); + if (IS_ERR(sess->se_sess)) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); +- kfree(sess->sess_ops); +- kfree(sess); +- return -ENOMEM; ++ goto free_ops; + } + + return 0; ++ ++free_ops: ++ kfree(sess->sess_ops); ++remove_idr: ++ spin_lock_bh(&sess_idr_lock); ++ idr_remove(&sess_idr, sess->session_index); ++ spin_unlock_bh(&sess_idr_lock); ++free_sess: ++ kfree(sess); ++ conn->sess = NULL; ++ return -ENOMEM; + } + + static int iscsi_login_zero_tsih_s2( +@@ -1152,13 +1159,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, + ISCSI_LOGIN_STATUS_INIT_ERR); + if (!zero_tsih || !conn->sess) + goto old_sess_out; +- if (conn->sess->se_sess) +- transport_free_session(conn->sess->se_sess); +- if (conn->sess->session_index != 0) { +- spin_lock_bh(&sess_idr_lock); +- idr_remove(&sess_idr, conn->sess->session_index); +- spin_unlock_bh(&sess_idr_lock); +- } ++ ++ transport_free_session(conn->sess->se_sess); ++ ++ spin_lock_bh(&sess_idr_lock); ++ idr_remove(&sess_idr, conn->sess->session_index); ++ spin_unlock_bh(&sess_idr_lock); ++ + kfree(conn->sess->sess_ops); + kfree(conn->sess); + conn->sess = NULL; +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index 5474b5187be0..f4bd08cfac11 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -929,14 +929,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = { + }; + + struct cntrl_cur_lay3 { +- __u32 dCUR; ++ __le32 dCUR; + }; + + struct cntrl_range_lay3 { +- __u16 wNumSubRanges; +- __u32 dMIN; +- __u32 dMAX; +- __u32 dRES; ++ __le16 wNumSubRanges; ++ __le32 dMIN; ++ __le32 dMAX; ++ __le32 dRES; + } __packed; + + static inline void +@@ -1285,9 +1285,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) + memset(&c, 0, sizeof(struct cntrl_cur_lay3)); + + if (entity_id == USB_IN_CLK_ID) +- c.dCUR = p_srate; ++ c.dCUR = cpu_to_le32(p_srate); + else if (entity_id == USB_OUT_CLK_ID) +- c.dCUR = c_srate; ++ c.dCUR =
cpu_to_le32(c_srate); + + value = min_t(unsigned, w_length, sizeof c); + memcpy(req->buf, &c, value); +@@ -1325,15 +1325,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr) + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + if (entity_id == USB_IN_CLK_ID) +- r.dMIN = p_srate; ++ r.dMIN = cpu_to_le32(p_srate); + else if (entity_id == USB_OUT_CLK_ID) +- r.dMIN = c_srate; ++ r.dMIN = cpu_to_le32(c_srate); + else + return -EOPNOTSUPP; + + r.dMAX = r.dMIN; + r.dRES = 0; +- r.wNumSubRanges = 1; ++ r.wNumSubRanges = cpu_to_le16(1); + + value = min_t(unsigned, w_length, sizeof r); + memcpy(req->buf, &r, value); +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c +index f2c8862093a2..230e3248f386 100644 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c +@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597) + + r8a66597_bset(r8a66597, XCKE, SYSCFG0); + +- msleep(3); ++ mdelay(3); + + r8a66597_bset(r8a66597, PLLC, SYSCFG0); + +- msleep(1); ++ mdelay(1); + + r8a66597_bset(r8a66597, SCKE, SYSCFG0); + +@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock) + r8a66597->ep0_req->length = 2; + /* AV: what happens if we get called again before that gets through? */ + spin_unlock(&r8a66597->lock); +- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); ++ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC); + spin_lock(&r8a66597->lock); + } + +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c +index 94eb2923afed..85d031ce85c1 100644 +--- a/drivers/usb/phy/phy-fsl-usb.c ++++ b/drivers/usb/phy/phy-fsl-usb.c +@@ -879,6 +879,7 @@ int usb_otg_start(struct platform_device *pdev) + if (pdata->init && pdata->init(pdev) != 0) + return -EINVAL; + ++#ifdef CONFIG_PPC32 + if (pdata->big_endian_mmio) { + _fsl_readl = _fsl_readl_be; + _fsl_writel = _fsl_writel_be; +@@ -886,6 +887,7 @@ int usb_otg_start(struct platform_device *pdev) + _fsl_readl = _fsl_readl_le; + _fsl_writel = _fsl_writel_le; + } ++#endif + + /* request irq */ + p_otg->irq = platform_get_irq(pdev, 0); +@@ -976,7 +978,7 @@ int usb_otg_start(struct platform_device *pdev) + /* + * state file in sysfs + */ +-static int show_fsl_usb2_otg_state(struct device *dev, ++static ssize_t show_fsl_usb2_otg_state(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct otg_fsm *fsm = &fsl_otg_dev->fsm; +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index 41df8a27d7eb..2026885702a2 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -195,7 +195,6 @@ wait_for_old_object: + pr_err("\n"); + pr_err("Error: Unexpected object collision\n"); + cachefiles_printk_object(object, xobject); +- BUG(); + } + atomic_inc(&xobject->usage); + write_unlock(&cache->active_lock); +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index afbdc418966d..5e3bc9de7a16 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, + struct cachefiles_one_read *monitor = + container_of(wait, struct cachefiles_one_read, monitor); + struct cachefiles_object *object; ++ struct fscache_retrieval *op = monitor->op; + struct wait_bit_key *key = _key; + struct page *page = wait->private; + +@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, + list_del(&wait->task_list); + + /* move onto the action list and queue for FS-Cache thread pool */ +- 
ASSERT(monitor->op); ++ ASSERT(op); + +- object = container_of(monitor->op->op.object, +- struct cachefiles_object, fscache); ++ /* We need to temporarily bump the usage count as we don't own a ref ++ * here otherwise cachefiles_read_copier() may free the op between the ++ * monitor being enqueued on the op->to_do list and the op getting ++ * enqueued on the work queue. ++ */ ++ fscache_get_retrieval(op); + ++ object = container_of(op->op.object, struct cachefiles_object, fscache); + spin_lock(&object->work_lock); +- list_add_tail(&monitor->op_link, &monitor->op->to_do); ++ list_add_tail(&monitor->op_link, &op->to_do); + spin_unlock(&object->work_lock); + +- fscache_enqueue_retrieval(monitor->op); ++ fscache_enqueue_retrieval(op); ++ fscache_put_retrieval(op); + return 0; + } + +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index 3d03e48a9213..ad8bd96093f7 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) + seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); + seq_printf(m, "Features:"); + #ifdef CONFIG_CIFS_DFS_UPCALL +- seq_printf(m, " dfs"); ++ seq_printf(m, " DFS"); + #endif + #ifdef CONFIG_CIFS_FSCACHE +- seq_printf(m, " fscache"); ++ seq_printf(m, ",FSCACHE"); ++#endif ++#ifdef CONFIG_CIFS_SMB_DIRECT ++ seq_printf(m, ",SMB_DIRECT"); ++#endif ++#ifdef CONFIG_CIFS_STATS2 ++ seq_printf(m, ",STATS2"); ++#elif defined(CONFIG_CIFS_STATS) ++ seq_printf(m, ",STATS"); ++#endif ++#ifdef CONFIG_CIFS_DEBUG2 ++ seq_printf(m, ",DEBUG2"); ++#elif defined(CONFIG_CIFS_DEBUG) ++ seq_printf(m, ",DEBUG"); ++#endif ++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY ++ seq_printf(m, ",ALLOW_INSECURE_LEGACY"); + #endif + #ifdef CONFIG_CIFS_WEAK_PW_HASH +- seq_printf(m, " lanman"); ++ seq_printf(m, ",WEAK_PW_HASH"); + #endif + #ifdef CONFIG_CIFS_POSIX +- seq_printf(m, " posix"); ++ seq_printf(m, ",CIFS_POSIX"); + #endif + #ifdef CONFIG_CIFS_UPCALL +- seq_printf(m, " spnego"); ++ seq_printf(m, ",UPCALL(SPNEGO)"); + #endif + #ifdef CONFIG_CIFS_XATTR +- seq_printf(m, " xattr"); ++ seq_printf(m, ",XATTR"); + #endif + #ifdef CONFIG_CIFS_ACL +- seq_printf(m, " acl"); ++ seq_printf(m, ",ACL"); + #endif + seq_putc(m, '\n'); + seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 24c19eb94fa3..a012f70bba5c 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -1116,6 +1116,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, + if (!server->ops->set_file_info) + return -ENOSYS; + ++ info_buf.Pad = 0; ++ + if (attrs->ia_valid & ATTR_ATIME) { + set_time = true; + info_buf.LastAccessTime = +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index d031af8d3d4d..38d26cbcad07 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -419,7 +419,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int buf_type = CIFS_NO_BUFFER; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_II; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct smb2_file_all_info *pfile_info = NULL; + + oparms.tcon = tcon; +@@ -481,7 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, + struct cifs_io_parms io_parms; + int create_options = CREATE_NOT_DIR; + __le16 *utf16_path; +- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE; ++ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; + struct kvec iov[2]; + + if (backup_cred(cifs_sb)) +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c +index 
c3db2a882aee..bb208076cb71 100644 +--- a/fs/cifs/sess.c ++++ b/fs/cifs/sess.c +@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, + goto setup_ntlmv2_ret; + } + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL); ++ if (!*pbuffer) { ++ rc = -ENOMEM; ++ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); ++ *buflen = 0; ++ goto setup_ntlmv2_ret; ++ } + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; + + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c +index 1238cd3552f9..0267d8cbc996 100644 +--- a/fs/cifs/smb2inode.c ++++ b/fs/cifs/smb2inode.c +@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path, + int rc; + + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && +- (buf->LastWriteTime == 0) && (buf->ChangeTime) && ++ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && + (buf->Attributes == 0)) + return 0; /* would be a no op, no sense sending this */ + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 812e4884c392..68622f1e706b 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -894,6 +894,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, + + } + ++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ ++#define GMT_TOKEN_SIZE 50 ++ ++/* ++ * Input buffer contains (empty) struct smb_snapshot array with size filled in ++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 ++ */ + static int + smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + struct cifsFileInfo *cfile, void __user *ioc_buf) +@@ -922,14 +929,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, + kfree(retbuf); + return rc; + } +- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { +- rc = -ERANGE; +- kfree(retbuf); +- return rc; +- } + +- if (ret_data_len > snapshot_in.snapshot_array_size) +- ret_data_len = snapshot_in.snapshot_array_size; ++ /* ++ * Check for min size, ie not large enough to fit even one GMT ++ * token (snapshot). On the first ioctl some users may pass in ++ * smaller size (or zero) to simply get the size of the array ++ * so the user space caller can allocate sufficient memory ++ * and retry the ioctl again with larger array size sufficient ++ * to hold all of the snapshot GMT tokens on the second try. 
++ */ ++ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) ++ ret_data_len = sizeof(struct smb_snapshot_array); ++ ++ /* ++ * We return struct SRV_SNAPSHOT_ARRAY, followed by ++ * the snapshot array (of 50 byte GMT tokens) each ++ * representing an available previous version of the data ++ */ ++ if (ret_data_len > (snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array))) ++ ret_data_len = snapshot_in.snapshot_array_size + ++ sizeof(struct smb_snapshot_array); + + if (copy_to_user(ioc_buf, retbuf, ret_data_len)) + rc = -EFAULT; +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 248c43b63f13..a225a21d04ad 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1415,6 +1415,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, + goto cleanup_and_exit; + dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " + "falling back\n")); ++ ret = NULL; + } + nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); + if (!nblocks) { +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c +index 5dc655e410b4..54942d60e72a 100644 +--- a/fs/ext4/sysfs.c ++++ b/fs/ext4/sysfs.c +@@ -277,8 +277,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj, + case attr_pointer_ui: + if (!ptr) + return 0; +- return snprintf(buf, PAGE_SIZE, "%u\n", +- *((unsigned int *) ptr)); ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ le32_to_cpup(ptr)); ++ else ++ return snprintf(buf, PAGE_SIZE, "%u\n", ++ *((unsigned int *) ptr)); + case attr_pointer_atomic: + if (!ptr) + return 0; +@@ -311,7 +315,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj, + ret = kstrtoul(skip_spaces(buf), 0, &t); + if (ret) + return ret; +- *((unsigned int *) ptr) = t; ++ if (a->attr_ptr == ptr_ext4_super_block_offset) ++ *((__le32 *) ptr) = cpu_to_le32(t); ++ else ++ *((unsigned int *) ptr) = t; + return len; + case attr_inode_readahead: + return inode_readahead_blks_store(a, sbi, buf, len); +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 3fadfabcac39..fdcbe0f2814f 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -184,6 +184,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end, + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); + if ((void *)next >= end) + return -EFSCORRUPTED; ++ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) ++ return -EFSCORRUPTED; + e = next; + } + +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index de67745e1cd7..77946d6f617d 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERT(op->processor != NULL); + ASSERT(fscache_object_is_available(op->object)); + ASSERTCMP(atomic_read(&op->usage), >, 0); +- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); ++ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, ++ op->state, ==, FSCACHE_OP_ST_CANCELLED); + + fscache_stat(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { +@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op) + struct fscache_cache *cache; + + _enter("{OBJ%x OP%x,%d}", +- op->object->debug_id, op->debug_id, atomic_read(&op->usage)); ++ op->object ? 
op->object->debug_id : 0, ++ op->debug_id, atomic_read(&op->usage)); + + ASSERTCMP(atomic_read(&op->usage), >, 0); + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index f11792672977..c94bab6103f5 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -130,6 +130,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) + return !fc->initialized || (for_background && fc->blocked); + } + ++static void fuse_drop_waiting(struct fuse_conn *fc) ++{ ++ if (fc->connected) { ++ atomic_dec(&fc->num_waiting); ++ } else if (atomic_dec_and_test(&fc->num_waiting)) { ++ /* wake up aborters */ ++ wake_up_all(&fc->blocked_waitq); ++ } ++} ++ + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + bool for_background) + { +@@ -170,7 +180,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, + return req; + + out: +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + return ERR_PTR(err); + } + +@@ -277,7 +287,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) + + if (test_bit(FR_WAITING, &req->flags)) { + __clear_bit(FR_WAITING, &req->flags); +- atomic_dec(&fc->num_waiting); ++ fuse_drop_waiting(fc); + } + + if (req->stolen_file) +@@ -363,7 +373,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + struct fuse_iqueue *fiq = &fc->iq; + + if (test_and_set_bit(FR_FINISHED, &req->flags)) +- return; ++ goto put_request; + + spin_lock(&fiq->waitq.lock); + list_del_init(&req->intr_entry); +@@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) + wake_up(&req->waitq); + if (req->end) + req->end(fc, req); ++put_request: + fuse_put_request(fc, req); + } + +@@ -1935,11 +1946,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + if (!fud) + return -EPERM; + ++ pipe_lock(pipe); ++ + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); +- if (!bufs) ++ if (!bufs) { ++ pipe_unlock(pipe); + return -ENOMEM; ++ } + +- pipe_lock(pipe); + nbuf = 0; + rem = 0; + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) +@@ -2094,6 +2108,7 @@ void fuse_abort_conn(struct fuse_conn *fc) + set_bit(FR_ABORTED, &req->flags); + if (!test_bit(FR_LOCKED, &req->flags)) { + set_bit(FR_PRIVATE, &req->flags); ++ __fuse_get_request(req); + list_move(&req->list, &to_end1); + } + spin_unlock(&req->waitq.lock); +@@ -2120,7 +2135,6 @@ void fuse_abort_conn(struct fuse_conn *fc) + + while (!list_empty(&to_end1)) { + req = list_first_entry(&to_end1, struct fuse_req, list); +- __fuse_get_request(req); + list_del_init(&req->list); + request_end(fc, req); + } +@@ -2131,6 +2145,11 @@ void fuse_abort_conn(struct fuse_conn *fc) + } + EXPORT_SYMBOL_GPL(fuse_abort_conn); + ++void fuse_wait_aborted(struct fuse_conn *fc) ++{ ++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); ++} ++ + int fuse_dev_release(struct inode *inode, struct file *file) + { + struct fuse_dev *fud = fuse_get_dev(file); +@@ -2138,9 +2157,15 @@ int fuse_dev_release(struct inode *inode, struct file *file) + if (fud) { + struct fuse_conn *fc = fud->fc; + struct fuse_pqueue *fpq = &fud->pq; ++ LIST_HEAD(to_end); + ++ spin_lock(&fpq->lock); + WARN_ON(!list_empty(&fpq->io)); +- end_requests(fc, &fpq->processing); ++ list_splice_init(&fpq->processing, &to_end); ++ spin_unlock(&fpq->lock); ++ ++ end_requests(fc, &to_end); ++ + /* Are we the last open device? 
*/ + if (atomic_dec_and_test(&fc->dev_count)) { + WARN_ON(fc->iq.fasync != NULL); +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index cca8dd3bda09..60dd2bc10776 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, + struct inode *inode; + struct dentry *newent; + bool outarg_valid = true; ++ bool locked; + +- fuse_lock_inode(dir); ++ locked = fuse_lock_inode(dir); + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, + &outarg, &inode); +- fuse_unlock_inode(dir); ++ fuse_unlock_inode(dir, locked); + if (err == -ENOENT) { + outarg_valid = false; + err = 0; +@@ -1336,6 +1337,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + u64 attr_version = 0; ++ bool locked; + + if (is_bad_inode(inode)) + return -EIO; +@@ -1363,9 +1365,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + FUSE_READDIR); + } +- fuse_lock_inode(inode); ++ locked = fuse_lock_inode(inode); + fuse_request_send(fc, req); +- fuse_unlock_inode(inode); ++ fuse_unlock_inode(inode, locked); + nbytes = req->out.args[0].size; + err = req->out.h.error; + fuse_put_request(fc, req); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 996aa23c409e..4408abf6675b 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -868,6 +868,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) + } + + if (WARN_ON(req->num_pages >= req->max_pages)) { ++ unlock_page(page); + fuse_put_request(fc, req); + return -EIO; + } +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index 91307940c8ac..1c905c7666de 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -854,6 +854,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc, + + /* Abort all requests */ + void fuse_abort_conn(struct fuse_conn *fc); ++void fuse_wait_aborted(struct fuse_conn *fc); + + /** + * Invalidate inode attributes +@@ -967,8 +968,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + + void fuse_set_initialized(struct fuse_conn *fc); + +-void fuse_unlock_inode(struct inode *inode); +-void fuse_lock_inode(struct inode *inode); ++void fuse_unlock_inode(struct inode *inode, bool locked); ++bool fuse_lock_inode(struct inode *inode); + + int fuse_setxattr(struct inode *inode, const char *name, const void *value, + size_t size, int flags); +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index f95e1d49b048..7a9b1069d267 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -356,15 +356,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, + return 0; + } + +-void fuse_lock_inode(struct inode *inode) ++bool fuse_lock_inode(struct inode *inode) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ bool locked = false; ++ ++ if (!get_fuse_conn(inode)->parallel_dirops) { + mutex_lock(&get_fuse_inode(inode)->mutex); ++ locked = true; ++ } ++ ++ return locked; + } + +-void fuse_unlock_inode(struct inode *inode) ++void fuse_unlock_inode(struct inode *inode, bool locked) + { +- if (!get_fuse_conn(inode)->parallel_dirops) ++ if (locked) + mutex_unlock(&get_fuse_inode(inode)->mutex); + } + +@@ -396,9 +402,6 @@ static void fuse_put_super(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + +- fuse_send_destroy(fc); +- +- fuse_abort_conn(fc); + mutex_lock(&fuse_mutex); + list_del(&fc->entry); + fuse_ctl_remove_conn(fc); +@@ -1198,16 +1201,25 
@@ static struct dentry *fuse_mount(struct file_system_type *fs_type, + return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); + } + +-static void fuse_kill_sb_anon(struct super_block *sb) ++static void fuse_sb_destroy(struct super_block *sb) + { + struct fuse_conn *fc = get_fuse_conn_super(sb); + + if (fc) { ++ fuse_send_destroy(fc); ++ ++ fuse_abort_conn(fc); ++ fuse_wait_aborted(fc); ++ + down_write(&fc->killsb); + fc->sb = NULL; + up_write(&fc->killsb); + } ++} + ++static void fuse_kill_sb_anon(struct super_block *sb) ++{ ++ fuse_sb_destroy(sb); + kill_anon_super(sb); + } + +@@ -1230,14 +1242,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, + + static void fuse_kill_sb_blk(struct super_block *sb) + { +- struct fuse_conn *fc = get_fuse_conn_super(sb); +- +- if (fc) { +- down_write(&fc->killsb); +- fc->sb = NULL; +- up_write(&fc->killsb); +- } +- ++ fuse_sb_destroy(sb); + kill_block_super(sb); + } + +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index fcff2e0487fe..f1c1430ae721 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) + return squashfs_block_size(size); + } + ++void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) ++{ ++ int copied; ++ void *pageaddr; ++ ++ pageaddr = kmap_atomic(page); ++ copied = squashfs_copy_data(pageaddr, buffer, offset, avail); ++ memset(pageaddr + copied, 0, PAGE_SIZE - copied); ++ kunmap_atomic(pageaddr); ++ ++ flush_dcache_page(page); ++ if (copied == avail) ++ SetPageUptodate(page); ++ else ++ SetPageError(page); ++} ++ + /* Copy data into page cache */ + void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + int bytes, int offset) + { + struct inode *inode = page->mapping->host; + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- void *pageaddr; + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; + int start_index = page->index & ~mask, end_index = start_index | mask; + +@@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, + if (PageUptodate(push_page)) + goto skip_page; + +- pageaddr = kmap_atomic(push_page); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(push_page); +- SetPageUptodate(push_page); ++ squashfs_fill_page(push_page, buffer, offset, avail); + skip_page: + unlock_page(push_page); + if (i != page->index) +@@ -420,10 +431,9 @@ skip_page: + } + + /* Read datablock stored packed inside a fragment (tail-end packed block) */ +-static int squashfs_readpage_fragment(struct page *page) ++static int squashfs_readpage_fragment(struct page *page, int expected) + { + struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); +@@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page) + squashfs_i(inode)->fragment_block, + squashfs_i(inode)->fragment_size); + else +- squashfs_copy_cache(page, buffer, i_size_read(inode) & +- (msblk->block_size - 1), ++ squashfs_copy_cache(page, buffer, expected, + squashfs_i(inode)->fragment_offset); + + squashfs_cache_put(buffer); + return res; + } + +-static int squashfs_readpage_sparse(struct page *page, int index, 
int file_end) ++static int squashfs_readpage_sparse(struct page *page, int expected) + { +- struct inode *inode = page->mapping->host; +- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; +- int bytes = index == file_end ? +- (i_size_read(inode) & (msblk->block_size - 1)) : +- msblk->block_size; +- +- squashfs_copy_cache(page, NULL, bytes, 0); ++ squashfs_copy_cache(page, NULL, expected, 0); + return 0; + } + +@@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page) + struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; + int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int file_end = i_size_read(inode) >> msblk->block_log; ++ int expected = index == file_end ? ++ (i_size_read(inode) & (msblk->block_size - 1)) : ++ msblk->block_size; + int res; + void *pageaddr; + +@@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page) + goto error_out; + + if (bsize == 0) +- res = squashfs_readpage_sparse(page, index, file_end); ++ res = squashfs_readpage_sparse(page, expected); + else +- res = squashfs_readpage_block(page, block, bsize); ++ res = squashfs_readpage_block(page, block, bsize, expected); + } else +- res = squashfs_readpage_fragment(page); ++ res = squashfs_readpage_fragment(page, expected); + + if (!res) + return 0; +diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c +index f2310d2a2019..a9ba8d96776a 100644 +--- a/fs/squashfs/file_cache.c ++++ b/fs/squashfs/file_cache.c +@@ -20,7 +20,7 @@ + #include "squashfs.h" + + /* Read separately compressed datablock and memcopy into page cache */ +-int squashfs_readpage_block(struct page *page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) + { + struct inode *i = page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, +@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize) + ERROR("Unable to read page, block %llx, size %x\n", block, + bsize); + else +- squashfs_copy_cache(page, buffer, buffer->length, 0); ++ squashfs_copy_cache(page, buffer, expected, 0); + + squashfs_cache_put(buffer); + return res; +diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c +index cb485d8e0e91..80db1b86a27c 100644 +--- a/fs/squashfs/file_direct.c ++++ b/fs/squashfs/file_direct.c +@@ -21,10 +21,11 @@ + #include "page_actor.h" + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page); ++ int pages, struct page **page, int bytes); + + /* Read separately compressed datablock directly into page cache */ +-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) ++int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, ++ int expected) + + { + struct inode *inode = target_page->mapping->host; +@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + * using an intermediate buffer. 
+ */ + res = squashfs_read_cache(target_page, block, bsize, pages, +- page); ++ page, expected); + if (res < 0) + goto mark_errored; + +@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) + if (res < 0) + goto mark_errored; + ++ if (res != expected) { ++ res = -EIO; ++ goto mark_errored; ++ } ++ + /* Last page may have trailing bytes not filled */ + bytes = res % PAGE_SIZE; + if (bytes) { +@@ -138,13 +144,12 @@ out: + + + static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, +- int pages, struct page **page) ++ int pages, struct page **page, int bytes) + { + struct inode *i = target_page->mapping->host; + struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, + block, bsize); +- int bytes = buffer->length, res = buffer->error, n, offset = 0; +- void *pageaddr; ++ int res = buffer->error, n, offset = 0; + + if (res) { + ERROR("Unable to read page, block %llx, size %x\n", block, +@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, + if (page[n] == NULL) + continue; + +- pageaddr = kmap_atomic(page[n]); +- squashfs_copy_data(pageaddr, buffer, offset, avail); +- memset(pageaddr + avail, 0, PAGE_SIZE - avail); +- kunmap_atomic(pageaddr); +- flush_dcache_page(page[n]); +- SetPageUptodate(page[n]); ++ squashfs_fill_page(page[n], buffer, offset, avail); + unlock_page(page[n]); + if (page[n] != target_page) + put_page(page[n]); +diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h +index 887d6d270080..f89f8a74c6ce 100644 +--- a/fs/squashfs/squashfs.h ++++ b/fs/squashfs/squashfs.h +@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, + u64, u64, unsigned int); + + /* file.c */ ++void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); + void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, + int); + + /* file_xxx.c */ +-extern int squashfs_readpage_block(struct page *, u64, int); ++extern int squashfs_readpage_block(struct page *, u64, int, int); + + /* id.c */ + extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index 39c75a86c67f..666986b95c5d 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, + } + EXPORT_SYMBOL_GPL(sysfs_chmod_file); + ++/** ++ * sysfs_break_active_protection - break "active" protection ++ * @kobj: The kernel object @attr is associated with. ++ * @attr: The attribute to break the "active" protection for. ++ * ++ * With sysfs, just like kernfs, deletion of an attribute is postponed until ++ * all active .show() and .store() callbacks have finished unless this function ++ * is called. Hence this function is useful in methods that implement self ++ * deletion. ++ */ ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ struct kernfs_node *kn; ++ ++ kobject_get(kobj); ++ kn = kernfs_find_and_get(kobj->sd, attr->name); ++ if (kn) ++ kernfs_break_active_protection(kn); ++ return kn; ++} ++EXPORT_SYMBOL_GPL(sysfs_break_active_protection); ++ ++/** ++ * sysfs_unbreak_active_protection - restore "active" protection ++ * @kn: Pointer returned by sysfs_break_active_protection(). ++ * ++ * Undo the effects of sysfs_break_active_protection(). 
Since this function ++ * calls kernfs_put() on the kernfs node that corresponds to the 'attr' ++ * argument passed to sysfs_break_active_protection() that attribute may have ++ * been removed between the sysfs_break_active_protection() and ++ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after ++ * this function has returned. ++ */ ++void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++ struct kobject *kobj = kn->parent->priv; ++ ++ kernfs_unbreak_active_protection(kn); ++ kernfs_put(kn); ++ kobject_put(kobj); ++} ++EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); ++ + /** + * sysfs_remove_file_ns - remove an object attribute with a custom ns tag + * @kobj: object we're acting for +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h +index 00a1f330f93a..d3c19f8c4564 100644 +--- a/include/linux/sysfs.h ++++ b/include/linux/sysfs.h +@@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, + const struct attribute **attr); + int __must_check sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, umode_t mode); ++struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr); ++void sysfs_unbreak_active_protection(struct kernfs_node *kn); + void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, + const void *ns); + bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); +@@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, + return 0; + } + ++static inline struct kernfs_node * ++sysfs_break_active_protection(struct kobject *kobj, ++ const struct attribute *attr) ++{ ++ return NULL; ++} ++ ++static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) ++{ ++} ++ + static inline void sysfs_remove_file_ns(struct kobject *kobj, + const struct attribute *attr, + const void *ns) +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 69485183af79..b9e966bcdd20 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -2441,7 +2441,7 @@ static int __init debugfs_kprobe_init(void) + if (!dir) + return -ENOMEM; + +- file = debugfs_create_file("list", 0444, dir, NULL, ++ file = debugfs_create_file("list", 0400, dir, NULL, + &debugfs_kprobes_operations); + if (!file) + goto error; +@@ -2451,7 +2451,7 @@ static int __init debugfs_kprobe_init(void) + if (!file) + goto error; + +- file = debugfs_create_file("blacklist", 0444, dir, NULL, ++ file = debugfs_create_file("blacklist", 0400, dir, NULL, + &debugfs_kprobe_blacklist_ops); + if (!file) + goto error; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 24d603d29512..7df6be31be36 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -345,7 +345,8 @@ static struct ctl_table kern_table[] = { + .data = &sysctl_sched_time_avg, + .maxlen = sizeof(unsigned int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &one, + }, + { + .procname = "sched_shares_window_ns", +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 349f4a8e3c4f..86a6b331b964 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4072,6 +4072,14 @@ static struct cftype mem_cgroup_legacy_files[] = { + + static DEFINE_IDR(mem_cgroup_idr); + ++static void mem_cgroup_id_remove(struct mem_cgroup *memcg) ++{ ++ if (memcg->id.id > 0) { ++ idr_remove(&mem_cgroup_idr, memcg->id.id); ++ memcg->id.id = 0; ++ } ++} ++ + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) + { + 
VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); +@@ -4082,8 +4090,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) + { + VM_BUG_ON(atomic_read(&memcg->id.ref) < n); + if (atomic_sub_and_test(n, &memcg->id.ref)) { +- idr_remove(&mem_cgroup_idr, memcg->id.id); +- memcg->id.id = 0; ++ mem_cgroup_id_remove(memcg); + + /* Memcg ID pins CSS */ + css_put(&memcg->css); +@@ -4208,8 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); + return memcg; + fail: +- if (memcg->id.id > 0) +- idr_remove(&mem_cgroup_idr, memcg->id.id); ++ mem_cgroup_id_remove(memcg); + __mem_cgroup_free(memcg); + return NULL; + } +@@ -4268,6 +4274,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + + return &memcg->css; + fail: ++ mem_cgroup_id_remove(memcg); + mem_cgroup_free(memcg); + return ERR_PTR(-ENOMEM); + } +diff --git a/mm/memory.c b/mm/memory.c +index 88f8d6a2af05..0ff735601654 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3861,6 +3861,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + return -EINVAL; + + maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); ++ if (!maddr) ++ return -ENOMEM; ++ + if (write) + memcpy_toio(maddr + offset, buf, len); + else +diff --git a/mm/zswap.c b/mm/zswap.c +index ded051e3433d..c2b5435fe617 100644 +--- a/mm/zswap.c ++++ b/mm/zswap.c +@@ -1018,6 +1018,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, + ret = -ENOMEM; + goto reject; + } ++ ++ /* A second zswap_is_full() check after ++ * zswap_shrink() to make sure it's now ++ * under the max_pool_percent ++ */ ++ if (zswap_is_full()) { ++ ret = -ENOMEM; ++ goto reject; ++ } + } + + /* allocate entry */ +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c +index d730a0f68f46..a0443d40d677 100644 +--- a/net/caif/caif_dev.c ++++ b/net/caif/caif_dev.c +@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) + caifd = caif_get(skb->dev); + + WARN_ON(caifd == NULL); +- if (caifd == NULL) ++ if (!caifd) { ++ rcu_read_unlock(); + return; ++ } + + caifd_hold(caifd); + rcu_read_unlock(); +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index 972353cd1778..65a15889d432 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) + int taglen; + + for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { +- if (optptr[0] == IPOPT_CIPSO) ++ switch (optptr[0]) { ++ case IPOPT_CIPSO: + return optptr; +- taglen = optptr[1]; ++ case IPOPT_END: ++ return NULL; ++ case IPOPT_NOOP: ++ taglen = 1; ++ break; ++ default: ++ taglen = optptr[1]; ++ } + optlen -= taglen; + optptr += taglen; + } +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index beae93fd66d5..a5aeeb613fac 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + goto tx_err_dst_release; + } + +- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); +- skb_dst_set(skb, dst); +- skb->dev = skb_dst(skb)->dev; +- + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); +@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) + htonl(mtu)); + } + +- return -EMSGSIZE; ++ err = -EMSGSIZE; ++ goto tx_err_dst_release; + } + ++ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); ++ skb_dst_set(skb, dst); ++ 
skb->dev = skb_dst(skb)->dev; ++ + err = dst_output(t->net, skb->sk, skb); + if (net_xmit_eval(err) == 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index a2756096b94a..ca7de02e0a6e 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -2061,7 +2061,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) + if (!sta->uploaded) + continue; + +- if (sta->sdata->vif.type != NL80211_IFTYPE_AP) ++ if (sta->sdata->vif.type != NL80211_IFTYPE_AP && ++ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + continue; + + for (state = IEEE80211_STA_NOTEXIST; +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 5b75468b5acd..146d83785b37 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -4058,6 +4058,7 @@ static int parse_station_flags(struct genl_info *info, + params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_MFP) | + BIT(NL80211_STA_FLAG_AUTHORIZED); ++ break; + default: + return -EINVAL; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 5b8fa6832687..1f943d97dc29 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -2354,6 +2354,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) + return make_blackhole(net, dst_orig->ops->family, dst_orig); + ++ if (IS_ERR(dst)) ++ dst_release(dst_orig); ++ + return dst; + } + EXPORT_SYMBOL(xfrm_lookup_route); +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index bb61956c0f9c..6e768093d7c8 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -984,10 +984,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, + { + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); + +- if (nlsk) +- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); +- else +- return -1; ++ if (!nlsk) { ++ kfree_skb(skb); ++ return -EPIPE; ++ } ++ ++ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); + } + + static inline size_t xfrm_spdinfo_msgsize(void) +diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c +index 45fc06c0e0e5..6b504f407079 100644 +--- a/sound/soc/sirf/sirf-usp.c ++++ b/sound/soc/sirf/sirf-usp.c +@@ -367,10 +367,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, usp); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- base = devm_ioremap(&pdev->dev, mem_res->start, +- resource_size(mem_res)); +- if (base == NULL) +- return -ENOMEM; ++ base = devm_ioremap_resource(&pdev->dev, mem_res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + usp->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &sirf_usp_regmap_config); + if (IS_ERR(usp->regmap)) +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 20680a490897..b111ecda6439 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1621,6 +1621,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream) + int i; + + for (i = 0; i < be->num_codecs; i++) { ++ /* ++ * Skip CODECs which don't support the current stream ++ * type. 
See soc_pcm_init_runtime_hw() for more details ++ */ ++ if (!snd_soc_dai_stream_valid(be->codec_dais[i], ++ stream)) ++ continue; ++ + codec_dai_drv = be->codec_dais[i]->driver; + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + codec_stream = &codec_dai_drv->playback; +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c +index 9664b1ff4285..5ec2de8f49b4 100644 +--- a/tools/power/x86/turbostat/turbostat.c ++++ b/tools/power/x86/turbostat/turbostat.c +@@ -733,9 +733,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ + if (!printed || !summary_only) + print_header(); + +- if (topo.num_cpus > 1) +- format_counters(&average.threads, &average.cores, +- &average.packages); ++ format_counters(&average.threads, &average.cores, &average.packages); + + printed = 1; + +@@ -3202,7 +3200,9 @@ void process_cpuid() + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + stepping = fms & 0xf; +- if (family == 6 || family == 0xf) ++ if (family == 0xf) ++ family += (fms >> 20) & 0xff; ++ if (family >= 6) + model += ((fms >> 16) & 0xf) << 4; + + if (debug) { +diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +new file mode 100644 +index 000000000000..3b1f45e13a2e +--- /dev/null ++++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +@@ -0,0 +1,28 @@ ++#!/bin/sh ++# description: Snapshot and tracing setting ++# flags: instance ++ ++[ ! -f snapshot ] && exit_unsupported ++ ++echo "Set tracing off" ++echo 0 > tracing_on ++ ++echo "Allocate and take a snapshot" ++echo 1 > snapshot ++ ++# Since trace buffer is empty, snapshot is also empty, but allocated ++grep -q "Snapshot is allocated" snapshot ++ ++echo "Ensure keep tracing off" ++test `cat tracing_on` -eq 0 ++ ++echo "Set tracing on" ++echo 1 > tracing_on ++ ++echo "Take a snapshot again" ++echo 1 > snapshot ++ ++echo "Ensure keep tracing on" ++test `cat tracing_on` -eq 1 ++ ++exit 0 +diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c +index 88d5e71be044..47dfa0b0fcd7 100644 +--- a/tools/usb/ffs-test.c ++++ b/tools/usb/ffs-test.c +@@ -44,12 +44,25 @@ + + /******************** Little Endian Handling ********************************/ + +-#define cpu_to_le16(x) htole16(x) +-#define cpu_to_le32(x) htole32(x) ++/* ++ * cpu_to_le16/32 are used when initializing structures, a context where a ++ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way ++ * that allows them to be used when initializing structures. 
++ */ ++ ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++#define cpu_to_le16(x) (x) ++#define cpu_to_le32(x) (x) ++#else ++#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) ++#define cpu_to_le32(x) \ ++ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ ++ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) ++#endif ++ + #define le32_to_cpu(x) le32toh(x) + #define le16_to_cpu(x) le16toh(x) + +- + /******************** Messages and Errors ***********************************/ + + static const char argv0[] = "ffs-test"; diff --git a/patches/a_arch_x86_include_asm_irqflags.h.patch b/patches/a_arch_x86_include_asm_irqflags.h.patch index a4949dc..d9f1026 100644 --- a/patches/a_arch_x86_include_asm_irqflags.h.patch +++ b/patches/a_arch_x86_include_asm_irqflags.h.patch @@ -13,7 +13,7 @@ index ac7692d..90e119c 100644 { unsigned long flags; -@@ -27,23 +27,29 @@ static inline unsigned long native_save_fl(void) +@@ -29,24 +29,30 @@ extern inline unsigned long native_save_fl(void) : /* no input */ : "memory"); @@ -23,7 +23,8 @@ index ac7692d..90e119c 100644 return flags; } - static inline void native_restore_fl(unsigned long flags) + extern inline void native_restore_fl(unsigned long flags); + extern inline void native_restore_fl(unsigned long flags) { +#if !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_HOST) || !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_VIRTUALBOX) + BUG_ON(flags & X86_EFLAGS_AC); diff --git a/patches/a_fs_fscache_operation.c.patch b/patches/a_fs_fscache_operation.c.patch index 31163f0..6d2debb 100644 --- a/patches/a_fs_fscache_operation.c.patch +++ b/patches/a_fs_fscache_operation.c.patch @@ -26,9 +26,9 @@ index de67745..6a3a9b6 100644 } EXPORT_SYMBOL(fscache_operation_init); -@@ -68,7 +68,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) - ASSERTCMP(atomic_read(&op->usage), >, 0); - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); +@@ -69,7 +69,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, + op->state, ==, FSCACHE_OP_ST_CANCELLED); - fscache_stat(&fscache_n_op_enqueue); + fscache_stat_unchecked(&fscache_n_op_enqueue); diff --git a/test_patch.sh b/test_patch.sh index e8fd37b..62c9ac1 100755 --- a/test_patch.sh +++ b/test_patch.sh @@ -1,6 +1,6 @@ DATE=`date +%Y-%m-%d` KERNEL_MAJOR_VERSION=4.9 -KERNEL_VERSION=4.9.124 +KERNEL_VERSION=4.9.125 echo "Removing old kernels..." rm -rf test
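
Note on the tools/usb/ffs-test.c hunk above: the reason the patch replaces htole16()/htole32() with open-coded macros is that the library versions may expand to function calls, which C does not allow inside static initializers. The shift-and-mask forms stay integer constant expressions on both byte orders. Below is a minimal standalone sketch of that technique, not part of the patch set; the struct, field, and file names here (descriptor, wLength, dwMagic) are hypothetical, while the macro bodies mirror the hunk, and a glibc-style <endian.h> defining __BYTE_ORDER is assumed.

    /*
     * Sketch of the constant-expression byte-swap macros from the
     * ffs-test.c change. Hypothetical example code, not from the patch.
     */
    #include <endian.h>   /* __BYTE_ORDER, __LITTLE_ENDIAN (glibc) */
    #include <stdint.h>
    #include <stdio.h>

    #if __BYTE_ORDER == __LITTLE_ENDIAN
    #define cpu_to_le16(x) (x)
    #define cpu_to_le32(x) (x)
    #else
    #define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
    #define cpu_to_le32(x) \
            ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
             (((x) & 0x0000ff00u) << 8)  | (((x) & 0x000000ffu) << 24))
    #endif

    /*
     * Because the macros expand to integer constant expressions, they are
     * legal in a static initializer, unlike htole16()/htole32() when those
     * are real functions.
     */
    struct descriptor {
            uint16_t wLength;
            uint32_t dwMagic;
    };

    static const struct descriptor desc = {
            .wLength = cpu_to_le16(18),
            .dwMagic = cpu_to_le32(0x12345678),
    };

    int main(void)
    {
            const unsigned char *p = (const unsigned char *)&desc.dwMagic;

            /* Bytes must come out little-endian regardless of host order. */
            printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
            return p[0] == 0x78 ? 0 : 1;
    }

On a little-endian host the macros are identity and the stored bytes are 78 56 34 12; on a big-endian host the swap produces the same in-memory byte sequence, so the program exits 0 either way.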