diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 3c6fc2e08d0449..eeb3fc9d777b81 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -58,6 +58,7 @@ stable kernels. | ARM | Cortex-A72 | #853709 | N/A | | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | +| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 | | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | diff --git a/Documentation/conf.py b/Documentation/conf.py index b691af4831fadc..22c1a6d96f9eaa 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -37,7 +37,7 @@ extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure', 'sphinx.ext.ifconfig'] # The name of the math extension changed on Sphinx 1.4 -if major == 1 and minor > 3: +if (major == 1 and minor > 3) or (major > 1): extensions.append("sphinx.ext.imgmath") else: extensions.append("sphinx.ext.pngmath") diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 7b7b845c490a4b..32b5186be41264 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -622,3 +622,8 @@ in your dentry operations instead. alloc_file_clone(file, flags, ops) does not affect any caller's references. On success you get a new struct file sharing the mount/dentry with the original, on failure - ERR_PTR(). +-- +[mandatory] + DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the + default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any + business doing so. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 2c31208528d565..7eb9366422f54b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -250,6 +250,14 @@ tcp_base_mss - INTEGER Path MTU discovery (MTU probing). If MTU probing is enabled, this is the initial MSS used by the connection. +tcp_min_snd_mss - INTEGER + TCP SYN and SYNACK messages usually advertise an ADVMSS option, + as described in RFC 1122 and RFC 6691. + If this ADVMSS option is smaller than tcp_min_snd_mss, + it is silently capped to tcp_min_snd_mss. + + Default : 48 (at least 8 bytes of payload per segment) + tcp_congestion_control - STRING Set the congestion control algorithm to be used for new connections. 
The algorithm "reno" is always available, but diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index 9d0a7f08f93bfc..1159405cb920ce 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -37,7 +37,19 @@ from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive -from sphinx.ext.autodoc import AutodocReporter + +# +# AutodocReporter is only good up to Sphinx 1.7 +# +import sphinx + +Use_SSI = sphinx.__version__[:3] >= '1.7' +if Use_SSI: + from sphinx.util.docutils import switch_source_input +else: + from sphinx.ext.autodoc import AutodocReporter + +import kernellog __version__ = '1.0' @@ -90,7 +102,8 @@ def run(self): cmd += [filename] try: - env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd))) + kernellog.verbose(env.app, + 'calling kernel-doc \'%s\'' % (" ".join(cmd))) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() @@ -100,7 +113,8 @@ def run(self): if p.returncode != 0: sys.stderr.write(err) - env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) + kernellog.warn(env.app, + 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] elif env.config.kerneldoc_verbosity > 0: sys.stderr.write(err) @@ -121,20 +135,28 @@ def run(self): lineoffset += 1 node = nodes.section() - buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter + self.do_parse(result, node) + + return node.children + + except Exception as e: # pylint: disable=W0703 + kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' % + (" ".join(cmd), str(e))) + return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] + + def do_parse(self, result, node): + if Use_SSI: + with switch_source_input(self.state, result): + self.state.nested_parse(result, 0, node, match_titles=1) + else: + save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter) self.state.memo.title_styles, self.state.memo.section_level = [], 0 try: self.state.nested_parse(result, 0, node, match_titles=1) finally: - self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf + self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save - return node.children - - except Exception as e: # pylint: disable=W0703 - env.app.warn('kernel-doc \'%s\' processing failed with: %s' % - (" ".join(cmd), str(e))) - return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] def setup(app): app.add_config_value('kerneldoc_bin', None, 'env') diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py new file mode 100644 index 00000000000000..af924f51a7dcc5 --- /dev/null +++ b/Documentation/sphinx/kernellog.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Sphinx has deprecated its older logging interface, but the replacement +# only goes back to 1.6. So here's a wrapper layer to keep around for +# as long as we support 1.4. 
+# +import sphinx + +if sphinx.__version__[:3] >= '1.6': + UseLogging = True + from sphinx.util import logging + logger = logging.getLogger('kerneldoc') +else: + UseLogging = False + +def warn(app, message): + if UseLogging: + logger.warning(message) + else: + app.warn(message) + +def verbose(app, message): + if UseLogging: + logger.verbose(message) + else: + app.verbose(message) + + diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index b97228d2cc0eef..fbfe6693bb60aa 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -60,6 +60,8 @@ from sphinx.util.nodes import clean_astext from six import iteritems +import kernellog + PY3 = sys.version_info[0] == 3 if PY3: @@ -171,20 +173,20 @@ def setupTools(app): This function is called once, when the builder is initiated. """ global dot_cmd, convert_cmd # pylint: disable=W0603 - app.verbose("kfigure: check installed tools ...") + kernellog.verbose(app, "kfigure: check installed tools ...") dot_cmd = which('dot') convert_cmd = which('convert') if dot_cmd: - app.verbose("use dot(1) from: " + dot_cmd) + kernellog.verbose(app, "use dot(1) from: " + dot_cmd) else: - app.warn("dot(1) not found, for better output quality install " - "graphviz from http://www.graphviz.org") + kernellog.warn(app, "dot(1) not found, for better output quality install " + "graphviz from http://www.graphviz.org") if convert_cmd: - app.verbose("use convert(1) from: " + convert_cmd) + kernellog.verbose(app, "use convert(1) from: " + convert_cmd) else: - app.warn( + kernellog.warn(app, "convert(1) not found, for SVG to PDF conversion install " "ImageMagick (https://www.imagemagick.org)") @@ -220,12 +222,13 @@ def convert_image(img_node, translator, src_fname=None): # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages - app.verbose('assert best format for: ' + img_node['uri']) + kernellog.verbose(app, 'assert best format for: ' + img_node['uri']) if in_ext == '.dot': if not dot_cmd: - app.verbose("dot from graphviz not available / include DOT raw.") + kernellog.verbose(app, + "dot from graphviz not available / include DOT raw.") img_node.replace_self(file2literal(src_fname)) elif translator.builder.format == 'latex': @@ -252,7 +255,8 @@ def convert_image(img_node, translator, src_fname=None): if translator.builder.format == 'latex': if convert_cmd is None: - app.verbose("no SVG to PDF conversion available / include SVG raw.") + kernellog.verbose(app, + "no SVG to PDF conversion available / include SVG raw.") img_node.replace_self(file2literal(src_fname)) else: dst_fname = path.join(translator.builder.outdir, fname + '.pdf') @@ -265,18 +269,19 @@ def convert_image(img_node, translator, src_fname=None): _name = dst_fname[len(translator.builder.outdir) + 1:] if isNewer(dst_fname, src_fname): - app.verbose("convert: {out}/%s already exists and is newer" % _name) + kernellog.verbose(app, + "convert: {out}/%s already exists and is newer" % _name) else: ok = False mkdir(path.dirname(dst_fname)) if in_ext == '.dot': - app.verbose('convert DOT to: {out}/' + _name) + kernellog.verbose(app, 'convert DOT to: {out}/' + _name) ok = dot2format(app, src_fname, dst_fname) elif in_ext == '.svg': - app.verbose('convert SVG to: {out}/' + _name) + kernellog.verbose(app, 'convert SVG to: {out}/' + _name) ok = svg2pdf(app, src_fname, dst_fname) if not ok: @@ -305,7 +310,8 @@ def dot2format(app, dot_fname, out_fname): with open(out_fname, "w") as out: exit_code = subprocess.call(cmd, stdout = out) if exit_code != 0: - 
app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + kernellog.warn(app, + "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) def svg2pdf(app, svg_fname, pdf_fname): @@ -322,7 +328,7 @@ def svg2pdf(app, svg_fname, pdf_fname): # use stdout and stderr from parent exit_code = subprocess.call(cmd) if exit_code != 0: - app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) @@ -415,15 +421,15 @@ def visit_kernel_render(self, node): app = self.builder.app srclang = node.get('srclang') - app.verbose('visit kernel-render node lang: "%s"' % (srclang)) + kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang)) tmp_ext = RENDER_MARKUP_EXT.get(srclang, None) if tmp_ext is None: - app.warn('kernel-render: "%s" unknown / include raw.' % (srclang)) + kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang)) return if not dot_cmd and tmp_ext == '.dot': - app.verbose("dot from graphviz not available / include raw.") + kernellog.verbose(app, "dot from graphviz not available / include raw.") return literal_block = node[0] diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 9ecde517728c31..2793d4eac55fd7 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -92,6 +92,14 @@ Values : 0 - disable JIT kallsyms export (default value) 1 - enable JIT kallsyms export for privileged users only +bpf_jit_limit +------------- + +This enforces a global limit for memory allocations to the BPF JIT +compiler in order to reject unprivileged JIT requests once it has +been surpassed. bpf_jit_limit contains the value of the global limit +in bytes. + dev_weight -------------- diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst index 534e9baa4e1d19..5d4330be200f98 100644 --- a/Documentation/x86/mds.rst +++ b/Documentation/x86/mds.rst @@ -142,45 +142,13 @@ Mitigation points mds_user_clear. The mitigation is invoked in prepare_exit_to_usermode() which covers - most of the kernel to user space transitions. There are a few exceptions - which are not invoking prepare_exit_to_usermode() on return to user - space. These exceptions use the paranoid exit code. + all but one of the kernel to user space transitions. The exception + is when we return from a Non Maskable Interrupt (NMI), which is + handled directly in do_nmi(). - - Non Maskable Interrupt (NMI): - - Access to sensible data like keys, credentials in the NMI context is - mostly theoretical: The CPU can do prefetching or execute a - misspeculated code path and thereby fetching data which might end up - leaking through a buffer. - - But for mounting other attacks the kernel stack address of the task is - already valuable information. So in full mitigation mode, the NMI is - mitigated on the return from do_nmi() to provide almost complete - coverage. - - - Double fault (#DF): - - A double fault is usually fatal, but the ESPFIX workaround, which can - be triggered from user space through modify_ldt(2) is a recoverable - double fault. #DF uses the paranoid exit path, so explicit mitigation - in the double fault handler is required. - - - Machine Check Exception (#MC): - - Another corner case is a #MC which hits between the CPU buffer clear - invocation and the actual return to user. As this still is in kernel - space it takes the paranoid exit path which does not clear the CPU - buffers. 
So the #MC handler repopulates the buffers to some - extent. Machine checks are not reliably controllable and the window is - extremly small so mitigation would just tick a checkbox that this - theoretical corner case is covered. To keep the amount of special - cases small, ignore #MC. - - - Debug Exception (#DB): - - This takes the paranoid exit path only when the INT1 breakpoint is in - kernel space. #DB on a user space address takes the regular exit path, - so no extra mitigation required. + (The reason that NMI is special is that prepare_exit_to_usermode() can + enable IRQs. In NMI context, NMIs are blocked, and we don't want to + enable IRQs with NMIs blocked.) 2. C-State transition diff --git a/Makefile b/Makefile index 024ae3f9545016..a76c61f77bcd38 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 44 -EXTRAVERSION = -linux4sam-6.1-rc2 +SUBLEVEL = 56 +EXTRAVERSION = NAME = "People's Front" # *DOCUMENTATION* @@ -508,13 +508,6 @@ export RETPOLINE_VDSO_CFLAGS KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS += $(call cc-option,-fno-PIE) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - CC_HAVE_ASM_GOTO := 1 - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@ -623,7 +616,7 @@ ifeq ($(may-sync-config),1) # Read in dependencies to all Kconfig* files, make sure to run syncconfig if # changes are detected. This should be included after arch/$(SRCARCH)/Makefile # because some architectures define CROSS_COMPILE there. 
--include include/config/auto.conf.cmd +include include/config/auto.conf.cmd # To avoid any implicit rule to kick in, define an empty command $(KCONFIG_CONFIG): ; @@ -659,6 +652,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) @@ -703,7 +697,6 @@ ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the diff --git a/arch/Kconfig b/arch/Kconfig index 6801123932a503..a336548487e698 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -71,6 +71,7 @@ config KPROBES config JUMP_LABEL bool "Optimize very unlikely/likely branches" depends on HAVE_ARCH_JUMP_LABEL + depends on CC_HAS_ASM_GOTO help This option enables a transparent branch optimization that makes certain almost-always-true or almost-always-false branch diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index ef149f59929ae3..d131c54acd3ec0 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -175,6 +175,7 @@ interrupt-names = "macirq"; phy-mode = "rgmii"; snps,pbl = <32>; + snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; clock-names = "stmmaceth"; phy-handle = <&phy0>; @@ -183,6 +184,9 @@ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ dma-coherent; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + mdio { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index d819de1c5d10ed..3ea4112c8302a8 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) #endif /* CONFIG_ARC_HAS_LLSC */ -#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), (unsigned long)(n))) +#define cmpxchg(ptr, o, n) ({ \ + (typeof(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n)); \ +}) /* * atomic_cmpxchg is same as cmpxchg @@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, return __xchg_bad_pointer(); } -#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \ - sizeof(*(ptr)))) +#define xchg(ptr, with) ({ \ + (typeof(*(ptr)))__xchg((unsigned long)(with), \ + (ptr), \ + sizeof(*(ptr))); \ +}) #endif /* CONFIG_ARC_PLAT_EZNPS */ diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 4097764fea2349..fa18c00b0cfd7d 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, struct pt_regs *regs) { struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; - unsigned int pd0[mmu->ways]; unsigned long flags; - int set; + int set, n_ways = mmu->ways; + + n_ways = min(n_ways, 4); + BUG_ON(mmu->ways > 4); 
local_irq_save(flags); @@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, for (set = 0; set < mmu->sets; set++) { int is_valid, way; + unsigned int pd0[4]; /* read out all the ways of current set */ - for (way = 0, is_valid = 0; way < mmu->ways; way++) { + for (way = 0, is_valid = 0; way < n_ways; way++) { write_aux_reg(ARC_REG_TLBINDEX, SET_WAY_TO_IDX(mmu, set, way)); write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); @@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, continue; /* Scan the set for duplicate ways: needs a nested loop */ - for (way = 0; way < mmu->ways - 1; way++) { + for (way = 0; way < n_ways - 1; way++) { int n; if (!pd0[way]) continue; - for (n = way + 1; n < mmu->ways; n++) { + for (n = way + 1; n < n_ways; n++) { if (pd0[way] != pd0[n]) continue; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index c9063ffca524c8..3fd9a1676d881a 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -410,6 +410,7 @@ vqmmc-supply = <&ldo1_reg>; bus-width = <4>; cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ + no-1-8-v; }; &mmc2 { diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi index baba7b00eca797..fdca4818691609 100644 --- a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi +++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi @@ -22,7 +22,7 @@ * * Datamanual Revisions: * - * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017 + * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018 * */ @@ -169,25 +169,25 @@ /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf { pinctrl-pin-array = < - 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ - 0x194 A_DELAY_PS(0) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */ - 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ - 0x1ac A_DELAY_PS(85) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ - 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ - 0x1b8 A_DELAY_PS(139) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ - 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ - 0x1c4 A_DELAY_PS(69) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ - 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ - 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ - 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ - 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ - 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ - 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ - 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ - 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ - 0x200 A_DELAY_PS(36) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ - 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ - 0x368 A_DELAY_PS(72) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(350) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */ + 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(335) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(339) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(219) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ + 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* 
CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(236) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ + 0x368 A_DELAY_PS(372) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi index 55167850619cb0..33a085ffc44718 100644 --- a/arch/arm/boot/dts/exynos5260.dtsi +++ b/arch/arm/boot/dts/exynos5260.dtsi @@ -223,7 +223,7 @@ wakeup-interrupt-controller { compatible = "samsung,exynos4210-wakeup-eint"; interrupt-parent = <&gic>; - interrupts = ; + interrupts = ; }; }; diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index cdda614e417e74..a370857beac0d3 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -106,6 +106,7 @@ regulator-name = "PVDD_APIO_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo3_reg: LDO3 { @@ -144,6 +145,7 @@ regulator-name = "PVDD_ABB_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo9_reg: LDO9 { diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi index e84544b220b9e2..b90cea8b73687e 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi @@ -22,7 +22,7 @@ "Headphone Jack", "HPL", "Headphone Jack", "HPR", "Headphone Jack", "MICBIAS", - "IN1", "Headphone Jack", + "IN12", "Headphone Jack", "Speakers", "SPKL", "Speakers", "SPKR"; diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi index 7fae2ffb76fe2e..ab522c2da6df66 100644 --- a/arch/arm/boot/dts/imx50.dtsi +++ b/arch/arm/boot/dts/imx50.dtsi @@ -420,7 +420,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin"; diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 5c4ba91e43ba4b..ef2abc0978439d 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -481,7 +481,7 @@ reg = <0x83fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 6386185ae23403..b6b0818343c4e5 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -701,7 +701,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin"; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 61d2d26afbf4d9..00d44a60972f7a 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -905,7 +905,7 @@ 
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6QDL_CLK_SDMA>, + clocks = <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 7a4f5dace9026b..2fa88c6f188201 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -739,7 +739,7 @@ reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SL_CLK_SDMA>, - <&clks IMX6SL_CLK_SDMA>; + <&clks IMX6SL_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; /* imx6sl reuses imx6q sdma firmware */ diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 3e6ffaf5f104f0..7c7d5c47578e20 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -591,7 +591,7 @@ compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SLL_CLK_SDMA>, + clocks = <&clks IMX6SLL_CLK_IPG>, <&clks IMX6SLL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 50083cecc6c970..7b62e6fb47ebe7 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -803,7 +803,7 @@ compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SX_CLK_SDMA>, + clocks = <&clks IMX6SX_CLK_IPG>, <&clks IMX6SX_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 6dc0b569acdf4c..2366f093cc76d8 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -707,7 +707,7 @@ "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6UL_CLK_SDMA>, + clocks = <&clks IMX6UL_CLK_IPG>, <&clks IMX6UL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index a052198f6e9631..a7f697b0290fff 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -1050,8 +1050,8 @@ compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma"; reg = <0x30bd0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_SDMA_CORE_CLK>, - <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>; + clocks = <&clks IMX7D_IPG_ROOT_CLK>, + <&clks IMX7D_SDMA_CORE_CLK>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index 07e31941dc6743..617c2c99ebfb30 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req, int err; err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 07e27f212dc754..d2453e2d3f1f38 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -68,6 +68,8 @@ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) +#define CNTVCT __ACCESS_CP15_64(1, c14) + extern unsigned long cr_alignment; /* defined in entry-armv.S */ static inline unsigned long get_cr(void) diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index cba23eaa607215..7a88f160b1fbe9 100644 --- 
a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -6,6 +6,7 @@ #include #include +/* number of IPIS _not_ including IPI_CPU_BACKTRACE */ #define NR_IPI 7 typedef struct { diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 90bce3d9928e2f..303b3ab87f7e89 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -4,8 +4,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - static void __arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type, bool is_static) @@ -35,5 +33,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __arch_jump_label_transform(entry, type, true); } - -#endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index a3ce7c5365fa8e..bada66ef441934 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -76,6 +76,10 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + /* + * CPU_BACKTRACE is special and not included in NR_IPI + * or tracable with trace_ipi_* + */ IPI_CPU_BACKTRACE, /* * SGI8-15 can be reserved by secure firmware, and thus may @@ -803,7 +807,7 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { - smp_cross_call(mask, IPI_CPU_BACKTRACE); + __smp_cross_call(mask, IPI_CPU_BACKTRACE); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index d2b5ec9c4b9293..ba88b1eca93c4c 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c index be1f20fe28f448..fbe1db61115fa3 100644 --- a/arch/arm/mach-exynos/firmware.c +++ b/arch/arm/mach-exynos/firmware.c @@ -196,6 +196,7 @@ void __init exynos_firmware_init(void) return; addr = of_get_address(nd, 0, NULL, NULL); + of_node_put(nd); if (!addr) { pr_err("%s: No address specified.\n", __func__); return; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 7ead3acd6fa4d4..088c34e99b02ff 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -434,8 +434,27 @@ static void exynos3250_pm_resume(void) static void exynos5420_prepare_pm_resume(void) { + unsigned int mpidr, cluster; + + mpidr = read_cpuid_mpidr(); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) WARN_ON(mcpm_cpu_powered_up()); + + if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { + /* + * When system is resumed on the LITTLE/KFC core (cluster 1), + * the DSCR is not properly updated until the power is turned + * on also for the cluster 0. Enable it for a while to + * propagate the SPNIDEN and SPIDEN signals from Secure JTAG + * block and avoid undefined instruction issue on CP14 reset. 
+ */ + pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, + EXYNOS_COMMON_CONFIGURATION(0)); + pmu_raw_writel(0, + EXYNOS_COMMON_CONFIGURATION(0)); + } } static void exynos5420_pm_resume(void) @@ -639,8 +658,10 @@ void __init exynos_pm_init(void) if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + of_node_put(np); return; } + of_node_put(np); pm_data = (const struct exynos_pm_data *) match->data; diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c index fd0053e47a1511..3708a71f30e62b 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c @@ -15,6 +15,7 @@ #include "common.h" #include "cpuidle.h" +#include "hardware.h" static int imx6sx_idle_finish(unsigned long val) { @@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void) * except for power up sw2iso which need to be * larger than LDO ramp up time. */ - imx_gpc_set_arm_power_up_timing(0xf, 1); + imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1); imx_gpc_set_arm_power_down_timing(1, 1); return cpuidle_register(&imx6sx_cpuidle_driver, NULL); diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index f4971e4a86b268..ca7026958d425a 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -51,10 +51,12 @@ static int amx3_common_init(void) /* CEFUSE domain can be turned off post bootup */ cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); - if (cefuse_pwrdm) - omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); - else + if (!cefuse_pwrdm) pr_err("PM: Failed to get cefuse_pwrdm\n"); + else if (omap_type() != OMAP2_DEVICE_TYPE_GP) + pr_info("PM: Leaving EFUSE power domain active\n"); + else + omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); return 0; } diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c index a9dd619c6c2900..7bdbf5d5c47d3c 100644 --- a/arch/arm/vdso/vgettimeofday.c +++ b/arch/arm/vdso/vgettimeofday.c @@ -18,9 +18,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata) u64 cycle_now; u64 nsec; - cycle_now = arch_counter_get_cntvct(); + isb(); + cycle_now = read_sysreg(CNTVCT); cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b1a0e95c7511b..8790a29d0af43f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -479,6 +479,24 @@ config ARM64_ERRATUM_1024718 If unsure, say Y. +config ARM64_ERRATUM_1463225 + bool "Cortex-A76: Software Step might prevent interrupt recognition" + default y + help + This option adds a workaround for Arm Cortex-A76 erratum 1463225. + + On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping + of a system call instruction (SVC) can prevent recognition of + subsequent interrupts when software stepping is disabled in the + exception handler of the system call and either kernel debugging + is enabled or VHE is in use. + + Work around the erratum by triggering a dummy step exception + when handling a system call from a task that is being stepped + in a VHE configuration of the kernel. + + If unsure, say Y. 
+ config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 35649ee8ad56b6..c12ff63265a946 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -51,6 +51,7 @@ endif KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +KBUILD_CFLAGS += -Wno-psabi KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index c88e603396f610..df7e62d9a67084 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -305,6 +305,7 @@ phys = <&emmc_phy>; phy-names = "phy_arasan"; power-domains = <&power RK3399_PD_EMMC>; + disable-cqe-dcmd; status = "disabled"; }; diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index e7a95a566462f2..5cc24896738719 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req, int err; err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; kernel_neon_begin(); neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 067d8937d5af1e..1ed227bf61066c 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -418,9 +418,11 @@ static int gcm_encrypt(struct aead_request *req) put_unaligned_be32(2, iv + GCM_IV_SIZE); while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + const int blocks = + walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; + int remaining = blocks; do { __aes_arm64_encrypt(ctx->aes_key.key_enc, @@ -430,9 +432,9 @@ static int gcm_encrypt(struct aead_request *req) dst += AES_BLOCK_SIZE; src += AES_BLOCK_SIZE; - } while (--blocks > 0); + } while (--remaining > 0); - ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, + ghash_do_update(blocks, dg, walk.dst.virt.addr, &ctx->ghash_key, NULL); @@ -553,7 +555,7 @@ static int gcm_decrypt(struct aead_request *req) put_unaligned_be32(2, iv + GCM_IV_SIZE); while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index f2a234d6516cf5..93e07512b4b61c 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) isb(); } +/* + * Ensure that reads of the counter are treated the same as memory reads + * for the purposes of ordering by subsequent memory barriers. + * + * This insanity brought to you by speculative system register reads, + * out-of-order memory accesses, sequence locks and Thomas Gleixner. 
+ * + * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html + */ +#define arch_counter_enforce_ordering(val) do { \ + u64 tmp, _val = (val); \ + \ + asm volatile( \ + " eor %0, %1, %1\n" \ + " add %0, sp, %0\n" \ + " ldr xzr, [%0]" \ + : "=r" (tmp) : "r" (_val)); \ +} while (0) + static inline u64 arch_counter_get_cntpct(void) { + u64 cnt; + isb(); - return arch_timer_reg_read_stable(cntpct_el0); + cnt = arch_timer_reg_read_stable(cntpct_el0); + arch_counter_enforce_ordering(cnt); + return cnt; } static inline u64 arch_counter_get_cntvct(void) { + u64 cnt; + isb(); - return arch_timer_reg_read_stable(cntvct_el0); + cnt = arch_timer_reg_read_stable(cntvct_el0); + arch_counter_enforce_ordering(cnt); + return cnt; } +#undef arch_counter_enforce_ordering + static inline int arch_timer_arch_init(void) { return 0; diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ae1f70450fb212..25ce9056cf6418 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -51,7 +51,8 @@ #define ARM64_SSBD 30 #define ARM64_MISMATCHED_CACHE_TYPE 31 #define ARM64_HAS_STAGE2_FWB 32 +#define ARM64_WORKAROUND_1463225 33 -#define ARM64_NCAPS 33 +#define ARM64_NCAPS 34 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ea690b3562afb2..b4a48419769f28 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -86,6 +86,7 @@ #define ARM_CPU_PART_CORTEX_A75 0xD0A #define ARM_CPU_PART_CORTEX_A35 0xD04 #define ARM_CPU_PART_CORTEX_A55 0xD05 +#define ARM_CPU_PART_CORTEX_A76 0xD0B #define APM_CPU_PART_POTENZA 0x000 @@ -110,6 +111,7 @@ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) +#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 1bdeca8918a684..ea423db3936445 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -444,6 +444,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) return __pmd_to_phys(pmd); } +static inline void pte_unmap(pte_t *pte) { } + /* Find an entry in the third-level page table. */ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) @@ -452,7 +454,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 79657ad91397fb..def5a5e807f02d 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -53,7 +53,15 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. 
*/ #ifdef CONFIG_COMPAT +#ifdef CONFIG_ARM64_64K_PAGES +/* + * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied + * by the compat vectors page. + */ #define TASK_SIZE_32 UL(0x100000000) +#else +#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE) +#endif /* CONFIG_ARM64_64K_PAGES */ #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9d1..58102652bf9e38 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -20,7 +20,7 @@ #include #include -typedef long (*syscall_fn_t)(struct pt_regs *regs); +typedef long (*syscall_fn_t)(const struct pt_regs *regs); extern const syscall_fn_t sys_call_table[]; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index a4477e515b798d..507d0ee6bc6900 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -30,10 +30,10 @@ } \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#define COMPAT_SYSCALL_DEFINE0(sname) \ - asmlinkage long __arm64_compat_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ - asmlinkage long __arm64_compat_sys_##sname(void) +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL_COMPAT(name) \ cond_syscall(__arm64_compat_sys_##name); @@ -62,11 +62,11 @@ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #ifndef SYSCALL_DEFINE0 -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __arm64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ - asmlinkage long __arm64_sys_##sname(void) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) #endif #ifndef COND_SYSCALL diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index 2b9a63771eda8c..f89263c8e11aff 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h @@ -38,6 +38,7 @@ struct vdso_data { __u32 tz_minuteswest; /* Whacky timezone stuff */ __u32 tz_dsttime; __u32 use_syscall; + __u32 hrtimer_res; }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index ad64d2c92ef53d..5dff8eccd17d4c 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -64,8 +64,6 @@ #ifndef __ASSEMBLY__ -#include - /* * User structures for general purpose, floating point and debug registers. 
*/ @@ -112,10 +110,10 @@ struct user_sve_header { /* * Common SVE_PT_* flags: - * These must be kept in sync with prctl interface in + * These must be kept in sync with prctl interface in */ -#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16) -#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16) +#define SVE_PT_VL_INHERIT ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16) +#define SVE_PT_VL_ONEXEC ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16) /* diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 323aeb5f2fe62c..92fba851ce53a4 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -99,7 +99,7 @@ int main(void) DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW); - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); + DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res)); DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index dec10898d68861..dc6c535cbd130c 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -411,6 +411,22 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, } #endif /* CONFIG_ARM64_SSBD */ +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static bool +has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + /* Cortex-A76 r0p0 - r3p1 */ + struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode(); +} +#endif + #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ .matches = is_affected_midr_range, \ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) @@ -679,6 +695,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_ssbd_mitigation, }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1463225 + { + .desc = "ARM erratum 1463225", + .capability = ARM64_WORKAROUND_1463225, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_cortex_a76_erratum_1463225, + }, #endif { } diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index ea001241bdd470..00f8b8612b69f8 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu) pr_err("%pOF: missing enable-method property\n", dn); } + of_node_put(dn); } else { enable_method = acpi_get_enable_method(cpu); if (!enable_method) { diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 06ca574495af9f..262925b98f4214 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors); */ static int clear_os_lock(unsigned int cpu) { + write_sysreg(0, osdlr_el1); write_sysreg(0, oslar_el1); isb(); return 0; diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index e0756416e567ec..b90754aebd12a6 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -20,8 +20,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { 
@@ -49,5 +47,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, * NOP needs to be replaced by a branch. */ } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index b09b6f75f75918..06941c1fe418e0 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys) if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { /* - * Randomize the module region over a 4 GB window covering the + * Randomize the module region over a 2 GB window covering the * kernel. This reduces the risk of modules leaking information * about the address of the kernel itself, but results in * branches between modules and the core kernel that are * resolved via PLTs. (Branches between modules will be * resolved normally.) */ - module_range = SZ_4G - (u64)(_end - _stext); - module_alloc_base = max((u64)_end + offset - SZ_4G, + module_range = SZ_2G - (u64)(_end - _stext); + module_alloc_base = max((u64)_end + offset - SZ_2G, (u64)MODULES_VADDR); } else { /* diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f0f27aeefb7362..0b368ceccee441 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -56,7 +56,7 @@ void *module_alloc(unsigned long size) * can simply omit this fallback in that case. */ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + SZ_4G, GFP_KERNEL, + module_alloc_base + SZ_2G, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 3432e5ef9f4188..388f8fc130800f 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -4,6 +4,7 @@ */ #include +#include #include #include diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index b44065fb16160c..fe20c461582a1e 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c @@ -31,7 +31,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, - unsigned long, fd, off_t, off) + unsigned long, fd, unsigned long, off) { if (offset_in_page(off) != 0) return -EINVAL; @@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality) return ksys_personality(personality); } +asmlinkage long sys_ni_syscall(void); + +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused) +{ + return sys_ni_syscall(); +} + /* * Wrappers to pass the pt_regs argument. */ -#define sys_personality sys_arm64_personality - -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall +#define __arm64_sys_personality __arm64_sys_arm64_personality #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, const syscall_fn_t sys_call_table[__NR_syscalls] = { - [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, + [0 ... 
__NR_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 0f8bcb7de70086..3c80a40c1c9d6d 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); } -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall - #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = { - [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, + [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 5610ac01c1ec02..871c739f060aca 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags) int syscall_trace_enter(struct pt_regs *regs); void syscall_trace_exit(struct pt_regs *regs); +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static void cortex_a76_erratum_1463225_svc_handler(void) +{ + u32 reg, val; + + if (!unlikely(test_thread_flag(TIF_SINGLESTEP))) + return; + + if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225))) + return; + + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1); + reg = read_sysreg(mdscr_el1); + val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE; + write_sysreg(val, mdscr_el1); + asm volatile("msr daifclr, #8"); + isb(); + + /* We will have taken a single-step exception by this point */ + + write_sysreg(reg, mdscr_el1); + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); +} +#else +static void cortex_a76_erratum_1463225_svc_handler(void) { } +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, const syscall_fn_t syscall_table[]) { @@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, regs->orig_x0 = regs->regs[0]; regs->syscallno = scno; + cortex_a76_erratum_1463225_svc_handler(); local_daif_restore(DAIF_PROCCTX); user_exit(); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 2d419006ad4330..ec0bb588d7553e 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; + /* Read without the seqlock held by clock_getres() */ + WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution); + if (!use_syscall) { /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index c39872a7b03c3e..856fee6d351297 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -73,6 +73,13 @@ x_tmp .req x8 movn x_tmp, #0xff00, lsl #48 and \res, x_tmp, \res mul \res, \res, \mult + /* + * Fake address dependency from the value computed from the counter + * register to subsequent data page accesses so that the 
sequence + * locking also orders the read of the counter. + */ + and x_tmp, \res, xzr + add vdso_data, vdso_data, x_tmp .endm /* @@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday) /* w11 = cs_mono_mult, w12 = cs_shift */ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - seqcnt_check fail=1b get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 + seqcnt_check fail=1b get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 @@ -211,13 +218,13 @@ realtime: /* w11 = cs_mono_mult, w12 = cs_shift */ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - seqcnt_check fail=realtime /* All computations are done with left-shifted nsecs. */ get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 + seqcnt_check fail=realtime get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 clock_gettime_return, shift=1 @@ -231,7 +238,6 @@ monotonic: ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] - seqcnt_check fail=monotonic /* All computations are done with left-shifted nsecs. */ lsl x4, x4, x12 @@ -239,6 +245,7 @@ monotonic: lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 + seqcnt_check fail=monotonic get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 @@ -253,13 +260,13 @@ monotonic_raw: /* w11 = cs_raw_mult, w12 = cs_shift */ ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] - seqcnt_check fail=monotonic_raw /* All computations are done with left-shifted nsecs. 
*/ get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 + seqcnt_check fail=monotonic_raw get_ts_clock_raw res_sec=x10, res_nsec=x11, \ clock_nsec=x15, nsec_to_sec=x9 @@ -301,13 +308,14 @@ ENTRY(__kernel_clock_getres) ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne b.ne 1f - ldr x2, 5f + adr vdso_data, _vdso_data + ldr w2, [vdso_data, #CLOCK_REALTIME_RES] b 2f 1: cmp w0, #CLOCK_REALTIME_COARSE ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne b.ne 4f - ldr x2, 6f + ldr x2, 5f 2: cbz x1, 3f stp xzr, x2, [x1] @@ -321,8 +329,6 @@ ENTRY(__kernel_clock_getres) svc #0 ret 5: - .quad CLOCK_REALTIME_RES -6: .quad CLOCK_COARSE_RES .cfi_endproc ENDPROC(__kernel_clock_getres) diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 2fabc2dc1966f3..feef06fc7c5a8d 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,6 +10,7 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index c389f2bef938eb..d3a5bb16f0b231 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -664,6 +664,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; + if (!is_vmalloc_addr(cpu_addr)) { + unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); + return __swiotlb_mmap_pfn(vma, pfn, size); + } + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { /* * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, @@ -687,6 +692,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct vm_struct *area = find_vm_area(cpu_addr); + if (!is_vmalloc_addr(cpu_addr)) { + struct page *page = virt_to_page(cpu_addr); + return __swiotlb_get_sgtable_page(sgt, page, size); + } + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { /* * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index a4c13467728567..88cf0a0cb61613 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -827,14 +827,47 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static int __exception +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + if (user_mode(regs)) + return 0; + + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) + return 0; + + /* + * We've taken a dummy step exception from the kernel to ensure + * that interrupts are re-enabled on the syscall path. Return back + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions + * masked so that we can safely restore the mdscr and get on with + * handling the syscall. 
+ */ + regs->pstate |= PSR_D_BIT; + return 1; +} +#else +static int __exception +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + return 0; +} +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, - unsigned int esr, - struct pt_regs *regs) + unsigned int esr, + struct pt_regs *regs) { const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr); unsigned long pc = instruction_pointer(regs); int rv; + if (cortex_a76_erratum_1463225_debug_handler(regs)) + return 0; + /* * Tell lockdep we disabled irqs in entry.S. Do nothing if they were * already disabled to preserve the last enabled/disabled addresses. diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8080c9f489c3e4..0fa558176fb109 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -921,13 +921,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) int __init arch_ioremap_pud_supported(void) { - /* only 4k granule supports level 1 block mappings */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES); + /* + * Only 4k granule supports level 1 block mappings. + * SW table walks can't handle removal of intermediate entries. + */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); } int __init arch_ioremap_pmd_supported(void) { - return 1; + /* See arch_ioremap_pud_supported() */ + return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); } int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 03646e6a2ef4f2..8cce091b6c21e2 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -70,24 +70,25 @@ ENTRY(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 - mrs x5, cpacr_el1 - mrs x6, tcr_el1 - mrs x7, vbar_el1 - mrs x8, mdscr_el1 - mrs x9, oslsr_el1 - mrs x10, sctlr_el1 + mrs x5, osdlr_el1 + mrs x6, cpacr_el1 + mrs x7, tcr_el1 + mrs x8, vbar_el1 + mrs x9, mdscr_el1 + mrs x10, oslsr_el1 + mrs x11, sctlr_el1 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x11, tpidr_el1 + mrs x12, tpidr_el1 alternative_else - mrs x11, tpidr_el2 + mrs x12, tpidr_el2 alternative_endif - mrs x12, sp_el0 + mrs x13, sp_el0 stp x2, x3, [x0] - stp x4, xzr, [x0, #16] - stp x5, x6, [x0, #32] - stp x7, x8, [x0, #48] - stp x9, x10, [x0, #64] - stp x11, x12, [x0, #80] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + stp x12, x13, [x0, #80] ret ENDPROC(cpu_do_suspend) @@ -110,8 +111,8 @@ ENTRY(cpu_do_resume) msr cpacr_el1, x6 /* Don't change t0sz here, mask those bits when restoring */ - mrs x5, tcr_el1 - bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + mrs x7, tcr_el1 + bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH msr tcr_el1, x8 msr vbar_el1, x9 @@ -135,6 +136,7 @@ alternative_endif /* * Restore oslsr_el1 by writing oslar_el1 */ + msr osdlr_el1, x5 ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index 783de51a6c4e5b..6c881659ee8a1d 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -100,12 +100,6 @@ #define A64_STXR(sf, Rt, Rn, Rs) \ A64_LSX(sf, Rt, Rn, Rs, STORE_EX) -/* Prefetch */ -#define A64_PRFM(Rn, type, target, policy) \ - aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \ - AARCH64_INSN_PRFM_TARGET_##target, \ - AARCH64_INSN_PRFM_POLICY_##policy) - /* Add/subtract (immediate) */ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ 
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a6fdaea07c6339..2eef156b38bb85 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -736,7 +736,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_STX | BPF_XADD | BPF_DW: emit_a64_mov_i(1, tmp, off, ctx); emit(A64_ADD(1, tmp, tmp, dst), ctx); - emit(A64_PRFM(tmp, PST, L1, STRM), ctx); emit(A64_LDXR(isdw, tmp2, tmp), ctx); emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx); diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index aa19b7ac8222a2..476c7b4be3787e 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr) return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); } +EXPORT_SYMBOL(paddr_to_nid); #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) /* diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 7c0b2e6cdfbd75..4c7a93f4039a0c 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -211,6 +211,12 @@ const char *get_system_type(void) return ath79_sys_type; } +int get_c0_perfcount_int(void) +{ + return ATH79_MISC_IRQ(5); +} +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); + unsigned int get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 32e3168316cd98..ab943927f97ab2 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -16,8 +16,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - /* * Define parameters for the standard MIPS and the microMIPS jump * instruction encoding respectively: @@ -70,5 +68,3 @@ void arch_jump_label_transform(struct jump_entry *e, mutex_unlock(&text_mutex); } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 413863508f6fab..d67fb64e908c4b 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -64,17 +64,11 @@ struct mips_perf_event { #define CNTR_EVEN 0x55555555 #define CNTR_ODD 0xaaaaaaaa #define CNTR_ALL 0xffffffff -#ifdef CONFIG_MIPS_MT_SMP enum { T = 0, V = 1, P = 2, } range; -#else - #define T - #define V - #define P -#endif }; static struct mips_perf_event raw_event; @@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) { struct perf_event *event = container_of(evt, struct perf_event, hw); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); -#ifdef CONFIG_MIPS_MT_SMP unsigned int range = evt->event_base >> 24; -#endif /* CONFIG_MIPS_MT_SMP */ WARN_ON(idx < 0 || idx >= mipspmu.num_counters); @@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) /* Make sure interrupt enabled. */ MIPS_PERFCTRL_IE; -#ifdef CONFIG_CPU_BMIPS5000 - { + if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) { /* enable the counter for the calling thread */ cpuc->saved_ctrl[idx] |= (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; - } -#else -#ifdef CONFIG_MIPS_MT_SMP - if (range > V) { + } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) { /* The counter is processor wide. Set it up to count all TCs. 
*/ pr_debug("Enabling perf counter for all TCs\n"); cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; - } else -#endif /* CONFIG_MIPS_MT_SMP */ - { + } else { unsigned int cpu, ctrl; /* @@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) cpuc->saved_ctrl[idx] |= ctrl; pr_debug("Enabling perf counter for CPU%d\n", cpu); } -#endif /* CONFIG_CPU_BMIPS5000 */ /* * We do not actually let the counter run. Leave it until start(). */ diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 89950b7bf536b7..bdaf3536241a2d 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -41,7 +41,19 @@ char *mips_get_machine_name(void) #ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - return add_memory_region(base, size, BOOT_MEM_RAM); + if (base >= PHYS_ADDR_MAX) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return; + } + + /* Truncate the passed memory region instead of type casting */ + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { + pr_warn("Truncate memory region %llx @ %llx to size %llx\n", + size, base, PHYS_ADDR_MAX - base); + size = PHYS_ADDR_MAX - base; + } + + add_memory_region(base, size, BOOT_MEM_RAM); } int __init early_init_dt_reserve_memory_arch(phys_addr_t base, diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index 4aaff3b3175c4c..6dbe4eab0a0e83 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) */ aup->resume_epc = regs->cp0_epc + 4; if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { - unsigned long epc; - - epc = regs->cp0_epc; __compute_return_epc_for_insn(regs, (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index f7ea8e21656b16..e7f5ef6bed0fe8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1099,6 +1099,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; case KVM_CAP_MIPS_FPU: /* We don't handle systems with inconsistent cpu_has_fpu */ r = !!raw_cpu_has_fpu; diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 2f616ebeb7e0cf..7755a1fad05adb 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) int __virt_addr_valid(const volatile void *kaddr) { + unsigned long vaddr = (unsigned long)kaddr; + + if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE)) + return 0; + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); } EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform index d80cd612df1f7a..c3592b374ad235 100644 --- a/arch/mips/pistachio/Platform +++ b/arch/mips/pistachio/Platform @@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \ -I$(srctree)/arch/mips/include/asm/mach-pistachio load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000 zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000 +all-$(CONFIG_MACH_PISTACHIO) := uImage.gz diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S index 5aba20fa48aafe..e8b798fd0cf038 100644 --- a/arch/parisc/boot/compressed/head.S +++ b/arch/parisc/boot/compressed/head.S @@ -22,7 +22,7 @@ __HEAD ENTRY(startup) - .level LEVEL + .level PA_ASM_LEVEL #define PSW_W_SM 0x200 #define PSW_W_BIT 36 @@ -63,7 
+63,7 @@ $bss_loop: load32 BOOTADDR(decompress_kernel),%r3 #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL ssm PSW_W_SM, %r0 /* set W-bit */ depdi 0, 31, 32, %r3 #endif @@ -72,7 +72,7 @@ $bss_loop: startup_continue: #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL rsm PSW_W_SM, %r0 /* clear W-bit */ #endif diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index e9c6385ef0d162..6f30fa5bdaedfb 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -61,14 +61,14 @@ #define LDCW ldcw,co #define BL b,l # ifdef CONFIG_64BIT -# define LEVEL 2.0w +# define PA_ASM_LEVEL 2.0w # else -# define LEVEL 2.0 +# define PA_ASM_LEVEL 2.0 # endif #else #define LDCW ldcw #define BL bl -#define LEVEL 1.1 +#define PA_ASM_LEVEL 1.1 #endif #ifdef __ASSEMBLY__ diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index fbb4e43fda0533..f56cbab64ac107 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -22,7 +22,7 @@ #include #include - .level LEVEL + .level PA_ASM_LEVEL __INITDATA ENTRY(boot_args) @@ -258,7 +258,7 @@ stext_pdc_ret: ldo R%PA(fault_vector_11)(%r10),%r10 $is_pa20: - .level LEVEL /* restore 1.1 || 2.0w */ + .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */ #endif /*!CONFIG_64BIT*/ load32 PA(fault_vector_20),%r10 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 841db71958cdb5..97c206734e24f3 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) */ int running_on_qemu __read_mostly; +EXPORT_SYMBOL(running_on_qemu); void __cpuidle arch_cpu_idle_dead(void) { diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index f453997a7b8f21..61a647a55c6955 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -48,7 +48,7 @@ registers). */ #define KILL_INSN break 0,0 - .level LEVEL + .level PA_ASM_LEVEL .text diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 933423fa5144ac..b0db61188a6128 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -60,19 +60,19 @@ ((exponent < (SGL_P - 1)) ? 
\ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE) -#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH) +#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0) #define Sgl_roundnearest_from_int(int_value,sgl_value) \ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \ - if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \ + if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \ Sall(sgl_value)++ #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \ - ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB)) + (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB)) #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \ - if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \ + if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++ #define Dint_isinexact_to_dbl(dint_value) \ diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c index 9d9f6f334d3cc0..3da3e2b1b51bcb 100644 --- a/arch/powerpc/boot/addnote.c +++ b/arch/powerpc/boot/addnote.c @@ -223,7 +223,11 @@ main(int ac, char **av) PUT_16(E_PHNUM, np + 2); /* write back */ - lseek(fd, (long) 0, SEEK_SET); + i = lseek(fd, (long) 0, SEEK_SET); + if (i < 0) { + perror("lseek"); + exit(1); + } i = write(fd, buf, n); if (i < 0) { perror("write"); diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 1f4691ce412618..e398173ae67d14 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); /* OPAL tracing */ -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL extern struct static_key opal_tracepoint_key; #endif diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index bccc5051249e1a..2b6049e839706c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -299,6 +299,7 @@ struct kvm_arch { #ifdef CONFIG_PPC_BOOK3S_64 struct list_head spapr_tce_tables; struct list_head rtas_tokens; + struct mutex rtas_token_lock; DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); #endif #ifdef CONFIG_KVM_MPIC diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 2b713539123137..d9d5391b2af6f9 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -336,6 +336,7 @@ #define PPC_INST_MULLI 0x1c000000 #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 +#define PPC_INST_DIVDU 0x7c000392 #define PPC_INST_RLWINM 0x54000000 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4898e9491a1cd3..9168a247e24ff2 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -970,7 +970,9 @@ start_here_multiplatform: /* Restore parameters passed from prom_init/kexec */ mr r3,r31 - bl early_setup /* also sets r13 and SPRG_PACA */ + LOAD_REG_ADDR(r12, DOTSYM(early_setup)) + mtctr r12 + bctrl /* also sets r13 and SPRG_PACA */ LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) diff --git a/arch/powerpc/kernel/jump_label.c 
b/arch/powerpc/kernel/jump_label.c index 6472472093d084..0080c5fbd225de 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,7 +11,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -22,4 +21,3 @@ void arch_jump_label_transform(struct jump_entry *entry, else patch_instruction(addr, PPC_INST_NOP); } -#endif diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 22e9d281324daf..e7d4ce6964ae99 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -563,8 +563,6 @@ static int nvram_pstore_init(void) nvram_pstore_info.buf = oops_data; nvram_pstore_info.bufsize = oops_data_sz; - spin_lock_init(&nvram_pstore_info.buf_lock); - rc = pstore_register(&nvram_pstore_info); if (rc && (rc != -EPERM)) /* Print error only when pstore.backend == nvram */ diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 3c6ab22a0c4e3b..af3c15a1d41eb1 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */ -static DEFINE_PER_CPU(struct timer_list, wd_timer); +static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer); static DEFINE_PER_CPU(u64, wd_timer_tb); /* SMP checker bits */ @@ -293,21 +293,21 @@ void soft_nmi_interrupt(struct pt_regs *regs) nmi_exit(); } -static void wd_timer_reset(unsigned int cpu, struct timer_list *t) -{ - t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms); - if (wd_timer_period_ms > 1000) - t->expires = __round_jiffies_up(t->expires, cpu); - add_timer_on(t, cpu); -} - -static void wd_timer_fn(struct timer_list *t) +static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { int cpu = smp_processor_id(); + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + return HRTIMER_NORESTART; + + if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) + return HRTIMER_NORESTART; + watchdog_timer_interrupt(cpu); - wd_timer_reset(cpu, t); + hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms)); + + return HRTIMER_RESTART; } void arch_touch_nmi_watchdog(void) @@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void) } EXPORT_SYMBOL(arch_touch_nmi_watchdog); -static void start_watchdog_timer_on(unsigned int cpu) -{ - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); - - per_cpu(wd_timer_tb, cpu) = get_tb(); - - timer_setup(t, wd_timer_fn, TIMER_PINNED); - wd_timer_reset(cpu, t); -} - -static void stop_watchdog_timer_on(unsigned int cpu) -{ - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); - - del_timer_sync(t); -} - -static int start_wd_on_cpu(unsigned int cpu) +static void start_watchdog(void *arg) { + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer); + int cpu = smp_processor_id(); unsigned long flags; if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { WARN_ON(1); - return 0; + return; } if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) - return 0; + return; if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) - return 0; + return; wd_smp_lock(&flags); cpumask_set_cpu(cpu, &wd_cpus_enabled); @@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu) } wd_smp_unlock(&flags); - start_watchdog_timer_on(cpu); + *this_cpu_ptr(&wd_timer_tb) = get_tb(); - return 0; + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; + hrtimer_start(hrtimer, 
ms_to_ktime(wd_timer_period_ms), + HRTIMER_MODE_REL_PINNED); } -static int stop_wd_on_cpu(unsigned int cpu) +static int start_watchdog_on_cpu(unsigned int cpu) { + return smp_call_function_single(cpu, start_watchdog, NULL, true); +} + +static void stop_watchdog(void *arg) +{ + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer); + int cpu = smp_processor_id(); unsigned long flags; if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) - return 0; /* Can happen in CPU unplug case */ + return; /* Can happen in CPU unplug case */ - stop_watchdog_timer_on(cpu); + hrtimer_cancel(hrtimer); wd_smp_lock(&flags); cpumask_clear_cpu(cpu, &wd_cpus_enabled); wd_smp_unlock(&flags); wd_smp_clear_cpu_pending(cpu, get_tb()); +} - return 0; +static int stop_watchdog_on_cpu(unsigned int cpu) +{ + return smp_call_function_single(cpu, stop_watchdog, NULL, true); } static void watchdog_calc_timeouts(void) @@ -402,7 +400,7 @@ void watchdog_nmi_stop(void) int cpu; for_each_cpu(cpu, &wd_cpus_enabled) - stop_wd_on_cpu(cpu); + stop_watchdog_on_cpu(cpu); } void watchdog_nmi_start(void) @@ -411,7 +409,7 @@ void watchdog_nmi_start(void) watchdog_calc_timeouts(); for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) - start_wd_on_cpu(cpu); + start_watchdog_on_cpu(cpu); } /* @@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void) err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", - start_wd_on_cpu, stop_wd_on_cpu); + start_watchdog_on_cpu, + stop_watchdog_on_cpu); if (err < 0) { pr_warn("could not be initialized"); return err; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89e9..281f074581a3b0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) #ifdef CONFIG_PPC64 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); + mutex_init(&kvm->arch.rtas_token_lock); #endif return kvm->arch.kvm_ops->init_vm(kvm); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e6319..083dcedba11ce1 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { - struct kvm_vcpu *ret; - - mutex_lock(&kvm->lock); - ret = kvm_get_vcpu_by_id(kvm, id); - mutex_unlock(&kvm->lock); - return ret; + return kvm_get_vcpu_by_id(kvm, id); } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) @@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; - mutex_lock(&kvm->lock); spin_lock(&vc->lock); /* * If ILE (interrupt little-endian) has changed, update the @@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, mask &= 0xFFFFFFFF; vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); - mutex_unlock(&kvm->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 2d3b2b1cc272b0..8f2355138f80bc 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { if 
(rtas_name_matches(d->handler->name, name)) { @@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token) bool found; int i; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { if (d->token == token) @@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.rtas_token_lock); if (args.token) rc = rtas_token_define(kvm, args.name, args.token); else rc = rtas_token_undefine(kvm, args.name); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.rtas_token_lock); return rc; } @@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) orig_rets = args.rets; args.rets = &args.args[be32_to_cpu(args.nargs)]; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.rtas_token_lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { @@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) } } - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); if (rc == 0) { args.rets = orig_rets; @@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); - list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { list_del(&d->list); kfree(d); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 30c2eb7669549a..aae34f218ab455 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1723,7 +1723,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) { xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(hw_num, 0, MASKED, 0); - xive_cleanup_irq_data(xd); } static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) @@ -1737,9 +1736,10 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) continue; kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); + xive_cleanup_irq_data(&state->ipi_data); xive_native_free_irq(state->ipi_number); - /* Pass-through, cleanup too */ + /* Pass-through, cleanup too but keep IRQ hw data */ if (state->pt_number) kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 79b79408d92ea4..578174a33d2297 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -632,6 +632,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_PPC_GET_SMMU_INFO: r = 1; diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index dbd8f762140b69..68984d85ad6b8f 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -53,14 +53,48 @@ int hash__alloc_context_id(void) } EXPORT_SYMBOL_GPL(hash__alloc_context_id); +static int realloc_context_ids(mm_context_t *ctx) +{ + int i, id; + + /* + * id 0 (aka. ctx->id) is special, we always allocate a new one, even if + * there wasn't one allocated previously (which happens in the exec + * case where ctx is newly allocated). + * + * We have to be a bit careful here. 
We must keep the existing ids in + * the array, so that we can test if they're non-zero to decide if we + * need to allocate a new one. However in case of error we must free the + * ids we've allocated but *not* any of the existing ones (or risk a + * UAF). That's why we decrement i at the start of the error handling + * loop, to skip the id that we just tested but couldn't reallocate. + */ + for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) { + if (i == 0 || ctx->extended_id[i]) { + id = hash__alloc_context_id(); + if (id < 0) + goto error; + + ctx->extended_id[i] = id; + } + } + + /* The caller expects us to return id */ + return ctx->id; + +error: + for (i--; i >= 0; i--) { + if (ctx->extended_id[i]) + ida_free(&mmu_context_ida, ctx->extended_id[i]); + } + + return id; +} + static int hash__init_new_context(struct mm_struct *mm) { int index; - index = hash__alloc_context_id(); - if (index < 0) - return index; - /* * The old code would re-promote on fork, we don't do that when using * slices as it could cause problem promoting slices that have been @@ -78,6 +112,10 @@ static int hash__init_new_context(struct mm_struct *mm) if (mm->context.id == 0) slice_init_new_context_exec(mm); + index = realloc_context_ids(&mm->context); + if (index < 0) + return index; + subpage_prot_init_new_context(mm); pkey_mm_init(mm); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 10fb43efef5088..f473c05e964977 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1495,6 +1495,9 @@ int start_topology_update(void) { int rc = 0; + if (!topology_updates_enabled) + return 0; + if (firmware_has_feature(FW_FEATURE_PRRN)) { if (!prrn_enabled) { prrn_enabled = 1; @@ -1524,6 +1527,9 @@ int stop_topology_update(void) { int rc = 0; + if (!topology_updates_enabled) + return 0; + if (prrn_enabled) { prrn_enabled = 0; #ifdef CONFIG_SMP @@ -1579,11 +1585,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf, kbuf[read_len] = '\0'; - if (!strncmp(kbuf, "on", 2)) + if (!strncmp(kbuf, "on", 2)) { + topology_updates_enabled = true; start_topology_update(); - else if (!strncmp(kbuf, "off", 3)) + } else if (!strncmp(kbuf, "off", 3)) { stop_topology_update(); - else + topology_updates_enabled = false; + } else return -EINVAL; return count; @@ -1598,9 +1606,7 @@ static const struct file_operations topology_ops = { static int topology_update_init(void) { - /* Do not poll for changes if disabled at boot */ - if (topology_updates_enabled) - start_topology_update(); + start_topology_update(); if (vphn_enabled) topology_schedule_update(); diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 68dece206048f5..e5c1d30ee968b4 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -116,7 +116,7 @@ ___PPC_RA(a) | IMM_L(i)) #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \ +#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(b)) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 226eec62d1255a..279a51bf94d052 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -372,12 +372,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= 
src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg); + PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg); PPC_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]); PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, src_reg); + PPC_DIVDU(dst_reg, dst_reg, src_reg); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ @@ -405,7 +405,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_2], dst_reg, + PPC_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]); PPC_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -413,7 +413,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, + PPC_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]); break; } diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 81f8a0c838ae38..4004dbdab9c7ba 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1827,6 +1827,7 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; + u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1932,13 +1933,14 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); - if (cpuhw->bhrb_filter == -1) { + if (bhrb_filter == -1) { put_cpu_var(cpu_hw_events); return -EOPNOTSUPP; } + cpuhw->bhrb_filter = bhrb_filter; } put_cpu_var(cpu_hw_events); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 1fafc32b12a0f6..5553226770748b 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -496,6 +496,11 @@ static int nest_imc_event_init(struct perf_event *event) * Get the base memory addresss for this cpu. 
*/ chip_id = cpu_to_chip_id(event->cpu); + + /* Return, if chip_id is not valid */ + if (chip_id < 0) + return -ENODEV; + pcni = pmu->mem_info; do { if (pcni->id == chip_id) { @@ -503,7 +508,7 @@ static int nest_imc_event_init(struct perf_event *event) break; } pcni++; - } while (pcni); + } while (pcni->vbase != 0); if (!flag) return -ENODEV; diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index d12a2db2635353..d10feef93b6bc1 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -29,6 +29,7 @@ enum { #define POWER8_MMCRA_IFM1 0x0000000040000000UL #define POWER8_MMCRA_IFM2 0x0000000080000000UL #define POWER8_MMCRA_IFM3 0x00000000C0000000UL +#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL /* * Raw event encoding for PowerISA v2.07 (Power8): @@ -243,6 +244,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type) static void power8_config_bhrb(u64 pmu_bhrb_filter) { + pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK; + /* Enable BHRB filter in PMU */ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index e012b1030a5b18..c07b1615ee39d3 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -100,6 +100,7 @@ enum { #define POWER9_MMCRA_IFM1 0x0000000040000000UL #define POWER9_MMCRA_IFM2 0x0000000080000000UL #define POWER9_MMCRA_IFM3 0x00000000C0000000UL +#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL /* Nasty Power9 specific hack */ #define PVR_POWER9_CUMULUS 0x00002000 @@ -308,6 +309,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type) static void power9_config_bhrb(u64 pmu_bhrb_filter) { + pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK; + /* Enable BHRB filter in PMU */ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 58a07948c76e78..828f6656f8f745 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node, nr_chips)) goto error; - pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info), + pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info), GFP_KERNEL); if (!pmu_ptr->mem_info) goto error; @@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) struct imc_pmu *pmu_ptr; u32 offset; + /* Return for unknown domain */ + if (domain < 0) + return -EINVAL; + /* memory for pmu */ pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); if (!pmu_ptr) diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c index 1ab7d26c0a2cd9..f16a43540e3075 100644 --- a/arch/powerpc/platforms/powernv/opal-tracepoints.c +++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c @@ -4,7 +4,7 @@ #include #include -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key opal_tracepoint_key = STATIC_KEY_INIT; int opal_tracepoint_regfunc(void) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 251528231a9e94..f4875fe3f8ff28 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -20,7 +20,7 @@ .section ".text" #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define OPAL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, 
opal_tracepoint_key) #else diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index d91412c591effa..50dc9426d0be94 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -19,7 +19,7 @@ #ifdef CONFIG_TRACEPOINTS -#ifndef HAVE_JUMP_LABEL +#ifndef CONFIG_JUMP_LABEL .section ".toc","aw" .globl hcall_tracepoint_refcount @@ -79,7 +79,7 @@ hcall_tracepoint_refcount: mr r5,BUFREG; \ __HCALL_INST_POSTCALL -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define HCALL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) #else diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index d3992ced0782bc..9e52b686a8fa43 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -828,7 +828,7 @@ EXPORT_SYMBOL(arch_free_page); #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; int hcall_tracepoint_regfunc(void) diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 88401d5125bcc0..523dbfbac03dde 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -29,6 +29,7 @@ #include #include +#include /* * This routine handles page faults. It determines the address and the @@ -281,6 +282,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs) pte_k = pte_offset_kernel(pmd_k, addr); if (!pte_present(*pte_k)) goto no_context; + + /* + * The kernel assumes that TLBs don't cache invalid + * entries, but in RISC-V, SFENCE.VMA specifies an + * ordering constraint, not a cache flush; it is + * necessary even after writing invalid entries. + * Relying on flush_tlb_fix_spurious_fault would + * suffice, but the extra traps reduce + * performance. So, eagerly SFENCE.VMA. 
+ */ + local_flush_tlb_page(addr); + return; } } diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c54cb26eb7f504..8ff7cb3da1cbac 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -27,14 +27,14 @@ #include #include #include -#include +#include #include #include #include #include static u8 *ctrblk; -static DEFINE_SPINLOCK(ctrblk_lock); +static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions, kma_functions; @@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, unsigned int n, nbytes; int ret, locked; - locked = spin_trylock(&ctrblk_lock); + locked = mutex_trylock(&ctrblk_lock); ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { @@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ @@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize) return 0; } -static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, - unsigned int len) +static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, + unsigned int len) { memset(gw, 0, sizeof(*gw)); gw->walk_bytes_remain = len; scatterwalk_start(&gw->walk, sg); } -static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) +static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw) +{ + struct scatterlist *nextsg; + + gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); + while (!gw->walk_bytes) { + nextsg = sg_next(gw->walk.sg); + if (!nextsg) + return 0; + scatterwalk_start(&gw->walk, nextsg); + gw->walk_bytes = scatterwalk_clamp(&gw->walk, + gw->walk_bytes_remain); + } + gw->walk_ptr = scatterwalk_map(&gw->walk); + return gw->walk_bytes; +} + +static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, + unsigned int nbytes) +{ + gw->walk_bytes_remain -= nbytes; + scatterwalk_unmap(&gw->walk); + scatterwalk_advance(&gw->walk, nbytes); + scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); + gw->walk_ptr = NULL; +} + +static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) { int n; - /* minbytesneeded <= AES_BLOCK_SIZE */ if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) { gw->ptr = gw->buf; gw->nbytes = gw->buf_bytes; @@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) goto out; } - gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); - if (!gw->walk_bytes) { - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; } - gw->walk_ptr = scatterwalk_map(&gw->walk); if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { gw->ptr = gw->walk_ptr; @@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); gw->buf_bytes += n; - gw->walk_bytes_remain -= n; - scatterwalk_unmap(&gw->walk); - scatterwalk_advance(&gw->walk, n); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - + 
_gcm_sg_unmap_and_advance(gw, n); if (gw->buf_bytes >= minbytesneeded) { gw->ptr = gw->buf; gw->nbytes = gw->buf_bytes; goto out; } - - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); - if (!gw->walk_bytes) { - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; } - gw->walk_ptr = scatterwalk_map(&gw->walk); } out: return gw->nbytes; } -static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) { - int n; + if (gw->walk_bytes_remain == 0) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; + } + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; + } + + if (gw->walk_bytes >= minbytesneeded) { + gw->ptr = gw->walk_ptr; + gw->nbytes = gw->walk_bytes; + goto out; + } + + scatterwalk_unmap(&gw->walk); + gw->walk_ptr = NULL; + + gw->ptr = gw->buf; + gw->nbytes = sizeof(gw->buf); + +out: + return gw->nbytes; +} + +static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +{ if (gw->ptr == NULL) - return; + return 0; if (gw->ptr == gw->buf) { - n = gw->buf_bytes - bytesdone; + int n = gw->buf_bytes - bytesdone; if (n > 0) { memmove(gw->buf, gw->buf + bytesdone, n); - gw->buf_bytes -= n; + gw->buf_bytes = n; } else gw->buf_bytes = 0; - } else { - gw->walk_bytes_remain -= bytesdone; - scatterwalk_unmap(&gw->walk); - scatterwalk_advance(&gw->walk, bytesdone); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - } + } else + _gcm_sg_unmap_and_advance(gw, bytesdone); + + return bytesdone; +} + +static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +{ + int i, n; + + if (gw->ptr == NULL) + return 0; + + if (gw->ptr == gw->buf) { + for (i = 0; i < bytesdone; i += n) { + if (!_gcm_sg_clamp_and_map(gw)) + return i; + n = min(gw->walk_bytes, bytesdone - i); + memcpy(gw->walk_ptr, gw->buf + i, n); + _gcm_sg_unmap_and_advance(gw, n); + } + } else + _gcm_sg_unmap_and_advance(gw, bytesdone); + + return bytesdone; } static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) @@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) unsigned int pclen = req->cryptlen; int ret = 0; - unsigned int len, in_bytes, out_bytes, + unsigned int n, len, in_bytes, out_bytes, min_bytes, bytes, aad_bytes, pc_bytes; struct gcm_sg_walk gw_in, gw_out; u8 tag[GHASH_DIGEST_SIZE]; @@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) *(u32 *)(param.j0 + ivsize) = 1; memcpy(param.k, ctx->key, ctx->key_len); - gcm_sg_walk_start(&gw_in, req->src, len); - gcm_sg_walk_start(&gw_out, req->dst, len); + gcm_walk_start(&gw_in, req->src, len); + gcm_walk_start(&gw_out, req->dst, len); do { min_bytes = min_t(unsigned int, aadlen > 0 ? 
aadlen : pclen, AES_BLOCK_SIZE); - in_bytes = gcm_sg_walk_go(&gw_in, min_bytes); - out_bytes = gcm_sg_walk_go(&gw_out, min_bytes); + in_bytes = gcm_in_walk_go(&gw_in, min_bytes); + out_bytes = gcm_out_walk_go(&gw_out, min_bytes); bytes = min(in_bytes, out_bytes); if (aadlen + pclen <= bytes) { @@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) gw_in.ptr + aad_bytes, pc_bytes, gw_in.ptr, aad_bytes); - gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes); - gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes); + n = aad_bytes + pc_bytes; + if (gcm_in_walk_done(&gw_in, n) != n) + return -ENOMEM; + if (gcm_out_walk_done(&gw_out, n) != n) + return -ENOMEM; aadlen -= aad_bytes; pclen -= pc_bytes; } while (aadlen + pclen > 0); diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 5346b5a80bb6c1..65bda1178963c0 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,7 +22,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; -static DEFINE_SPINLOCK(ctrblk_lock); +static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; @@ -387,7 +388,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, unsigned int n, nbytes; int ret, locked; - locked = spin_trylock(&ctrblk_lock); + locked = mutex_trylock(&ctrblk_lock); ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { @@ -404,7 +405,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr, diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 8c00fd509c4555..1a6a7092d94209 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -221,16 +221,22 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) { register unsigned long reg0 asm ("0") = qid | (3UL << 24); - register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_qirq_ctrl qirqctrl; + struct ap_queue_status status; + } reg1 asm ("1"); register void *reg2 asm ("2") = ind; + reg1.qirqctrl = qirqctrl; + asm volatile( ".long 0xb2af0000" /* PQAP(AQIC) */ - : "=d" (reg1_out) - : "d" (reg0), "d" (reg1_in), "d" (reg2) + : "+d" (reg1) + : "d" (reg0), "d" (reg2) : "cc"); - return reg1_out; + + return reg1.status; } /* @@ -264,17 +270,21 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, { register unsigned long reg0 asm ("0") = qid | (5UL << 24) | ((ifbit & 0x01) << 22); - register unsigned long reg1_in asm ("1") = apinfo->val; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_queue_status status; + } reg1 asm ("1"); register unsigned long reg2 asm ("2"); + reg1.value = apinfo->val; + asm volatile( ".long 0xb2af0000" /* PQAP(QACT) */ - : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2) + : "+d" (reg1), "=d" (reg2) : "d" (reg0) : "cc"); apinfo->val = reg2; - return reg1_out; + return reg1.status; } /** diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 
40f651292aa789..9c7dc970e96653 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,6 +10,12 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 +#if __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. @@ -19,9 +25,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: @@ -33,9 +39,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("0: brcl 15, %l[label]\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index ad6b91013a0525..5332f628c1edc8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long n); +#ifndef CONFIG_KASAN #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER +#endif #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index dbfd1730e631ac..b205c0ff0b2204 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -44,7 +44,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o -obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o obj-y += nospec-branch.o @@ -68,6 +68,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 43f8430fb67d50..68f415e334a594 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -10,8 +10,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - struct insn { u16 opcode; s32 offset; @@ -102,5 +100,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __jump_label_transform(entry, type, 1); } - -#endif diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c index 5a286b012043bc..602e7cc26d1181 100644 --- a/arch/s390/kernel/kexec_elf.c +++ b/arch/s390/kernel/kexec_elf.c @@ -19,10 
+19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image, struct kexec_buf buf; const Elf_Ehdr *ehdr; const Elf_Phdr *phdr; + Elf_Addr entry; int i, ret; ehdr = (Elf_Ehdr *)kernel; buf.image = image; + if (image->type == KEXEC_TYPE_CRASH) + entry = STARTUP_KDUMP_OFFSET; + else + entry = ehdr->e_entry; phdr = (void *)ehdr + ehdr->e_phoff; for (i = 0; i < ehdr->e_phnum; i++, phdr++) { @@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image, buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); buf.memsz = phdr->p_memsz; - if (phdr->p_paddr == 0) { + if (entry - phdr->p_paddr < phdr->p_memsz) { data->kernel_buf = buf.buffer; data->memsz += STARTUP_NORMAL_OFFSET; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ac5da6b0b862a3..fc7de27960e73d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -489,6 +489,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_NR_VCPUS: case KVM_CAP_MAX_VCPUS: + case KVM_CAP_MAX_VCPU_ID: r = KVM_S390_BSCA_CPU_SLOTS; if (!kvm_s390_use_sca_entries()) r = KVM_MAX_VCPUS; @@ -4155,21 +4156,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - int rc; - - /* If the basics of the memslot do not change, we do not want - * to update the gmap. Every update causes several unnecessary - * segment translation exceptions. This is usually handled just - * fine by the normal fault handler + gmap, but it will also - * cause faults on the prefix page of running guest CPUs. - */ - if (old->userspace_addr == mem->userspace_addr && - old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && - old->npages * PAGE_SIZE == mem->memory_size) - return; + int rc = 0; - rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, - mem->guest_phys_addr, mem->memory_size); + switch (change) { + case KVM_MR_DELETE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + break; + case KVM_MR_MOVE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + if (rc) + break; + /* FALLTHROUGH */ + case KVM_MR_CREATE: + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, + mem->guest_phys_addr, mem->memory_size); + break; + case KVM_MR_FLAGS_ONLY: + break; + default: + WARN(1, "Unknown KVM MR CHANGE: %d\n", change); + } if (rc) pr_warn("failed to commit memory region\n"); return; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 72af23bacbb586..a6e3c7022245d4 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -107,7 +107,6 @@ void bust_spinlocks(int yes) /* * Find out which address space caused the exception. - * Access register mode is impossible, ignore space == 3. 
*/ static inline enum fault_type get_fault_type(struct pt_regs *regs) { @@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs) } return VDSO_FAULT; } + if (trans_exc_code == 1) { + /* access register mode, not used in the kernel */ + return USER_FAULT; + } /* home space exception -> access via kernel ASCE */ return KERNEL_FAULT; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f2cc7da473e4ed..ae894ac83fd618 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, return old; } +#ifdef CONFIG_PGSTE static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) pmd = pmd_alloc(mm, pud, addr); return pmd; } +#endif pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t new) diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h index 96b8cb1f754a95..029bbadaf7ab50 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7786.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h @@ -135,7 +135,7 @@ enum { static inline u32 sh7786_mm_sel(void) { - return __raw_readl(0xFC400020) & 0x7; + return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7; } #endif /* __CPU_SH7786_H__ */ diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index cf8640841b7a2c..97c0e19263d1f6 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := perf_event.o obj-$(CONFIG_SPARC64) += $(pc--y) obj-$(CONFIG_UPROBES) += uprobes.o -obj-$(CONFIG_SPARC64) += jump_label.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 7f8eac51df337e..a4cfaeecaf5ea8 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -9,8 +9,6 @@ #include -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -47,5 +45,3 @@ void arch_jump_label_transform(struct jump_entry *entry, flushi(insn); mutex_unlock(&text_mutex); } - -#endif diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 39a2503fa3e18e..51028abe5e9038 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -357,6 +357,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, node_info->vdev_port.id = *idp; node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); + if (!node_info->vdev_port.name) + return -1; node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; return 0; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 67b3e6b3ce5d7c..1ad5911f62b416 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event, s64 period = hwc->sample_period; int ret = 0; + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ + if (unlikely(period != hwc->last_period)) + left = period - (hwc->last_period - left); + if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index d245f89d139528..d220b6848746c5 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */ sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 
2f - add %g2, 1, %g2 + sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP @@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f - add %g2, 1, %g2 + sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 052de4c8acb2ec..0c572a48158e8d 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt) static struct clock_event_device timer_clockevent = { .name = "posix-timer", .rating = 250, - .cpumask = cpu_all_mask, + .cpumask = cpu_possible_mask, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_state_shutdown = itimer_shutdown, diff --git a/arch/x86/Makefile b/arch/x86/Makefile index ffc823a8312fbd..ce0d0424a53d6a 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -47,7 +47,7 @@ export REALMODE_CFLAGS export BITS ifdef CONFIG_X86_NEED_RELOCS - LDFLAGS_vmlinux := --emit-relocs + LDFLAGS_vmlinux := --emit-relocs --discard-none endif # @@ -305,7 +305,7 @@ vdso_install: archprepare: checkbin checkbin: -ifndef CC_HAVE_ASM_GOTO +ifndef CONFIG_CC_HAS_ASM_GOTO @echo Compiler lacks asm-goto support. @exit 1 endif diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index cd4df93225014b..7bbfe7d35da7dd 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out) return 0; } -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { if (irq_fpu_usable()) { kernel_fpu_begin(); - *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); + *(__u16 *)out = crc_t10dif_pcl(crc, data, len); kernel_fpu_end(); } else - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + *(__u16 *)out = crc_t10dif_generic(crc, data, len); return 0; } @@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - return __chksum_finup(&ctx->crc, data, len, out); + return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, length, out); + return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 352e70cd33e80b..e699b204166536 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -337,7 +337,7 @@ For 32-bit we have the following conventions - kernel is built with */ .macro CALL_enter_from_user_mode #ifdef CONFIG_CONTEXT_TRACKING -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 #endif call enter_from_user_mode diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index fbbf1ba57ec67c..b5c2b1091b18db 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -648,6 +648,7 @@ ENTRY(__switch_to_asm) pushl %ebx pushl %edi pushl %esi + pushfl /* switch stack */ movl %esp, TASK_threadsp(%eax) @@ -670,6 +671,7 @@ ENTRY(__switch_to_asm) #endif /* restore callee-saved 
registers */ + popfl popl %esi popl %edi popl %ebx diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 617df50a11d969..c90e00db5c13c6 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -352,6 +352,7 @@ ENTRY(__switch_to_asm) pushq %r13 pushq %r14 pushq %r15 + pushfq /* switch stack */ movq %rsp, TASK_threadsp(%rdi) @@ -374,6 +375,7 @@ ENTRY(__switch_to_asm) #endif /* restore callee-saved registers */ + popfq popq %r15 popq %r14 popq %r13 @@ -903,7 +905,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt */ #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) -.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 +.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0 ENTRY(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 @@ -923,6 +925,20 @@ ENTRY(\sym) jnz .Lfrom_usermode_switch_stack_\@ .endif + .if \create_gap == 1 + /* + * If coming from kernel space, create a 6-word gap to allow the + * int3 handler to emulate a call instruction. + */ + testb $3, CS-ORIG_RAX(%rsp) + jnz .Lfrom_usermode_no_gap_\@ + .rept 6 + pushq 5*8(%rsp) + .endr + UNWIND_HINT_IRET_REGS offset=8 +.Lfrom_usermode_no_gap_\@: + .endif + .if \paranoid call paranoid_entry .else @@ -1152,7 +1168,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 +idtentry int3 do_int3 has_error_code=0 create_gap=1 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a759e59990fbdc..c8b0bf2b0d5e40 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2074,15 +2074,19 @@ static void intel_pmu_disable_event(struct perf_event *event) cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); cpuc->intel_cp_status &= ~(1ull << hwc->idx); - if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { intel_pmu_disable_fixed(hwc); return; } x86_pmu_disable_event(event); + + /* + * Needs to be called after x86_pmu_disable_event, + * so we don't trigger the event without PEBS bit set. 
+ */ + if (unlikely(event->attr.precise_ip)) + intel_pmu_pebs_disable(event); } static void intel_pmu_del_event(struct perf_event *event) @@ -3068,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event) return ret; if (event->attr.precise_ip) { - if (!(event->attr.freq || event->attr.wakeup_events)) { + if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event))) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 56194c571299f0..4a650eb3d94a3a 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -584,6 +584,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b7b01d762d32a3..e91814d1a27f34 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), EVENT_CONSTRAINT_END }; @@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { struct event_constraint intel_slm_pebs_event_constraints[] = { /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). 
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { struct event_constraint intel_skl_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). 
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 91039ffed63334..2413169ce36271 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -780,6 +780,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init), + + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init), {}, }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 1b9f85abf9bc10..ace6c1e752fb1c 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -89,6 +89,7 @@ static bool test_intel(int idx) case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_ICELAKE_MOBILE: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; break; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 86b1341cba9ac5..513ba49c204fed 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -61,9 +61,8 @@ } while (0) #define RELOAD_SEG(seg) { \ - unsigned int pre = GET_SEG(seg); \ + unsigned int pre = (seg) | 3; \ unsigned int cur = get_user_seg(seg); \ - pre |= 3; \ if (pre != cur) \ set_user_seg(seg, pre); \ } @@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_32 __user *sc) { unsigned int tmpflags, err = 0; + u16 gs, fs, es, ds; void __user *buf; u32 tmp; @@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, current->restart_block.fn = do_no_restart_syscall; get_user_try { - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ - RELOAD_SEG(gs); - RELOAD_SEG(fs); - RELOAD_SEG(ds); - RELOAD_SEG(es); + gs = GET_SEG(gs); + fs = GET_SEG(fs); + ds = GET_SEG(ds); + es = GET_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); COPY(ax); @@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, buf = compat_ptr(tmp); } get_user_catch(err); + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. 
+ */ + RELOAD_SEG(gs); + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); + err |= fpu__restore_sig(buf, 1); force_iret(); diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index aced6c9290d6f9..ce95b8cbd2296b 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO) +#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) /* * Workaround for the sake of BPF compilation which utilizes kernel diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 8c0de4282659b5..7010e1c594c435 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -2,19 +2,6 @@ #ifndef _ASM_X86_JUMP_LABEL_H #define _ASM_X86_JUMP_LABEL_H -#ifndef HAVE_JUMP_LABEL -/* - * For better or for worse, if jump labels (the gcc extension) are missing, - * then the entire static branch patching infrastructure is compiled out. - * If that happens, the code in here will malfunction. Raise a compiler - * error instead. - * - * In theory, jump labels and the static branch patching infrastructure - * could be decoupled to fix this. - */ -#error asm/jump_label.h included on a non-jump-label kernel -#endif - #define JUMP_LABEL_NOP_SIZE 5 #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 4914a3e7c80355..033dc7ca49e978 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -4,7 +4,7 @@ #define __CLOBBERS_MEM(clb...) "memory", ## clb -#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) +#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) /* Use asm goto */ @@ -21,7 +21,7 @@ cc_label: \ #define __BINARY_RMWcc_ARG " %1, " -#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ /* Use flags output or a set instruction */ @@ -36,7 +36,7 @@ do { \ #define __BINARY_RMWcc_ARG " %2, " -#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 7cf1a270d89101..157149d4129c09 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void); * order of the fields must match the code in __switch_to_asm(). 
*/ struct inactive_task_frame { + unsigned long flags; #ifdef CONFIG_X86_64 unsigned long r15; unsigned long r14; diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index e85ff65c43c3ef..0bbb07eaed6b10 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -39,4 +39,34 @@ extern int poke_int3_handler(struct pt_regs *regs); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); extern int after_bootmem; +#ifndef CONFIG_UML_X86 +static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) +{ + regs->ip = ip; +} + +#define INT3_INSN_SIZE 1 +#define CALL_INSN_SIZE 5 + +#ifdef CONFIG_X86_64 +static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) +{ + /* + * The int3 handler in entry_64.S adds a gap between the + * stack where the break point happened, and the saving of + * pt_regs. We can extend the original stack because of + * this gap. See the idtentry macro's create_gap option. + */ + regs->sp -= sizeof(unsigned long); + *(unsigned long *)regs->sp = val; +} + +static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) +{ + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); + int3_emulate_jmp(regs, func); +} +#endif /* CONFIG_X86_64 */ +#endif /* !CONFIG_UML_X86 */ + #endif /* _ASM_X86_TEXT_PATCHING_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 8824d01c0c352d..da0b6bc090f3e0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o dumpstack.o nmi.o obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o -obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-y += setup.o x86_init.o i8259.o irqinit.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o obj-$(CONFIG_X86_64) += sys_x86_64.o diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index b9d5e7c9ef43e6..918a23704c0c5b 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -662,15 +662,29 @@ void __init alternative_instructions(void) * handlers seeing an inconsistent instruction while you patch. */ void *__init_or_module text_poke_early(void *addr, const void *opcode, - size_t len) + size_t len) { unsigned long flags; - local_irq_save(flags); - memcpy(addr, opcode, len); - local_irq_restore(flags); - sync_core(); - /* Could also do a CLFLUSH here to speed up CPU recovery; but - that causes hangs on some VIA CPUs. */ + + if (boot_cpu_has(X86_FEATURE_NX) && + is_module_text_address((unsigned long)addr)) { + /* + * Modules text is marked initially as non-executable, so the + * code cannot be running and speculative code-fetches are + * prevented. Just change the code. + */ + memcpy(addr, opcode, len); + } else { + local_irq_save(flags); + memcpy(addr, opcode, len); + local_irq_restore(flags); + sync_core(); + + /* + * Could also do a CLFLUSH here to speed up CPU recovery; but + * that causes hangs on some VIA CPUs. + */ + } return addr; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 6a25278e009290..da1f5e78363e91 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); - /* Fix erratum 1076: CPB feature bit not being set in CPUID. 
*/ - if (!cpu_has(c, X86_FEATURE_CPB)) + /* + * Fix erratum 1076: CPB feature bit not being set in CPUID. + * Always set it, except when running under a hypervisor. + */ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); } diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c index b0f3aed76b7507..3d4ec80a6bb96e 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c @@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct list_head *head; struct rdtgroup *entry; + if (!is_mbm_local_enabled()) + return; + r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; closid = rgrp->closid; rmid = rgrp->mon.rmid; diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 643670fb894348..274d220d0a83da 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -2379,7 +2379,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - break; + continue; used_b |= *ctrl; if (mode == RDT_MODE_SHAREABLE) d->new_ctrl |= *ctrl; diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index c805a06e14c388..ff1c00b695aeda 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -46,8 +46,6 @@ static struct mce i_mce; static struct dentry *dfs_inj; -static u8 n_banks; - #define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 @@ -567,9 +565,15 @@ static void do_inject(void) static int inj_bank_set(void *data, u64 val) { struct mce *m = (struct mce *)data; + u8 n_banks; + u64 cap; + + /* Get bank count on target CPU so we can handle non-uniform values. */ + rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); + n_banks = cap & MCG_BANKCNT_MASK; if (val >= n_banks) { - pr_err("Non-existent MCE bank: %llu\n", val); + pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu); return -EINVAL; } @@ -659,10 +663,6 @@ static struct dfs_node { static int __init debugfs_init(void) { unsigned int i; - u64 cap; - - rdmsrl(MSR_IA32_MCG_CAP, cap); - n_banks = cap & MCG_BANKCNT_MASK; dfs_inj = debugfs_create_dir("mce-inject", NULL); if (!dfs_inj) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index f9e7096b1804de..fee118b3b69fd7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -711,19 +711,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) barrier(); m.status = mce_rdmsrl(msr_ops.status(i)); + + /* If this entry is not valid, ignore it */ if (!(m.status & MCI_STATUS_VAL)) continue; /* - * Uncorrected or signalled events are handled by the exception - * handler when it is enabled, so don't process those here. - * - * TBD do the same check for MCI_STATUS_EN here? + * If we are logging everything (at CPU online) or this + * is a corrected error, then we must log it. */ - if (!(flags & MCP_UC) && - (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) - continue; + if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC)) + goto log_it; + + /* + * Newer Intel systems that support software error + * recovery need to make additional checks. Other + * CPUs should skip over uncorrected errors, but log + * everything else. 
+ */ + if (!mca_cfg.ser) { + if (m.status & MCI_STATUS_UC) + continue; + goto log_it; + } + + /* Log "not enabled" (speculative) errors */ + if (!(m.status & MCI_STATUS_EN)) + goto log_it; + + /* + * Log UCNA (SDM: 15.6.3 "UCR Error Classification") + * UC == 1 && PCC == 0 && S == 0 + */ + if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S)) + goto log_it; + + /* + * Skip anything else. Presumption is that our read of this + * bank is racing with a machine check. Leave the log alone + * for do_machine_check() to deal with it. + */ + continue; +log_it: error_seen = true; mce_read_aux(&m, i); @@ -1450,13 +1480,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq); static int __mcheck_cpu_mce_banks_init(void) { int i; - u8 num_banks = mca_cfg.banks; - mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL); + mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL); if (!mce_banks) return -ENOMEM; - for (i = 0; i < num_banks; i++) { + for (i = 0; i < MAX_NR_BANKS; i++) { struct mce_bank *b = &mce_banks[i]; b->ctl = -1ULL; @@ -1470,28 +1499,19 @@ static int __mcheck_cpu_mce_banks_init(void) */ static int __mcheck_cpu_cap_init(void) { - unsigned b; u64 cap; + u8 b; rdmsrl(MSR_IA32_MCG_CAP, cap); b = cap & MCG_BANKCNT_MASK; - if (!mca_cfg.banks) - pr_info("CPU supports %d MCE banks\n", b); - - if (b > MAX_NR_BANKS) { - pr_warn("Using only %u machine check banks out of %u\n", - MAX_NR_BANKS, b); + if (WARN_ON_ONCE(b > MAX_NR_BANKS)) b = MAX_NR_BANKS; - } - /* Don't support asymmetric configurations today */ - WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); - mca_cfg.banks = b; + mca_cfg.banks = max(mca_cfg.banks, b); if (!mce_banks) { int err = __mcheck_cpu_mce_banks_init(); - if (err) return err; } @@ -2473,6 +2493,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key); static int __init mcheck_late_init(void) { + pr_info("Using %d MCE banks\n", mca_cfg.banks); + if (mca_cfg.recovery) static_branch_inc(&mcsafe_key); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b9bc8a1a584e39..b7027e667604e9 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size) if (ustate == UCODE_ERROR) { error = -1; break; - } else if (ustate == UCODE_OK) + } else if (ustate == UCODE_NEW) { apply_microcode_on_target(cpu); + } } return error; @@ -872,7 +873,7 @@ int __init microcode_init(void) goto out_ucode_group; register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 7ee8067cbf45c7..9f033dfd27669b 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -29,6 +29,7 @@ #include #include #include +#include #ifdef CONFIG_DYNAMIC_FTRACE @@ -228,6 +229,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } static unsigned long ftrace_update_func; +static unsigned long ftrace_update_func_call; static int update_ftrace_func(unsigned long ip, void *new) { @@ -256,6 +258,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned char *new; int ret; + ftrace_update_func_call = (unsigned long)func; + new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -291,13 +295,28 @@ int 
ftrace_int3_handler(struct pt_regs *regs) if (WARN_ON_ONCE(!regs)) return 0; - ip = regs->ip - 1; - if (!ftrace_location(ip) && !is_ftrace_caller(ip)) - return 0; + ip = regs->ip - INT3_INSN_SIZE; - regs->ip += MCOUNT_INSN_SIZE - 1; +#ifdef CONFIG_X86_64 + if (ftrace_location(ip)) { + int3_emulate_call(regs, (unsigned long)ftrace_regs_caller); + return 1; + } else if (is_ftrace_caller(ip)) { + if (!ftrace_update_func_call) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } + int3_emulate_call(regs, ftrace_update_func_call); + return 1; + } +#else + if (ftrace_location(ip) || is_ftrace_caller(ip)) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } +#endif - return 1; + return 0; } static int ftrace_write(unsigned long ip, const char *val, int size) @@ -733,18 +752,21 @@ union ftrace_op_code_union { } __attribute__((packed)); }; +#define RET_SIZE 1 + static unsigned long create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) { - unsigned const char *jmp; unsigned long start_offset; unsigned long end_offset; unsigned long op_offset; unsigned long offset; + unsigned long npages; unsigned long size; - unsigned long ip; + unsigned long retq; unsigned long *ptr; void *trampoline; + void *ip; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; union ftrace_op_code_union op_ptr; @@ -764,27 +786,28 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* * Allocate enough size to store the ftrace_caller code, - * the jmp to ftrace_epilogue, as well as the address of - * the ftrace_ops this trampoline is used for. + * the iret , as well as the address of the ftrace_ops this + * trampoline is used for. */ - trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *)); + trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *)); if (!trampoline) return 0; - *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *); + *tramp_size = size + RET_SIZE + sizeof(void *); + npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE); /* Copy ftrace_caller onto the trampoline memory */ ret = probe_kernel_read(trampoline, (void *)start_offset, size); - if (WARN_ON(ret < 0)) { - tramp_free(trampoline, *tramp_size); - return 0; - } + if (WARN_ON(ret < 0)) + goto fail; - ip = (unsigned long)trampoline + size; + ip = trampoline + size; - /* The trampoline ends with a jmp to ftrace_epilogue */ - jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue); - memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); + /* The trampoline ends with ret(q) */ + retq = (unsigned long)ftrace_stub; + ret = probe_kernel_read(ip, (void *)retq, RET_SIZE); + if (WARN_ON(ret < 0)) + goto fail; /* * The address of the ftrace_ops that is used for this trampoline @@ -794,17 +817,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) * the global function_trace_op variable. */ - ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE); + ptr = (unsigned long *)(trampoline + size + RET_SIZE); *ptr = (unsigned long)ops; op_offset -= start_offset; memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); /* Are we pointing to the reference? 
*/ - if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { - tramp_free(trampoline, *tramp_size); - return 0; - } + if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) + goto fail; /* Load the contents of ptr into the callback parameter */ offset = (unsigned long)ptr; @@ -818,7 +839,16 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* ALLOC_TRAMP flags lets us know we created it */ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; + /* + * Module allocation needs to be completed by making the page + * executable. The page is still writable, which is a security hazard, + * but anyhow ftrace breaks W^X completely. + */ + set_memory_x((unsigned long)trampoline, npages); return (unsigned long)trampoline; +fail: + tramp_free(trampoline, *tramp_size); + return 0; } static unsigned long calc_trampoline_call_offset(bool save_regs) @@ -868,6 +898,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) func = ftrace_ops_get_func(ops); + ftrace_update_func_call = (unsigned long)func; + /* Do a safe modify in case the trampoline is executing */ new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -964,6 +996,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func) { unsigned char *new; + ftrace_update_func_call = 0UL; new = ftrace_jmp_replace(ip, (unsigned long)func); return update_ftrace_func(ip, new); diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 91b2cff4b79a39..75f2b36b41a695 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -171,9 +171,6 @@ GLOBAL(ftrace_call) restore_mcount_regs /* - * The copied trampoline must call ftrace_epilogue as it - * still may need to call the function graph tracer. - * * The code up to this label is copied into trampolines so * think twice before adding any new code or changing the * layout here. @@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif -/* This is weak to keep gas from relaxing the jumps */ +/* + * This is weak to keep gas from relaxing the jumps. + * It is also used to copy the retq for trampolines. + */ WEAK(ftrace_stub) retq ENDPROC(ftrace_caller) diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 0469cd078db15c..b50ac9c7397bb2 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow; /* * Probabilistic stack overflow check: * - * Only check the stack in process context, because everything else - * runs on the big interrupt stacks. Checking reliably is too expensive, - * so we just check from interrupts. + * Regular device interrupts can enter on the following stacks: + * + * - User stack + * + * - Kernel task stack + * + * - Interrupt stack if a device driver reenables interrupts + * which should only happen in really old drivers. + * + * - Debug IST stack + * + * All other contexts are invalid. 
*/ static inline void stack_overflow_check(struct pt_regs *regs) { @@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs) return; oist = this_cpu_ptr(&orig_ist); - estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; - estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; + estack_bottom = (u64)oist->ist[DEBUG_STACK]; + estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN; if (regs->sp >= estack_top && regs->sp <= estack_bottom) return; diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index eeea935e9bb53f..4c3d9a3d45b2f9 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -16,8 +16,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - union jump_code_union { char code[JUMP_LABEL_NOP_SIZE]; struct { @@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, if (jlstate == JL_STATE_UPDATE) __jump_label_transform(entry, type, text_poke_early, 1); } - -#endif diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 544bc2dfe4082c..e83a057564d1b5 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -431,8 +431,20 @@ void *alloc_insn_page(void) void *page; page = module_alloc(PAGE_SIZE); - if (page) - set_memory_ro((unsigned long)page & PAGE_MASK, 1); + if (!page) + return NULL; + + /* + * First make the page read-only, and only then make it executable to + * prevent it from being W+X in between. + */ + set_memory_ro((unsigned long)page, 1); + + /* + * TODO: Once additional kernel code protection mechanisms are set, ensure + * that the page was not maliciously altered and it is still zeroed. + */ + set_memory_x((unsigned long)page, 1); return page; } @@ -440,8 +452,12 @@ void *alloc_insn_page(void) /* Recover page to RW mode before releasing it */ void free_insn_page(void *page) { - set_memory_nx((unsigned long)page & PAGE_MASK, 1); - set_memory_rw((unsigned long)page & PAGE_MASK, 1); + /* + * First make the page non-executable, and only then make it writable to + * prevent it from being W+X in between. + */ + set_memory_nx((unsigned long)page, 1); + set_memory_rw((unsigned long)page, 1); module_memfree(page); } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index f58336af095c9d..6645f123419c62 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -87,7 +87,7 @@ void *module_alloc(unsigned long size) p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR + get_module_load_offset(), MODULES_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (p && (kasan_module_alloc(p, size) < 0)) { vfree(p); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index d3e593eb189f0b..020efe0f9614f8 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct task_struct *tsk; int err; + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. 
+ */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a0854f283efda3..59f71d0f2b2340 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; + + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. + */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 92a3b312a53c46..44e647a65de886 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs, COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); -#ifdef CONFIG_X86_64 - /* - * Fix up SS if needed for the benefit of old DOSEMU and - * CRIU. - */ - if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && - user_64bit_mode(regs))) - force_valid_ss(regs); -#endif - get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ @@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs, buf = (void __user *)buf_val; } get_user_catch(err); +#ifdef CONFIG_X86_64 + /* + * Fix up SS if needed for the benefit of old DOSEMU and + * CRIU. + */ + if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs))) + force_valid_ss(regs); +#endif + err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32)); force_iret(); @@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, { struct rt_sigframe __user *frame; void __user *fp = NULL; + unsigned long uc_flags; int err = 0; frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp); @@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. */ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); save_altstack_ex(&frame->uc.uc_stack, regs->sp); @@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig, { #ifdef CONFIG_X86_X32_ABI struct rt_sigframe_x32 __user *frame; + unsigned long uc_flags; void __user *restorer; int err = 0; void __user *fpstate = NULL; @@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. 
*/ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 0a5efd76491476..e6db475164edec 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include #include @@ -388,13 +387,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) regs->ip = (unsigned long)general_protection; regs->sp = (unsigned long)&gpregs->orig_ax; - /* - * This situation can be triggered by userspace via - * modify_ldt(2) and the return does not take the regular - * user space exit, so a CPU buffer clear is required when - * MDS mitigation is enabled. - */ - mds_user_clear_cpu_buffers(); return; } #endif diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 860bd271619df0..4a688ef9e4481c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -456,7 +456,7 @@ FOP_END; /* * XXX: inoutclob user must know where the argument is being expanded. - * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault. + * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault. */ #define asm_safe(insn, inoutclob...) \ ({ \ diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 01d209ab5481b0..229d996051653a 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1291,7 +1291,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, flush.address_space, flush.flags); sparse_banks[0] = flush.processor_mask; - all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; + + /* + * Work around possible WS2012 bug: it sends hypercalls + * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, + * while also expecting us to flush something and crashing if + * we don't. Let's treat processor_mask == 0 same as + * HV_FLUSH_ALL_PROCESSORS. + */ + all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || + flush.processor_mask == 0; } else { if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex, sizeof(flush_ex)))) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index d2f5aa220355f1..cba414db14cbe8 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1449,7 +1449,7 @@ static void apic_timer_expired(struct kvm_lapic *apic) if (swait_active(q)) swake_up_one(q); - if (apic_lvtt_tscdeadline(apic)) + if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use) ktimer->expired_tscdeadline = ktimer->tscdeadline; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 58ead7db71a312..952aebd0a8a34f 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -282,20 +282,16 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) { bool fast_mode = idx & (1u << 31); struct kvm_pmc *pmc; - u64 ctr_val; + u64 mask = fast_mode ? 
~0u : ~0ull; if (is_vmware_backdoor_pmc(idx)) return kvm_pmu_rdpmc_vmware(vcpu, idx, data); - pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx); + pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask); if (!pmc) return 1; - ctr_val = pmc_read_counter(pmc); - if (fast_mode) - ctr_val = (u32)ctr_val; - - *data = ctr_val; + *data = pmc_read_counter(pmc) & mask; return 0; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index ba8898e1a8542c..22dff661145a1b 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -25,7 +25,8 @@ struct kvm_pmu_ops { unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); - struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx); + struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx, + u64 *mask); int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx); bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index 1495a735b38e75..41dff881e0f03d 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } /* idx is the ECX register of RDPMC instruction */ -static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx) +static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); struct kvm_pmc *counters; diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 5ab4a364348e3c..c3f103e2b08e15 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, - unsigned idx) + unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); @@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, if (fixed && idx >= pmu->nr_arch_fixed_counters) return NULL; counters = fixed ? pmu->fixed_counters : pmu->gp_counters; + *mask &= pmu->counter_bitmask[fixed ? 
KVM_PMC_FIXED : KVM_PMC_GP]; return &counters[idx]; } @@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) *data = pmu->global_ovf_ctrl; return 0; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { - *data = pmc_read_counter(pmc); + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_GP]; + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_FIXED]; return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { *data = pmc->eventsel; @@ -235,11 +240,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } break; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { - if (!msr_info->host_initiated) - data = (s64)(s32)data; - pmc->counter += data - pmc_read_counter(pmc); + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { + if (msr_info->host_initiated) + pmc->counter = data; + else + pmc->counter = (s32)data; + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + pmc->counter = data; return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8dd9208ae4de13..ea454d3f7763f2 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2022,7 +2022,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!kvm_vcpu_apicv_active(vcpu)) return; - if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT)) + /* + * Since the host physical APIC id is 8 bits, + * we can support host APIC ID upto 255. + */ + if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK)) return; entry = READ_ONCE(*(svm->avic_physical_id_cache)); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f3337adaf9b323..7fed1d6dd1a13b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) return 0; } -bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) +static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { - if (efer & efer_reserved_bits) - return false; - if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) - return false; + return false; if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) - return false; + return false; return true; + +} +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) +{ + if (efer & efer_reserved_bits) + return false; + + return __kvm_valid_efer(vcpu, efer); } EXPORT_SYMBOL_GPL(kvm_valid_efer); -static int set_efer(struct kvm_vcpu *vcpu, u64 efer) +static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u64 old_efer = vcpu->arch.efer; + u64 efer = msr_info->data; - if (!kvm_valid_efer(vcpu, efer)) + if (efer & efer_reserved_bits) return 1; - if (is_paging(vcpu) - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) - return 1; + if (!msr_info->host_initiated) { + if (!__kvm_valid_efer(vcpu, efer)) + return 1; + + if (is_paging(vcpu) && + (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) + return 1; + } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; @@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.arch_capabilities = data; break; case MSR_EFER: - return set_efer(vcpu, data); + return set_efer(vcpu, 
msr_info); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ @@ -2976,6 +2987,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 25a972c61b0ae9..3c19d60316a88c 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -6,6 +6,18 @@ # Produces uninteresting flaky coverage. KCOV_INSTRUMENT_delay.o := n +# Early boot use of cmdline; don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KCOV_INSTRUMENT_cmdline.o := n +KASAN_SANITIZE_cmdline.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_cmdline.o = -pg +endif + +CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector) +endif + inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt quiet_cmd_inat_tables = GEN $@ diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 9119d8e41f1ff5..87dcba101e5626 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, } /** - * get_desc() - Obtain pointer to a segment descriptor + * get_desc() - Obtain contents of a segment descriptor + * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. @@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, * * Returns: * - * Pointer to segment descriptor on success. + * True on success, false on failure. * * NULL on error. */ -static struct desc_struct *get_desc(unsigned short sel) +static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { - struct desc_struct *desc = NULL; + bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. 
*/ @@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel) mutex_lock(¤t->active_mm->context.lock); ldt = current->active_mm->context.ldt; - if (ldt && sel < ldt->nr_entries) - desc = &ldt->entries[sel]; + if (ldt && sel < ldt->nr_entries) { + *out = ldt->entries[sel]; + success = true; + } mutex_unlock(¤t->active_mm->context.lock); - return desc; + return success; } #endif native_store_gdt(&gdt_desc); @@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel) desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) - return NULL; + return false; - return (struct desc_struct *)(gdt_desc.address + desc_base); + *out = *(struct desc_struct *)(gdt_desc.address + desc_base); + return true; } /** @@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel) */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { - struct desc_struct *desc; + struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); @@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) if (!sel) return -1L; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return -1L; - return get_desc_base(desc); + return get_desc_base(&desc); } /** @@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { - struct desc_struct *desc; + struct desc_struct desc; unsigned long limit; short sel; @@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) if (!sel) return 0; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return 0; /* @@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ - limit = get_desc_limit(desc); - if (desc->g) + limit = get_desc_limit(&desc); + if (desc.g) limit = (limit << 12) + 0xfff; return limit; @@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) */ int insn_get_code_seg_params(struct pt_regs *regs) { - struct desc_struct *desc; + struct desc_struct desc; short sel; if (v8086_mode(regs)) @@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs) if (sel < 0) return sel; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return -EINVAL; /* @@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs) * determines whether a segment contains data or code. If this is a data * segment, return error. */ - if (!(desc->type & BIT(3))) + if (!(desc.type & BIT(3))) return -EINVAL; - switch ((desc->l << 1) | desc->d) { + switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 3b24dc05251c7c..9d05572370edc4 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe) /* Copy successful. 
Return zero */ .L_done_memcpy_trap: xorl %eax, %eax +.L_done: ret ENDPROC(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) @@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe) addl %edx, %ecx .E_trailing_bytes: mov %ecx, %eax - ret + jmp .L_done /* * For write fault handling, given the destination is unaligned, diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 47bebfe6efa70a..9d9765e4d1ef19 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -427,8 +427,6 @@ static noinline int vmalloc_fault(unsigned long address) if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; - WARN_ON_ONCE(in_nmi()); - /* * Copy kernel mappings over when needed. This can also * happen within a race in page table update. In the later diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e3e77527f8dff8..4bfd14d5da8e54 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) if (!pgtable_l5_enabled()) return (p4d_t *)pgd; - p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d = pgd_val(*pgd) & PTE_PFN_MASK; p4d += __START_KERNEL_map - phys_base; return (p4d_t *)p4d + p4d_index(addr); } diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 0988971069c930..bfe769209eaef7 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region { } kaslr_regions[] = { { &page_offset_base, 0 }, { &vmalloc_base, 0 }, - { &vmemmap_base, 1 }, + { &vmemmap_base, 0 }, }; /* Get size in bytes used by the memory region */ @@ -77,6 +77,7 @@ void __init kernel_randomize_memory(void) unsigned long rand, memory_tb; struct rnd_state rand_state; unsigned long remain_entropy; + unsigned long vmemmap_size; vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; vaddr = vaddr_start; @@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void) if (memory_tb < kaslr_regions[0].size_tb) kaslr_regions[0].size_tb = memory_tb; + /* + * Calculate the vmemmap region size in TBs, aligned to a TB + * boundary. 
+ */ + vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) * + sizeof(struct page); + kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT); + /* Calculate entropy available between regions */ remain_entropy = vaddr_end - vaddr_start; for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 52e55108404ebf..d3a73f9335e11c 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = { void __init pcibios_irq_init(void) { + struct irq_routing_table *rtable = NULL; + DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) @@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void) pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS - if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) + if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { pirq_table = pcibios_get_irq_routing_table(); + rtable = pirq_table; + } #endif if (pirq_table) { pirq_peer_trick(); @@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void) * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ - if (io_apic_assign_pci_irqs) + if (io_apic_assign_pci_irqs) { + kfree(rtable); pirq_table = NULL; + } } x86_init.pci.fixup_irqs(); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index a7d966964c6f20..513ce09e995043 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void) * address in its instruction pointer may not be possible to resolve * any more at that point (the page tables used by it previously may * have been overwritten by hibernate image data). + * + * First, make sure that we wake up all the potentially disabled SMT + * threads which have been initially brought up and then put into + * mwait/cpuidle sleep. + * Those will be put to proper (not interfering with hibernation + * resume) sleep afterwards, and the resumed kernel will decide itself + * what to do with them. */ + ret = cpuhp_smt_enable(); + if (ret) + return ret; smp_ops.play_dead = resume_play_dead; ret = disable_nonboot_cpus(); smp_ops.play_dead = play_dead; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index f8e3b668d20b9b..c9986041a5e124 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void *addr) return 0; } + +int arch_resume_nosmt(void) +{ + int ret = 0; + /* + * We reached this while coming out of hibernation. This means + * that SMT siblings are sleeping in hlt, as mwait is not safe + * against control transition during resume (see comment in + * hibernate_resume_nonboot_cpu_disable()). + * + * If the resumed kernel has SMT disabled, we have to take all the + * SMT siblings out of hlt, and offline them again so that they + * end up in mwait proper. + * + * Called with hotplug disabled. 
+ */ + cpu_hotplug_enable(); + if (cpu_smt_control == CPU_SMT_DISABLED || + cpu_smt_control == CPU_SMT_FORCE_DISABLED) { + enum cpuhp_smt_control old = cpu_smt_control; + + ret = cpuhp_smt_enable(); + if (ret) + goto out; + ret = cpuhp_smt_disable(old); + if (ret) + goto out; + } +out: + cpu_hotplug_disable(); + return ret; +} diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index f7f77023288ab3..dab07827d25e88 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void) } xen_pvh = 1; + xen_domain_type = XEN_HVM_DOMAIN; xen_start_flags = pvh_start_info.flags; msr = cpuid_ebx(xen_cpuid_base() + 2); diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 351283b60df617..a285fbd0fd9be9 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start; extern char _SecondaryResetVector_text_end; #endif -static inline int mem_reserve(unsigned long start, unsigned long end) +static inline int __init_memblock mem_reserve(unsigned long start, + unsigned long end) { return memblock_reserve(start, end - start); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 15e8c9955b793a..6bb397995610e9 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2509,6 +2509,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && bfq_symmetric_scenario(bfqd)) sl = min_t(u64, sl, BFQ_MIN_TT); + else if (bfqq->wr_coeff > 1) + sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); bfqd->last_idling_start = ktime_get(); hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), diff --git a/block/blk-core.c b/block/blk-core.c index 33488b1426b7be..6eed5d84c2efd4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -411,7 +411,6 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } else { diff --git a/block/blk-mq.c b/block/blk-mq.c index 4e563ee462cb65..70d839b9c3b09c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2465,6 +2465,8 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx; unsigned int i; + cancel_delayed_work_sync(&q->requeue_work); + /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) diff --git a/block/genhd.c b/block/genhd.c index be5bab20b2abf2..2b2a936cf8480c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -518,6 +518,18 @@ void blk_free_devt(dev_t devt) } } +/** + * We invalidate devt by assigning NULL pointer for devt in idr. + */ +void blk_invalidate_devt(dev_t devt) +{ + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { + spin_lock_bh(&ext_devt_lock); + idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt))); + spin_unlock_bh(&ext_devt_lock); + } +} + static char *bdevt_str(dev_t devt, char *buf) { if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) { @@ -769,6 +781,13 @@ void del_gendisk(struct gendisk *disk) if (!(disk->flags & GENHD_FL_HIDDEN)) blk_unregister_region(disk_devt(disk), disk->minors); + /* + * Remove gendisk pointer from idr so that it cannot be looked up + * while RCU period before freeing gendisk is running to prevent + * use-after-free issues. Note that the device number stays + * "in-use" until we really free the gendisk. 
+ */ + blk_invalidate_devt(disk_devt(disk)); kobject_put(disk->part0.holder_dir); kobject_put(disk->slave_dir); diff --git a/block/partition-generic.c b/block/partition-generic.c index 5f8db5c5140f40..98d60a59b843c4 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -289,6 +289,13 @@ void delete_partition(struct gendisk *disk, int partno) kobject_put(part->holder_dir); device_del(part_to_dev(part)); + /* + * Remove gendisk pointer from idr so that it cannot be looked up + * while RCU period before freeing gendisk is running to prevent + * use-after-free issues. Note that the device number stays + * "in-use" until we really free the gendisk. + */ + blk_invalidate_devt(part_devt(part)); hd_struct_kill(part); } diff --git a/block/sed-opal.c b/block/sed-opal.c index e0de4dd448b3c7..1196408972937c 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev, static int opal_enable_disable_shadow_mbr(struct opal_dev *dev, struct opal_mbr_data *opal_mbr) { + u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ? + OPAL_TRUE : OPAL_FALSE; + const struct opal_step mbr_steps[] = { { opal_discovery0, }, { start_admin1LSP_opal_session, &opal_mbr->key }, - { set_mbr_done, &opal_mbr->enable_disable }, + { set_mbr_done, &enable_disable }, { end_opal_session, }, { start_admin1LSP_opal_session, &opal_mbr->key }, - { set_mbr_enable_disable, &opal_mbr->enable_disable }, + { set_mbr_enable_disable, &enable_disable }, { end_opal_session, }, { NULL, } }; @@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev, static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key) { - u8 mbr_done_tf = 1; + u8 mbr_done_tf = OPAL_TRUE; const struct opal_step mbrdone_step [] = { { opal_discovery0, }, { start_admin1LSP_opal_session, key }, diff --git a/crypto/ccm.c b/crypto/ccm.c index 0a083342ec8cf3..8104c564dd318c 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -455,7 +455,6 @@ static void crypto_ccm_free(struct aead_instance *inst) static int crypto_ccm_create_common(struct crypto_template *tmpl, struct rtattr **tb, - const char *full_name, const char *ctr_name, const char *mac_name) { @@ -483,7 +482,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac = __crypto_hash_alg_common(mac_alg); err = -EINVAL; - if (mac->digestsize != 16) + if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 || + mac->digestsize != 16) goto out_put_mac; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); @@ -506,23 +506,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ctr = crypto_spawn_skcipher_alg(&ictx->ctr); - /* Not a stream cipher? */ + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; - if (ctr->base.cra_blocksize != 1) + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || + crypto_skcipher_alg_ivsize(ctr) != 16 || + ctr->base.cra_blocksize != 1) goto err_drop_ctr; - /* We want the real thing! */ - if (crypto_skcipher_alg_ivsize(ctr) != 16) + /* ctr and cbcmac must use the same underlying block cipher. 
*/ + if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0) goto err_drop_ctr; err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ctr; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->base.cra_driver_name, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; @@ -564,7 +568,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; char mac_name[CRYPTO_MAX_ALG_NAME]; - char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -578,12 +581,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= - CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, - mac_name); + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name); } static struct crypto_template crypto_ccm_tmpl = { @@ -596,23 +594,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *ctr_name; - const char *cipher_name; - char full_name[CRYPTO_MAX_ALG_NAME]; + const char *mac_name; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) return PTR_ERR(ctr_name); - cipher_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); - - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", - ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; + mac_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(mac_name)) + return PTR_ERR(mac_name); - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, - cipher_name); + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name); } static struct crypto_template crypto_ccm_base_tmpl = { diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 600afa99941fe0..4d6f51bcdfabe9 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha_name, - poly_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,%s)", name, chacha->base.cra_name, + poly->cra_name) >= CRYPTO_MAX_ALG_NAME) goto out_drop_chacha; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_driver_name, diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index 8e94e29dc6fc89..d08048ae555279 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out) return 0; } -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + *(__u16 *)out = crc_t10dif_generic(crc, data, len); return 0; } @@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc 
*desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - return __chksum_finup(&ctx->crc, data, len, out); + return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, length, out); + return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { diff --git a/crypto/gcm.c b/crypto/gcm.c index 0ad879e1f9b212..9b0ea3ded1a426 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst) static int crypto_gcm_create_common(struct crypto_template *tmpl, struct rtattr **tb, - const char *full_name, const char *ctr_name, const char *ghash_name) { @@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, goto err_free_inst; err = -EINVAL; - if (ghash->digestsize != 16) + if (strcmp(ghash->base.cra_name, "ghash") != 0 || + ghash->digestsize != 16) goto err_drop_ghash; crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); @@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr = crypto_spawn_skcipher_alg(&ctx->ctr); - /* We only support 16-byte blocks. */ + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; - if (crypto_skcipher_alg_ivsize(ctr) != 16) + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || + crypto_skcipher_alg_ivsize(ctr) != 16 || + ctr->base.cra_blocksize != 1) goto out_put_ctr; - /* Not a stream cipher? */ - if (ctr->base.cra_blocksize != 1) + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; - err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->base.cra_driver_name, ghash_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + @@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; - char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= - CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_gcm_create_common(tmpl, tb, full_name, - ctr_name, "ghash"); + return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash"); } static struct crypto_template crypto_gcm_tmpl = { @@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, { const char *ctr_name; const char *ghash_name; - char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) @@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, if (IS_ERR(ghash_name)) return PTR_ERR(ghash_name); - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", - ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_gcm_create_common(tmpl, tb, full_name, - ctr_name, 
ghash_name); + return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name); } static struct crypto_template crypto_gcm_base_tmpl = { diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index 8c77bc78a09f26..df8fc0f543741e 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - salsa20_init(state, ctx, walk.iv); + salsa20_init(state, ctx, req->iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 46bb300d418fe7..b664cf867f5fb2 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -131,8 +131,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err) memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (WARN_ON(err)) { - /* unexpected case; didn't process all bytes */ + if (err) { + /* + * Didn't process all bytes. Either the algorithm is + * broken, or this was the last step and it turned out + * the message wasn't evenly divisible into blocks but + * the algorithm requires it. + */ err = -EINVAL; goto finish; } diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 3c5ea7cb693ef5..ab8faa6d6616f3 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1176,7 +1176,6 @@ static int __init erst_init(void) "Error Record Serialization Table (ERST) support is initialized.\n"); buf = kmalloc(erst_erange.size, GFP_KERNEL); - spin_lock_init(&erst_info.buf_lock); if (buf) { erst_info.buf = buf + sizeof(struct cper_pstore_record); erst_info.bufsize = erst_erange.size - diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e48eebc27b81bf..43c2615434b48b 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1231,18 +1231,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) /* * set numa proximity domain for smmuv3 device */ -static void __init arm_smmu_v3_set_proximity(struct device *dev, +static int __init arm_smmu_v3_set_proximity(struct device *dev, struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; smmu = (struct acpi_iort_smmu_v3 *)node->node_data; if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { - set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm)); + int node = acpi_map_pxm_to_node(smmu->pxm); + + if (node != NUMA_NO_NODE && !node_online(node)) + return -EINVAL; + + set_dev_node(dev, node); pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", smmu->base_address, smmu->pxm); } + return 0; } #else #define arm_smmu_v3_set_proximity NULL @@ -1317,7 +1323,7 @@ struct iort_dev_config { int (*dev_count_resources)(struct acpi_iort_node *node); void (*dev_init_resources)(struct resource *res, struct acpi_iort_node *node); - void (*dev_set_proximity)(struct device *dev, + int (*dev_set_proximity)(struct device *dev, struct acpi_iort_node *node); }; @@ -1368,8 +1374,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, if (!pdev) return -ENOMEM; - if (ops->dev_set_proximity) - ops->dev_set_proximity(&pdev->dev, node); + if (ops->dev_set_proximity) { + ret = ops->dev_set_proximity(&pdev->dev, node); + if (ret) + goto dev_put; + } count = ops->dev_count_resources(node); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a7c2673ffd36e8..1806260938e89a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -948,8 +948,8 @@ static bool 
acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) u32 sys_target = acpi_target_system_state(); int ret, state; - if (!pm_runtime_suspended(dev) || !adev || - device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid && + device_may_wakeup(dev) != !!adev->wakeup.prepare_count)) return true; if (sys_target == ACPI_STATE_S0) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 693cf05b0cc44f..288673cff85eaf 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -975,6 +975,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, const struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; + /* + * We can have a combination of device and data nodes, e.g. with + * hierarchical _DSD properties. Make sure the adev pointer is + * restored before going through data nodes, otherwise we will + * be looking for data_nodes below the last device found instead + * of the common fwnode shared by device_nodes and data_nodes. + */ + adev = to_acpi_device_node(fwnode); if (adev) head = &adev->data.subnodes; else if (data) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 74c489047f5709..847db3edcb5b8e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void) if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + acpi_enable_wakeup_devices(ACPI_STATE_S0); + /* Change the configuration of GPEs to avoid spurious wakeup. */ acpi_enable_all_wakeup_gpes(); acpi_os_wait_events_complete(); @@ -1026,6 +1028,8 @@ static void acpi_s2idle_restore(void) { acpi_enable_all_runtime_gpes(); + acpi_disable_wakeup_devices(ACPI_STATE_S0); + if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index adf28788cab52c..133fed8e4a8bb9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4476,9 +4476,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ - { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, - { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + /* drives which fail FPDMA_AA activation (some may freeze afterwards) + the ST disks also have LPM issues */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 diff --git a/drivers/base/dd.c b/drivers/base/dd.c index f5b74856784a90..d48b310c476037 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -482,7 +482,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) ret = dma_configure(dev); if (ret) - goto dma_failed; + goto probe_failed; if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", @@ -537,14 +537,13 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto done; probe_failed: - dma_deconfigure(dev); -dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); pinctrl_bind_failed: device_links_no_driver(dev); devres_release_all(dev); + 
dma_deconfigure(dev); driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a690fd40026051..4abd7c6531d9de 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1736,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + /* Avoid direct_complete to let wakeup_path propagate. */ + if (device_may_wakeup(dev) || dev->power.wakeup_path) + dev->power.direct_complete = false; + if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c18586fccb6f2b..17defbf4f332c5 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) /* * Must use NOIO because we don't want to recurse back into the * block or filesystem layers from page reclaim. - * - * Cannot support DAX and highmem, because our ->direct_access - * routine for DAX must return memory that is always addressable. - * If DAX was reworked to use pfns and kmap throughout, this - * restriction might be able to be lifted. */ - gfp_flags = GFP_NOIO | __GFP_ZERO; + gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM; page = alloc_page(gfp_flags); if (!page) return NULL; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3e905da33bcbe7..5830d941788628 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) } free_shadow: - kfree(rinfo->shadow[i].grants_used); + kvfree(rinfo->shadow[i].grants_used); rinfo->shadow[i].grants_used = NULL; - kfree(rinfo->shadow[i].indirect_grants); + kvfree(rinfo->shadow[i].indirect_grants); rinfo->shadow[i].indirect_grants = NULL; - kfree(rinfo->shadow[i].sg); + kvfree(rinfo->shadow[i].sg); rinfo->shadow[i].sg = NULL; } @@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) for (i = 0; i < info->nr_rings; i++) blkif_free_ring(&info->rinfo[i]); - kfree(info->rinfo); + kvfree(info->rinfo); info->rinfo = NULL; info->nr_rings = 0; } @@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_info *info) if (!info->nr_rings) info->nr_rings = 1; - info->rinfo = kcalloc(info->nr_rings, - sizeof(struct blkfront_ring_info), - GFP_KERNEL); + info->rinfo = kvcalloc(info->nr_rings, + sizeof(struct blkfront_ring_info), + GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); info->nr_rings = 0; @@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) for (i = 0; i < BLK_RING_SIZE(info); i++) { rinfo->shadow[i].grants_used = - kcalloc(grants, - sizeof(rinfo->shadow[i].grants_used[0]), - GFP_NOIO); - rinfo->shadow[i].sg = kcalloc(psegs, - sizeof(rinfo->shadow[i].sg[0]), - GFP_NOIO); + kvcalloc(grants, + sizeof(rinfo->shadow[i].grants_used[0]), + GFP_NOIO); + rinfo->shadow[i].sg = kvcalloc(psegs, + sizeof(rinfo->shadow[i].sg[0]), + GFP_NOIO); if (info->max_indirect_segments) rinfo->shadow[i].indirect_grants = - kcalloc(INDIRECT_GREFS(grants), - sizeof(rinfo->shadow[i].indirect_grants[0]), - GFP_NOIO); + kvcalloc(INDIRECT_GREFS(grants), + sizeof(rinfo->shadow[i].indirect_grants[0]), + GFP_NOIO); if ((rinfo->shadow[i].grants_used == NULL) || (rinfo->shadow[i].sg == NULL) || (info->max_indirect_segments 
&& @@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) out_of_memory: for (i = 0; i < BLK_RING_SIZE(info); i++) { - kfree(rinfo->shadow[i].grants_used); + kvfree(rinfo->shadow[i].grants_used); rinfo->shadow[i].grants_used = NULL; - kfree(rinfo->shadow[i].sg); + kvfree(rinfo->shadow[i].sg); rinfo->shadow[i].sg = NULL; - kfree(rinfo->shadow[i].indirect_grants); + kvfree(rinfo->shadow[i].indirect_grants); rinfo->shadow[i].indirect_grants = NULL; } if (!list_empty(&rinfo->indirect_pages)) { diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index f0d593c3fa728b..77004c29da089f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -504,6 +504,8 @@ static int qca_open(struct hci_uart *hu) qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type != QCA_WCN3990) { gpiod_set_value_cansleep(qcadev->bt_en, 1); + /* Controller needs time to bootup. */ + msleep(150); } else { hu->init_speed = qcadev->init_speed; hu->oper_speed = qcadev->oper_speed; diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index b65ff696289954..e9b6ac61fb7f64 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev) priv->rng.read = omap_rng_do_read; priv->rng.init = omap_rng_init; priv->rng.cleanup = omap_rng_cleanup; + priv->rng.quality = 900; priv->rng.priv = (unsigned long)priv; platform_set_drvdata(pdev, priv); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 76c2010ba67269..af44db2dfb68a8 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -688,12 +688,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* End of read */ len = ssif_info->multi_len; data = ssif_info->data; - } else if (blocknum != ssif_info->multi_pos) { + } else if (blocknum + 1 != ssif_info->multi_pos) { /* * Out of sequence block, just abort. Block * numbers start at zero for the second block, * but multi_pos starts at one, so the +1. */ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + dev_dbg(&ssif_info->client->dev, + "Received message out of sequence, expected %u, got %u\n", + ssif_info->multi_pos - 1, blocknum); result = -EIO; } else { ssif_inc_stat(ssif_info, received_message_parts); diff --git a/drivers/char/random.c b/drivers/char/random.c index c75b6cdf00533a..0a84b7f468ad05 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -778,6 +778,7 @@ static struct crng_state **crng_node_pool __read_mostly; #endif static void invalidate_batched_entropy(void); +static void numa_crng_init(void); static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); static int __init parse_trust_cpu(char *arg) @@ -806,7 +807,9 @@ static void crng_initialize(struct crng_state *crng) } crng->state[i] ^= rv; } - if (trust_cpu && arch_init) { + if (trust_cpu && arch_init && crng == &primary_crng) { + invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; pr_notice("random: crng done (trusting CPU's manufacturer)\n"); } @@ -2212,8 +2215,8 @@ struct batched_entropy { u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; }; unsigned int position; + spinlock_t batch_lock; }; -static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock); /* * Get a random word for internal kernel use only. 
The quality of the random @@ -2223,12 +2226,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_ * wait_for_random_bytes() should be called and return 0 at least once * at any point prior. */ -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), +}; + u64 get_random_u64(void) { u64 ret; - bool use_lock; - unsigned long flags = 0; + unsigned long flags; struct batched_entropy *batch; static void *previous; @@ -2243,28 +2248,25 @@ u64 get_random_u64(void) warn_unseeded_randomness(&previous); - use_lock = READ_ONCE(crng_init) < 2; - batch = &get_cpu_var(batched_entropy_u64); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); + batch = raw_cpu_ptr(&batched_entropy_u64); + spin_lock_irqsave(&batch->batch_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { extract_crng((__u32 *)batch->entropy_u64); batch->position = 0; } ret = batch->entropy_u64[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); - put_cpu_var(batched_entropy_u64); + spin_unlock_irqrestore(&batch->batch_lock, flags); return ret; } EXPORT_SYMBOL(get_random_u64); -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), +}; u32 get_random_u32(void) { u32 ret; - bool use_lock; - unsigned long flags = 0; + unsigned long flags; struct batched_entropy *batch; static void *previous; @@ -2273,18 +2275,14 @@ u32 get_random_u32(void) warn_unseeded_randomness(&previous); - use_lock = READ_ONCE(crng_init) < 2; - batch = &get_cpu_var(batched_entropy_u32); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); + batch = raw_cpu_ptr(&batched_entropy_u32); + spin_lock_irqsave(&batch->batch_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { extract_crng(batch->entropy_u32); batch->position = 0; } ret = batch->entropy_u32[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); - put_cpu_var(batched_entropy_u32); + spin_unlock_irqrestore(&batch->batch_lock, flags); return ret; } EXPORT_SYMBOL(get_random_u32); @@ -2298,12 +2296,19 @@ static void invalidate_batched_entropy(void) int cpu; unsigned long flags; - write_lock_irqsave(&batched_entropy_reset_lock, flags); for_each_possible_cpu (cpu) { - per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0; - per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0; + struct batched_entropy *batched_entropy; + + batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); + spin_lock_irqsave(&batched_entropy->batch_lock, flags); + batched_entropy->position = 0; + spin_unlock(&batched_entropy->batch_lock); + + batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); + spin_lock(&batched_entropy->batch_lock); + batched_entropy->position = 0; + spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); } - write_unlock_irqrestore(&batched_entropy_reset_lock, flags); } /** diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5b5b5d72eab7f4..c55f6aeb4227a4 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -75,7 +75,7 @@ struct ports_driver_data { /* All the console devices handled by this driver */ struct list_head consoles; }; -static struct ports_driver_data 
pdrvdata; +static struct ports_driver_data pdrvdata = { .next_vtermno = 1}; static DEFINE_SPINLOCK(pdrvdata_lock); static DECLARE_COMPLETION(early_console_added); @@ -1405,6 +1405,7 @@ static int add_port(struct ports_device *portdev, u32 id) port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; + port->cons.vtermno = 0; port->host_connected = port->guest_connected = false; port->stats = (struct port_stats) { 0 }; diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index f404199596563e..794eeff0d5d2d5 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = { "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, }, { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2", "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, }, + /* + * clk_gate_ufs_subsys is a system bus clock, mark it as critical + * clock and keep it on for system suspend and resume. + */ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus", - CLK_SET_RATE_PARENT, 0x50, 21, 0, }, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, }, { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus", CLK_SET_RATE_PARENT, 0x50, 28, 0, }, { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus", diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index f54e4015b0b1f3..18842d66031766 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, return ((unsigned long)vco + postdiv - 1) / postdiv; } +static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + +static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, int postdiv) { @@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; + /* disable tuner */ + __mtk_pll_tuner_disable(pll); + /* set postdiv */ val = readl(pll->pd_addr); val &= ~(POSTDIV_MASK << pll->data->pd_shift); @@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, if (pll->tuner_addr) writel(con1 + 1, pll->tuner_addr); + /* restore tuner_en */ + __mtk_pll_tuner_enable(pll); + if (pll_en) udelay(20); } @@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw) r |= pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_enable(pll); udelay(20); @@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw) writel(r, pll->base_addr + REG_CON0); } - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) & 
~BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_disable(pll); r = readl(pll->base_addr + REG_CON0); r &= ~CON0_BASE_EN; diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 450de24a1b4224..9cfdbea493bb37 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" }; PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" }; PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" }; -PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" }; +PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" }; PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m", "sclk_otgphy0_480m" }; PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" }; @@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 6, GFLAGS), - COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED, + COMPOSITE_NOMUX(0, "atclk", "armclk", 0, RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 7, GFLAGS), COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 8, GFLAGS), - GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED, + GATE(0, "pclk_dbg", "pclk_dbg_pre", 0, RK3288_CLKGATE_CON(12), 9, GFLAGS), GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(12), 10, GFLAGS), @@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0, RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(3), 11, GFLAGS), - MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0, + MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT, RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS), GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0, RK3288_CLKGATE_CON(9), 0, GFLAGS), @@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out", RK3288_CLKSEL_CON(22), 7, IFLAGS), - GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED, + GATE(0, "jtag", "ext_jtag", 0, RK3288_CLKGATE_CON(4), 14, GFLAGS), COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0, @@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, RK3288_CLKSEL_CON(29), 0, 2, MFLAGS, RK3288_CLKGATE_CON(3), 6, GFLAGS), - GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED, + GATE(0, "hsicphy12m_xin12m", "xin12m", 0, RK3288_CLKGATE_CON(13), 9, GFLAGS), DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0, RK3288_CLKSEL_CON(11), 8, 6, DFLAGS), @@ -676,7 +676,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS), GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS), GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS), - GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 
11, GFLAGS), + GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS), /* ddrctrl [DDR Controller PHY clock] gates */ GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS), @@ -816,12 +816,9 @@ static const char *const rk3288_critical_clocks[] __initconst = { "pclk_alive_niu", "pclk_pd_pmu", "pclk_pmu_niu", - "pclk_core_niu", - "pclk_ddrupctl0", - "pclk_publ0", - "pclk_ddrupctl1", - "pclk_publ1", "pmu_hclk_otg0", + /* pwm-regulators on some boards, so handoff-critical later */ + "pclk_rkpwm", }; static void __iomem *rk3288_cru_base; @@ -838,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = { RK3288_CLKSEL_CON(10), RK3288_CLKSEL_CON(33), RK3288_CLKSEL_CON(37), + + /* We turn aclk_dmac1 on for suspend; this will restore it */ + RK3288_CLKGATE_CON(10), }; static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; @@ -853,6 +853,14 @@ static int rk3288_clk_suspend(void) readl_relaxed(rk3288_cru_base + reg_id); } + /* + * Going into deep sleep (specifically setting PMU_CLR_DMA in + * RK3288_PMU_PWRMODE_CON1) appears to fail unless + * "aclk_dmac1" is on. + */ + writel_relaxed(1 << (12 + 16), + rk3288_cru_base + RK3288_CLKGATE_CON(10)); + /* * Switch PLLs other than DPLL (for SDRAM) to slow mode to * avoid crashes on resume. The Mask ROM on the system will diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index e431661fe874bc..ecbae8acd05b8c 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS, RK3328_CLKGATE_CON(2), 12, GFLAGS), COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0, - RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS, RK3328_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0, RK3328_CLKSEL_CON(22), 0, 10, DFLAGS, @@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0, RK3328_CLKGATE_CON(25), 1, GFLAGS), GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 2, GFLAGS), GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 1, GFLAGS), + RK3328_CLKGATE_CON(25), 3, GFLAGS), GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 4, GFLAGS), GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 1, GFLAGS), + RK3328_CLKGATE_CON(25), 5, GFLAGS), GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 6, GFLAGS), COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0, RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS, @@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { /* PD_GMAC */ COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0, - RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3328_CLKGATE_CON(3), 2, GFLAGS), COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0, RK3328_CLKSEL_CON(25), 8, 3, DFLAGS, @@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { /* PD_PERI */ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS), - 
GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS), + GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS), GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS), @@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np) &rk3328_cpuclk_data, rk3328_cpuclk_rates, ARRAY_SIZE(rk3328_cpuclk_rates)); - rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0), + rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0), ROCKCHIP_SOFTRST_HIWORD_MASK); rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL); diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index ebd9436d2c7cd3..1ad53d1016a3e8 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -160,7 +160,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); - u32 n_mask, k_mask, m_mask, p_mask; + u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0; struct _ccu_nkmp _nkmp; unsigned long flags; u32 reg; @@ -179,10 +179,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, ccu_nkmp_find_best(parent_rate, rate, &_nkmp); - n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift); - k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift); - m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); - p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); + if (nkmp->n.width) + n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, + nkmp->n.shift); + if (nkmp->k.width) + k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, + nkmp->k.shift); + if (nkmp->m.width) + m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, + nkmp->m.shift); + if (nkmp->p.width) + p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, + nkmp->p.shift); spin_lock_irqsave(nkmp->common.lock, flags); diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 830d1c87fa7cb6..dc87866233b94b 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -662,8 +662,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll, pll_override_writel(val, params->pmc_divp_reg, pll); val = pll_override_readl(params->pmc_divnm_reg, pll); - val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) | - ~(divn_mask(pll) << div_nmp->override_divn_shift); + val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) | + (divn_mask(pll) << div_nmp->override_divn_shift)); val |= (cfg->m << div_nmp->override_divm_shift) | (cfg->n << div_nmp->override_divn_shift); pll_override_writel(val, params->pmc_divnm_reg, pll); diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 421b0539222058..ca3218337fd74f 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) int ret; union omap4_timeout timeout = { 0 }; - if (!clk->enable_bit) - return 0; - if (clk->clkdm) { ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); if (ret) { @@ -151,6 +148,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) } } + if (!clk->enable_bit) + return 0; + val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); val &= ~OMAP4_MODULEMODE_MASK; @@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw) union omap4_timeout timeout = { 0 }; if 
(!clk->enable_bit) - return; + goto exit; val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 505c9a55d5551f..d3213594d1a7a9 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1103,6 +1103,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) cpufreq_global_kobject, "policy%u", cpu); if (ret) { pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); + kobject_put(&policy->kobj); goto err_free_real_cpus; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 6d53f7d9fc7a92..69fc5cf4782fb9 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) /* Failure, so roll back. */ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret); + kobject_put(&dbs_data->attr_set.kobj); + policy->governor_data = NULL; if (!have_governor_per_policy()) diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index c2dd43f3f5d8a3..8d63a6dc8383ce 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); if (IS_ERR(priv.cpu_clk)) { dev_err(priv.dev, "Unable to get cpuclk\n"); - return PTR_ERR(priv.cpu_clk); + err = PTR_ERR(priv.cpu_clk); + goto out_node; } err = clk_prepare_enable(priv.cpu_clk); if (err) { dev_err(priv.dev, "Unable to prepare cpuclk\n"); - return err; + goto out_node; } kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; @@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) goto out_ddr; } - of_node_put(np); - np = NULL; - err = cpufreq_register_driver(&kirkwood_cpufreq_driver); - if (!err) - return 0; + if (err) { + dev_err(priv.dev, "Failed to register cpufreq driver\n"); + goto out_powersave; + } - dev_err(priv.dev, "Failed to register cpufreq driver\n"); + of_node_put(np); + return 0; +out_powersave: clk_disable_unprepare(priv.powersave_clk); out_ddr: clk_disable_unprepare(priv.ddr_clk); out_cpu: clk_disable_unprepare(priv.cpu_clk); +out_node: of_node_put(np); return err; diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 75dfbd2a58ea6f..c7710c149de85a 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu = of_get_cpu_node(policy->cpu, NULL); + of_node_put(cpu); if (!cpu) goto out; diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 61ae06ca008e74..e225edb5c35934 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode) volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); + of_node_put(volt_gpio_np); if (!voltage_gpio){ pr_err("missing cpu-vcore-select gpio\n"); return 1; @@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode) if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); + of_node_put(volt_gpio_np); pvr = mfspr(SPRN_PVR); has_cpu_l2lve = !((pvr & 0xf00) == 0x100); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 
41a0f0be3f9ff1..8414c3a4ea08cd 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) if (!cbe_get_cpu_pmd_regs(policy->cpu) || !cbe_get_cpu_mic_tm_regs(policy->cpu)) { pr_info("invalid CBE regs pointers for cpufreq\n"); + of_node_put(cpu); return -EINVAL; } diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index f5c07498ea4f08..0c85a5123f85d9 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, /* Setup SA */ sa = ctx->sa_in; - set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ? - SA_SAVE_IV : SA_NOT_SAVE_IV), - SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ? + SA_NOT_SAVE_IV : SA_SAVE_IV), + SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ? + SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE), SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT, @@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); sa = ctx->sa_out; sa->sa_command_0.bf.dir = DIR_OUTBOUND; + /* + * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT. + * it's the DIR_(IN|OUT)BOUND that matters + */ + sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT; return 0; } diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 6eaec9ba0f68ba..d2ec9fd1b8bb03 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req, size_t offset_to_sr_ptr; u32 gd_idx = 0; int tmp; - bool is_busy; + bool is_busy, force_sd; + + /* + * There's a very subtile/disguised "bug" in the hardware that + * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption + * of the hardware spec: + * *drum roll* the AES/(T)DES OFB and CFB modes are listed as + * operation modes for >>> "Block ciphers" <<<. + * + * To workaround this issue and stop the hardware from causing + * "overran dst buffer" on crypttexts that are not a multiple + * of 16 (AES_BLOCK_SIZE), we force the driver to use the + * scatter buffers. 
+ */ + force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB + || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB) + && (datalen % AES_BLOCK_SIZE); /* figure how many gd are needed */ tmp = sg_nents_for_len(src, assoclen + datalen); @@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, } /* figure how many sd are needed */ - if (sg_is_last(dst)) { + if (sg_is_last(dst) && force_sd == false) { num_sd = 0; } else { if (datalen > PPC4XX_SD_BUFFER_SIZE) { @@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req, pd->sa_len = sa_len; pd_uinfo = &dev->pdr_uinfo[pd_entry]; - pd_uinfo->async_req = req; pd_uinfo->num_gd = num_gd; pd_uinfo->num_sd = num_sd; + pd_uinfo->dest_va = dst; + pd_uinfo->async_req = req; if (iv_len) memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len); @@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req, /* get first gd we are going to use */ gd_idx = fst_gd; pd_uinfo->first_gd = fst_gd; - pd_uinfo->num_gd = num_gd; gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); pd->src = gd_dma; /* enable gather */ @@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req, * Indicate gather array is not used */ pd_uinfo->first_gd = 0xffffffff; - pd_uinfo->num_gd = 0; } - if (sg_is_last(dst)) { + if (!num_sd) { /* * we know application give us dst a whole piece of memory * no need to use scatter ring. */ pd_uinfo->using_sd = 0; pd_uinfo->first_sd = 0xffffffff; - pd_uinfo->num_sd = 0; - pd_uinfo->dest_va = dst; sa->sa_command_0.bf.scatter = 0; pd->dest = (u32)dma_map_page(dev->core_dev->device, sg_page(dst), dst->offset, @@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, nbytes = datalen; sa->sa_command_0.bf.scatter = 1; pd_uinfo->using_sd = 1; - pd_uinfo->dest_va = dst; pd_uinfo->first_sd = fst_sd; - pd_uinfo->num_sd = num_sd; sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); pd->dest = sd_dma; /* setup scatter descriptor */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 72790d88236d4d..1603dc8d2d7512 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -935,7 +935,7 @@ void psp_pci_init(void) rc = sev_platform_init(&error); if (rc) { dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error); - goto err; + return; } dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major, diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 5852d29ae2dac2..0669033f5be507 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx) /* This function prepers the user key so it can pass to the hmac processing * (copy to intenral buffer or hash in case of key longer than block */ -static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, +static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, unsigned int keylen) { dma_addr_t key_dma_addr = 0; @@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int hashmode; unsigned int idx = 0; int rc = 0; + u8 *key = NULL; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; dma_addr_t padded_authkey_dma_addr = ctx->auth_state.hmac.padded_authkey_dma_addr; @@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, } if (keylen != 0) { + + key = kmemdup(authkey, keylen, GFP_KERNEL); + if (!key) + return -ENOMEM; + key_dma_addr = 
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); + kzfree(key); return -ENOMEM; } if (keylen > blocksize) { @@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, if (key_dma_addr) dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); + kzfree(key); + return rc; } diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 3bcb6bce666e35..90b4870078fb72 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req, */ static unsigned int cc_get_sgl_nents(struct device *dev, struct scatterlist *sg_list, - unsigned int nbytes, u32 *lbytes, - bool *is_chained) + unsigned int nbytes, u32 *lbytes) { unsigned int nents = 0; while (nbytes && sg_list) { - if (sg_list->length) { - nents++; - /* get the number of bytes in the last entry */ - *lbytes = nbytes; - nbytes -= (sg_list->length > nbytes) ? - nbytes : sg_list->length; - sg_list = sg_next(sg_list); - } else { - sg_list = (struct scatterlist *)sg_page(sg_list); - if (is_chained) - *is_chained = true; - } + nents++; + /* get the number of bytes in the last entry */ + *lbytes = nbytes; + nbytes -= (sg_list->length > nbytes) ? + nbytes : sg_list->length; + sg_list = sg_next(sg_list); } dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; @@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, { u32 nents, lbytes; - nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL); + nents = cc_get_sgl_nents(dev, sg, end, &lbytes); sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, (direct == CC_SG_TO_BUF)); } @@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, sgl_data->num_of_buffers++; } -static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents, - enum dma_data_direction direction) -{ - u32 i, j; - struct scatterlist *l_sg = sg; - - for (i = 0; i < nents; i++) { - if (!l_sg) - break; - if (dma_map_sg(dev, l_sg, 1, direction) != 1) { - dev_err(dev, "dma_map_page() sg buffer failed\n"); - goto err; - } - l_sg = sg_next(l_sg); - } - return nents; - -err: - /* Restore mapped parts */ - for (j = 0; j < i; j++) { - if (!sg) - break; - dma_unmap_sg(dev, sg, 1, direction); - sg = sg_next(sg); - } - return 0; -} - static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { - bool is_chained = false; - if (sg_is_last(sg)) { /* One entry only case -set to DLLI */ if (dma_map_sg(dev, sg, 1, direction) != 1) { @@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, *nents = 1; *mapped_nents = 1; } else { /*sg_is_last*/ - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes, - &is_chained); + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; dev_err(dev, "Too many fragments. 
current %d max %d\n", *nents, max_sg_nents); return -ENOMEM; } - if (!is_chained) { - /* In case of mmu the number of mapped nents might - * be changed from the original sgl nents - */ - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); - if (*mapped_nents == 0) { - *nents = 0; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } - } else { - /*In this case the driver maps entry by entry so it - * must have the same nents before and after map - */ - *mapped_nents = cc_dma_map_sg(dev, sg, *nents, - direction); - if (*mapped_nents != *nents) { - *nents = *mapped_nents; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } + /* In case of mmu the number of mapped nents might + * be changed from the original sgl nents + */ + *mapped_nents = dma_map_sg(dev, sg, *nents, direction); + if (*mapped_nents == 0) { + *nents = 0; + dev_err(dev, "dma_map_sg() sg buffer failed\n"); + return -ENOMEM; } } @@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_drvdata *drvdata = dev_get_drvdata(dev); u32 dummy; - bool chained; u32 size_to_unmap = 0; if (areq_ctx->mac_buf_dma_addr) { @@ -612,6 +560,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) if (areq_ctx->gen_ctx.iv_dma_addr) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size, DMA_BIDIRECTIONAL); + kzfree(areq_ctx->gen_ctx.iv); } /* Release pool */ @@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) size_to_unmap += crypto_aead_ivsize(tfm); dma_unmap_sg(dev, req->src, - cc_get_sgl_nents(dev, req->src, size_to_unmap, - &dummy, &chained), + cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy), DMA_BIDIRECTIONAL); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); dma_unmap_sg(dev, req->dst, cc_get_sgl_nents(dev, req->dst, size_to_unmap, - &dummy, &chained), + &dummy), DMA_BIDIRECTIONAL); } if (drvdata->coherent && @@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata, struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct device *dev = drvdata_to_dev(drvdata); + gfp_t flags = cc_gfp_flags(&req->base); int rc = 0; if (!req->iv) { areq_ctx->gen_ctx.iv_dma_addr = 0; + areq_ctx->gen_ctx.iv = NULL; goto chain_iv_exit; } - areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, - hw_iv_size, - DMA_BIDIRECTIONAL); + areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); + if (!areq_ctx->gen_ctx.iv) + return -ENOMEM; + + areq_ctx->gen_ctx.iv_dma_addr = + dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, + DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", hw_iv_size, req->iv); + kzfree(areq_ctx->gen_ctx.iv); + areq_ctx->gen_ctx.iv = NULL; rc = -ENOMEM; goto chain_iv_exit; } @@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, unsigned int size_for_map = req->assoclen + req->cryptlen; struct crypto_aead *tfm = crypto_aead_reqtfm(req); u32 sg_index = 0; - bool chained = false; bool is_gcm4543 = areq_ctx->is_gcm4543; u32 size_to_skip = req->assoclen; @@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
authsize : 0; src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, - &src_last_bytes, &chained); + &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts while (sg_index <= size_to_skip) { @@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, - &dst_last_bytes, &chained); + &dst_last_bytes); sg_index = areq_ctx->dst_sgl->length; offset = size_to_skip; @@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); areq_ctx->in_nents = - cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL); + cc_get_sgl_nents(dev, src, nbytes, &dummy); sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index d608a4faf66225..be7f9bd5c55990 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -162,6 +162,7 @@ struct cc_alg_template { struct async_gen_req_ctx { dma_addr_t iv_dma_addr; + u8 *iv; enum drv_crypto_direction op_type; }; diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index b4d0a6d983e0cf..09f708f6418ed6 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c @@ -72,20 +72,28 @@ static inline void tee_fips_error(struct device *dev) dev_err(dev, "TEE reported error!\n"); } +/* + * This function check if cryptocell tee fips error occurred + * and in such case triggers system error + */ +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) +{ + struct device *dev = drvdata_to_dev(p_drvdata); + + if (!cc_get_tee_fips_status(p_drvdata)) + tee_fips_error(dev); +} + /* Deferred service handler, run as interrupt-fired tasklet */ static void fips_dsr(unsigned long devarg) { struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; - struct device *dev = drvdata_to_dev(drvdata); - u32 irq, state, val; + u32 irq, val; irq = (drvdata->irq & (CC_GPR0_IRQ_MASK)); if (irq) { - state = cc_ioread(drvdata, CC_REG(GPR_HOST)); - - if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) - tee_fips_error(dev); + cc_tee_handle_fips_error(drvdata); } /* after verifing that there is nothing to do, @@ -113,8 +121,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata) dev_dbg(dev, "Initializing fips tasklet\n"); tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); - if (!cc_get_tee_fips_status(p_drvdata)) - tee_fips_error(dev); + cc_tee_handle_fips_error(p_drvdata); return 0; } diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h index 645e096a7a823a..67d5fbfa09b56a 100644 --- a/drivers/crypto/ccree/cc_fips.h +++ b/drivers/crypto/ccree/cc_fips.h @@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata); void cc_fips_fini(struct cc_drvdata *drvdata); void fips_handler(struct cc_drvdata *drvdata); void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok); +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata); #else /* CONFIG_CRYPTO_FIPS */ @@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {} static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok) {} static inline void fips_handler(struct cc_drvdata *drvdata) {} +static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {} 
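The ccree changes on both sides of this point (cc_get_plain_hmac_key() and cc_aead_chain_iv() above, cc_hash_setkey() and cc_xcbc_setkey() below) share one fix: a caller-supplied key or IV must not be handed to dma_map_single() directly, since it may live on the stack or in otherwise non-DMA-safe memory. Each path now duplicates the buffer with kmemdup(), maps the copy, and releases it with kzfree() so the key material is also wiped. Below is a condensed, kernel-style sketch of that pattern; the helper names are illustrative and the fragment is not a buildable driver on its own.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Map a caller-provided buffer for device access through a private copy. */
static int map_buffer_copy(struct device *dev, const u8 *buf, unsigned int len,
			   u8 **copy, dma_addr_t *dma)
{
	*copy = kmemdup(buf, len, GFP_KERNEL);
	if (!*copy)
		return -ENOMEM;

	*dma = dma_map_single(dev, *copy, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kzfree(*copy);	/* frees and zeroes the copied key material */
		*copy = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void unmap_buffer_copy(struct device *dev, u8 *copy, unsigned int len,
			      dma_addr_t dma)
{
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	kzfree(copy);
}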
#endif /* CONFIG_CRYPTO_FIPS */ diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index b9313306c36feb..2cadd7a218445f 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -64,6 +64,7 @@ struct cc_hash_alg { struct hash_key_req_ctx { u32 keylen; dma_addr_t key_dma_addr; + u8 *key; }; /* hash per-session context */ @@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, ctx->key_params.keylen = keylen; ctx->key_params.key_dma_addr = 0; ctx->is_hmac = true; + ctx->key_params.key = NULL; if (keylen) { + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); + if (!ctx->key_params.key) + return -ENOMEM; + ctx->key_params.key_dma_addr = - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); + dma_map_single(dev, (void *)ctx->key_params.key, keylen, + DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", - key, keylen); + ctx->key_params.key, keylen); + kzfree(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -881,6 +889,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); } + + kzfree(ctx->key_params.key); + return rc; } @@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, ctx->key_params.keylen = keylen; + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); + if (!ctx->key_params.key) + return -ENOMEM; + ctx->key_params.key_dma_addr = - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); + dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); + kzfree(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); + kzfree(ctx->key_params.key); + return rc; } @@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = { .setkey = cc_hash_setkey, .halg = { .digestsize = SHA224_DIGEST_SIZE, - .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE), + .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA224, @@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = { .setkey = cc_hash_setkey, .halg = { .digestsize = SHA384_DIGEST_SIZE, - .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE), + .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA384, diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c index 7694583233944d..1abec3896a7800 100644 --- a/drivers/crypto/ccree/cc_ivgen.c +++ b/drivers/crypto/ccree/cc_ivgen.c @@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata) } ivgen_ctx->pool = NULL_SRAM_ADDR; - - /* release "this" context */ - kfree(ivgen_ctx); } /*! @@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata) int rc; /* Allocate "this" context */ - ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL); + ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL); if (!ivgen_ctx) return -ENOMEM; + drvdata->ivgen_handle = ivgen_ctx; + /* Allocate pool's header for initial enc. 
key/IV */ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE, &ivgen_ctx->pool_meta_dma, @@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata) goto out; } - drvdata->ivgen_handle = ivgen_ctx; - return cc_init_iv_sram(drvdata); out: diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89fb3..79fc0a37ba6e42 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -11,6 +11,7 @@ #include "cc_ivgen.h" #include "cc_hash.h" #include "cc_pm.h" +#include "cc_fips.h" #define POWER_DOWN_ENABLE 0x01 #define POWER_DOWN_DISABLE 0x00 @@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev) int rc; dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); rc = cc_suspend_req_queue(drvdata); if (rc) { dev_err(dev, "cc_suspend_req_queue (%x)\n", rc); return rc; } fini_cc_regs(drvdata); + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); cc_clk_off(drvdata); return 0; } @@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev) struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); - + /* Enables the device source clk */ rc = cc_clk_on(drvdata); if (rc) { dev_err(dev, "failed getting clock back on. We're toast.\n"); return rc; } + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); rc = init_cc_regs(drvdata, false); if (rc) { dev_err(dev, "init_cc_regs (%x)\n", rc); return rc; } + /* check if tee fips error occurred during power down */ + cc_tee_handle_fips_error(drvdata); rc = cc_resume_req_queue(drvdata); if (rc) { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 23305f22072fe7..204e4ad62c3843 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev) u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + dev->sg_src->offset + dev->sg_src->length - ivsize; - /* store the iv that need to be updated in chain mode */ - if (ctx->mode & RK_CRYPTO_DEC) + /* Store the iv that need to be updated in chain mode. + * And update the IV buffer to contain the next IV for decryption mode. + */ + if (ctx->mode & RK_CRYPTO_DEC) { memcpy(ctx->iv, src_last_blk, ivsize); + sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info, + ivsize, dev->total - ivsize); + } err = dev->load_data(dev, dev->sg_src, dev->sg_dst); if (!err) @@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev) struct ablkcipher_request *req = ablkcipher_request_cast(dev->async_req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); u32 ivsize = crypto_ablkcipher_ivsize(tfm); - if (ivsize == DES_BLOCK_SIZE) - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0, - ivsize); - else if (ivsize == AES_BLOCK_SIZE) - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); + /* Update the IV buffer to contain the next IV for encryption mode. 
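The two rockchip hunks around this point (the decryption path above, the encryption path in the hunk that follows) implement ordinary CBC IV chaining in software: the IV for the next request is the last ciphertext block of the current one, which is the tail of the source when decrypting (saved before an in-place operation overwrites it) and the tail of the destination when encrypting. A small standalone model of that selection, using plain buffers instead of scatterlists:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16	/* AES; the DES path uses 8 but works the same way */

/*
 * CBC chaining: the next IV is the last ciphertext block processed.
 * For decryption the ciphertext is the input, for encryption the output.
 */
static void next_cbc_iv(bool decrypt, const unsigned char *src,
			const unsigned char *dst, size_t len,
			unsigned char iv[BLOCK_SIZE])
{
	const unsigned char *ciphertext = decrypt ? src : dst;

	memcpy(iv, ciphertext + len - BLOCK_SIZE, BLOCK_SIZE);
}

int main(void)
{
	unsigned char src[32] = { [31] = 0xaa };	/* ciphertext when decrypting */
	unsigned char dst[32] = { [31] = 0xbb };	/* ciphertext when encrypting */
	unsigned char iv[BLOCK_SIZE];

	next_cbc_iv(true, src, dst, sizeof(src), iv);
	printf("decrypt: next IV ends in 0x%02x\n", iv[15]);	/* 0xaa */
	next_cbc_iv(false, src, dst, sizeof(dst), iv);
	printf("encrypt: next IV ends in 0x%02x\n", iv[15]);	/* 0xbb */
	return 0;
}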
*/ + if (!(ctx->mode & RK_CRYPTO_DEC)) { + if (dev->aligned) { + memcpy(req->info, sg_virt(dev->sg_dst) + + dev->sg_dst->length - ivsize, ivsize); + } else { + memcpy(req->info, dev->addr_vir + + dev->count - ivsize, ivsize); + } + } } static void rk_update_iv(struct rk_crypto_info *dev) diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index a4b5ff2b72f874..f6936bb3b7be4e 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq) } } else { /* Since we have the flag final, we can go up to modulo 4 */ - end = ((areq->nbytes + op->len) / 4) * 4 - op->len; + if (areq->nbytes < 4) + end = 0; + else + end = ((areq->nbytes + op->len) / 4) * 4 - op->len; } /* TODO if SGlen % 4 and !op->len then DMA */ diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index d6a9f63d65ba2d..9c6b5c1d6a1a61 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl @@ -1357,7 +1357,7 @@ () addi $idx,$idx,16 bdnz Loop_ctr32_enc - vadduwm $ivec,$ivec,$one + vadduqm $ivec,$ivec,$one vmr $dat,$inptail lvx $inptail,0,$inp addi $inp,$inp,16 @@ -1854,7 +1854,7 @@ () stvx_u $out1,$x10,$out stvx_u $out2,$x20,$out addi $out,$out,0x30 - b Lcbc_dec8x_done + b Lctr32_enc8x_done .align 5 Lctr32_enc8x_two: @@ -1866,7 +1866,7 @@ () stvx_u $out0,$x00,$out stvx_u $out1,$x10,$out addi $out,$out,0x20 - b Lcbc_dec8x_done + b Lctr32_enc8x_done .align 5 Lctr32_enc8x_one: diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index dd8b8716467a2c..2d1a8cd3550907 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -1,22 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 /** * GHASH routines supporting VMX instructions on the Power 8 * - * Copyright (C) 2015 International Business Machines Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 only. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright (C) 2015, 2019 International Business Machines Inc. * * Author: Marcelo Henrique Cerri + * + * Extended by Daniel Axtens to replace the fallback + * mechanism. The new approach is based on arm64 code, which is: + * Copyright (C) 2014 - 2018 Linaro Ltd. 
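In the aesp8-ppc.pl hunk above, the leftover-block path of the CTR code now steps the counter with vadduqm (one 128-bit add) instead of vadduwm (four independent 32-bit adds with no carry between words), matching the generic ctr(aes) convention of treating the whole IV as a single big-endian integer; the two stray branches to Lcbc_dec8x_done are also retargeted to Lctr32_enc8x_done. The word-add and quadword-add behaviours only diverge once the low 32 bits wrap, which the following byte-array model demonstrates:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Increment a 16-byte big-endian counter as one 128-bit integer (carry ripples). */
static void inc_128(uint8_t ctr[16])
{
	for (int i = 15; i >= 0; i--)
		if (++ctr[i])
			break;
}

/* Increment only the low 32-bit word; no carry into the upper words. */
static void inc_low_word(uint8_t ctr[16])
{
	uint32_t w = ((uint32_t)ctr[12] << 24) | ((uint32_t)ctr[13] << 16) |
		     ((uint32_t)ctr[14] << 8) | ctr[15];

	w += 1;		/* wraps at 2^32 without touching bytes 0..11 */
	ctr[12] = w >> 24;
	ctr[13] = w >> 16;
	ctr[14] = w >> 8;
	ctr[15] = w;
}

int main(void)
{
	uint8_t a[16] = { 0 }, b[16];

	memset(a + 12, 0xff, 4);	/* low word = 0xffffffff */
	memcpy(b, a, 16);

	inc_128(a);		/* carries into byte 11 */
	inc_low_word(b);	/* low word wraps to zero, byte 11 unchanged */
	printf("byte 11 after increment: 128-bit add=%02x, word add=%02x\n",
	       a[11], b[11]);
	return 0;
}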
*/ #include @@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], const u8 *in, size_t len); struct p8_ghash_ctx { + /* key used by vector asm */ u128 htable[16]; - struct crypto_shash *fallback; + /* key used by software fallback */ + be128 key; }; struct p8_ghash_desc_ctx { u64 shash[2]; u8 buffer[GHASH_DIGEST_SIZE]; int bytes; - struct shash_desc fallback_desc; }; -static int p8_ghash_init_tfm(struct crypto_tfm *tfm) -{ - const char *alg = "ghash-generic"; - struct crypto_shash *fallback; - struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR - "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - - crypto_shash_set_flags(fallback, - crypto_shash_get_flags((struct crypto_shash - *) tfm)); - - /* Check if the descsize defined in the algorithm is still enough. */ - if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) - + crypto_shash_descsize(fallback)) { - printk(KERN_ERR - "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", - alg, - shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), - crypto_shash_descsize(fallback)); - return -EINVAL; - } - ctx->fallback = fallback; - - return 0; -} - -static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) -{ - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - if (ctx->fallback) { - crypto_free_shash(ctx->fallback); - ctx->fallback = NULL; - } -} - static int p8_ghash_init(struct shash_desc *desc) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); - dctx->fallback_desc.tfm = ctx->fallback; - dctx->fallback_desc.flags = desc->flags; - return crypto_shash_init(&dctx->fallback_desc); + return 0; } static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, disable_kernel_vsx(); pagefault_enable(); preempt_enable(); - return crypto_shash_setkey(ctx->fallback, key, keylen); + + memcpy(&ctx->key, key, GHASH_BLOCK_SIZE); + + return 0; +} + +static inline void __ghash_block(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx) +{ + if (!IN_INTERRUPT) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + } else { + crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); + gf128mul_lle((be128 *)dctx->shash, &ctx->key); + } +} + +static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + if (!IN_INTERRUPT) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + gcm_ghash_p8(dctx->shash, ctx->htable, + src, srclen); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + } else { + while (srclen >= GHASH_BLOCK_SIZE) { + crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); + gf128mul_lle((be128 *)dctx->shash, &ctx->key); + srclen -= GHASH_BLOCK_SIZE; + src += GHASH_BLOCK_SIZE; + } + } } static int p8_ghash_update(struct shash_desc *desc, @@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc, struct p8_ghash_ctx *ctx = 
crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (IN_INTERRUPT) { - return crypto_shash_update(&dctx->fallback_desc, src, - srclen); - } else { - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, - srclen); - dctx->bytes += srclen; - return 0; - } + if (dctx->bytes) { + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, src, len); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; + srclen); + dctx->bytes += srclen; + return 0; } - return 0; + memcpy(dctx->buffer + dctx->bytes, src, + GHASH_DIGEST_SIZE - dctx->bytes); + + __ghash_block(ctx, dctx); + + src += GHASH_DIGEST_SIZE - dctx->bytes; + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; + dctx->bytes = 0; + } + len = srclen & ~(GHASH_DIGEST_SIZE - 1); + if (len) { + __ghash_blocks(ctx, dctx, src, len); + src += len; + srclen -= len; } + if (srclen) { + memcpy(dctx->buffer, src, srclen); + dctx->bytes = srclen; + } + return 0; } static int p8_ghash_final(struct shash_desc *desc, u8 *out) @@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (IN_INTERRUPT) { - return crypto_shash_final(&dctx->fallback_desc, out); - } else { - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - dctx->bytes = 0; - } - memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); - return 0; + if (dctx->bytes) { + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) + dctx->buffer[i] = 0; + __ghash_block(ctx, dctx); + dctx->bytes = 0; } + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); + return 0; } struct shash_alg p8_ghash_alg = { @@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", .cra_priority = 1000, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_ghash_ctx), .cra_module = THIS_MODULE, - .cra_init = p8_ghash_init_tfm, - .cra_exit = p8_ghash_exit_tfm, }, }; diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 948806e57cee33..a89ebd94c67021 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); - return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn, - vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -376,8 +375,7 
@@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); - return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn, - vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index c4eb55e3011c9f..c05ef7f1d7b66c 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, return vchan_tx_prep(&chan->vc, &first->vd, flags); err_desc_get: - axi_desc_put(first); + if (first) + axi_desc_put(first); return NULL; } diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1fbf9cb9b74297..89c5e5b4606870 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - idma64->dma.dev = chip->dev; + idma64->dma.dev = chip->sysdev; dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); @@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev) { struct idma64_chip *chip; struct device *dev = &pdev->dev; + struct device *sysdev = dev->parent; struct resource *mem; int ret; @@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev) if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); if (ret) return ret; chip->dev = dev; + chip->sysdev = sysdev; ret = idma64_probe(chip); if (ret) diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index 6b816878e5e7a7..baa32e1425de31 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) /** * struct idma64_chip - representation of iDMA 64-bit controller hardware * @dev: struct device of the DMA controller + * @sysdev: struct device of the physical device that does DMA * @irq: irq line * @regs: memory mapped I/O space * @idma64: struct idma64 that is filed by idma64_probe() */ struct idma64_chip { struct device *dev; + struct device *sysdev; int irq; void __iomem *regs; struct idma64 *idma64; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 88750a34e85987..bc8050c025b7b8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -961,6 +961,7 @@ static void _stop(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; + u32 inten = readl(regs + INTEN); if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); @@ -973,10 +974,13 @@ static void _stop(struct pl330_thread *thrd) _emit_KILL(0, insn); - /* Stop generating interrupts for SEV */ - writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); - _execute_DBGINSN(thrd, insn, is_manager(thrd)); + + /* clear the event */ + if (inten & (1 << thrd->ev)) + writel(1 << thrd->ev, regs + INTCLR); + /* Stop generating interrupts for SEV */ + writel(inten & ~(1 << thrd->ev), regs + INTEN); } /* Start doing req 'idx' of thread 'thrd' */ diff --git a/drivers/dma/sprd-dma.c 
b/drivers/dma/sprd-dma.c index 55df0d41355b06..1ed1c7efa28853 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -663,7 +663,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; hw->frg_len = temp; - hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; + hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK; hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b26256f23d67fb..09b6756366c30e 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -141,6 +140,7 @@ struct tegra_adma { struct dma_device dma_dev; struct device *dev; void __iomem *base_addr; + struct clk *ahub_clk; unsigned int nr_channels; unsigned long rx_requests_reserved; unsigned long tx_requests_reserved; @@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); + clk_disable_unprepare(tdma->ahub_clk); - return pm_clk_suspend(dev); + return 0; } static int tegra_adma_runtime_resume(struct device *dev) @@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); int ret; - ret = pm_clk_resume(dev); - if (ret) + ret = clk_prepare_enable(tdma->ahub_clk); + if (ret) { + dev_err(dev, "ahub clk_enable failed: %d\n", ret); return ret; - + } tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); return 0; @@ -692,13 +694,11 @@ static int tegra_adma_probe(struct platform_device *pdev) if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); - ret = pm_clk_create(&pdev->dev); - if (ret) - return ret; - - ret = of_pm_clk_add_clk(&pdev->dev, "d_audio"); - if (ret) - goto clk_destroy; + tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio"); + if (IS_ERR(tdma->ahub_clk)) { + dev_err(&pdev->dev, "Error: Missing ahub controller clock\n"); + return PTR_ERR(tdma->ahub_clk); + } pm_runtime_enable(&pdev->dev); @@ -775,8 +775,6 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); rpm_disable: pm_runtime_disable(&pdev->dev); -clk_destroy: - pm_clk_destroy(&pdev->dev); return ret; } @@ -786,6 +784,7 @@ static int tegra_adma_remove(struct platform_device *pdev) struct tegra_adma *tdma = platform_get_drvdata(pdev); int i; + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&tdma->dma_dev); for (i = 0; i < tdma->nr_channels; ++i) @@ -793,7 +792,6 @@ static int tegra_adma_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - pm_clk_destroy(&pdev->dev); return 0; } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 57304b2e989f2c..b00cc03ad6b670 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -250,8 +250,8 @@ config EDAC_PND2 micro-server but may appear on others in the future. 
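The tegra210-adma hunks above move the controller clock from the pm_clk_*() helpers to a clock the driver owns explicitly: devm_clk_get() in probe, clk_prepare_enable() in runtime resume before any register is touched, and clk_disable_unprepare() in runtime suspend after the state has been saved. Reduced to its essentials the pattern looks as follows; this is a kernel-style fragment, not a complete driver, and every name outside the clk/PM APIs is illustrative.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct demo_dma {
	struct clk *ahub_clk;
};

static int demo_runtime_suspend(struct device *dev)
{
	struct demo_dma *d = dev_get_drvdata(dev);

	/* hardware state would be saved here, before the clock is gated */
	clk_disable_unprepare(d->ahub_clk);
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	struct demo_dma *d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(d->ahub_clk);
	if (ret)
		return ret;	/* nothing to undo, the clock never came up */
	/* hardware state would be restored here, with the clock running */
	return 0;
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_dma *d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	d->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(d->ahub_clk))
		return PTR_ERR(d->ahub_clk);

	platform_set_drvdata(pdev, d);
	pm_runtime_enable(&pdev->dev);
	return 0;
}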
config EDAC_MPC85XX - tristate "Freescale MPC83xx / MPC85xx" - depends on FSL_SOC + bool "Freescale MPC83xx / MPC85xx" + depends on FSL_SOC && EDAC=y help Support for error detection and correction on the Freescale MPC8349, MPC8560, MPC8540, MPC8548, T4240 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index da0e9bc4262fad..9327479c719c23 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev) struct arizona_extcon_info *info = platform_get_drvdata(pdev); struct arizona *arizona = info->arizona; int jack_irq_rise, jack_irq_fall; + bool change; + + regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, + ARIZONA_MICD_ENA, 0, + &change); + + if (change) { + regulator_disable(info->micvdd); + pm_runtime_put(info->dev); + } gpiod_put(info->micd_pol_gpio); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index cfe87b465819f8..0f7d97917197d5 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - !pstore_cannot_block_path(record->reason), - record->size, record->psi->buf); + preemptible(), record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS) efivar_run_worker(); @@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void) return -ENOMEM; efi_pstore_info.bufsize = 1024; - spin_lock_init(&efi_pstore_info.buf_lock); if (pstore_register(&efi_pstore_info)) { kfree(efi_pstore_info.buf); diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 0e81d33af856a6..c9a613dc9eb7e4 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, region->pages[0], 0, region->length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&pdata->dev->dev, region->iova)) { + if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) { dev_err(&pdata->dev->dev, "failed to map for dma\n"); ret = -EFAULT; goto unpin_pages; diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index a9b521bccb06db..ab361ec78df4c3 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -40,6 +40,13 @@ enum dfl_fpga_devt_type { DFL_FPGA_DEVT_MAX, }; +static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX]; + +static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { + "dfl-fme-pdata", + "dfl-port-pdata", +}; + /** * dfl_dev_info - dfl feature device information. * @name: name string of the feature platform device. @@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) struct platform_device *fdev = binfo->feature_dev; struct dfl_feature_platform_data *pdata; struct dfl_feature_info *finfo, *p; + enum dfl_id_type type; int ret, index = 0; if (!fdev) return 0; + type = feature_dev_id_type(fdev); + if (WARN_ON_ONCE(type >= DFL_ID_MAX)) + return -EINVAL; + /* * we do not need to care for the memory which is associated with * the platform device. 
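The dfl changes here and in the next hunk give the per-feature-device mutex a lockdep class keyed on the device type (FME vs. port). Because every pdata->lock is otherwise initialised from the same mutex_init() call site, lockdep would treat them all as one class and could report false recursive-locking warnings when an FME lock and a port lock legitimately nest; one static lock_class_key per type keeps the classes apart. The shape of the idiom, as a kernel-style fragment with illustrative names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

enum demo_dev_type { DEMO_TYPE_A, DEMO_TYPE_B, DEMO_TYPE_MAX };

static struct lock_class_key demo_lock_keys[DEMO_TYPE_MAX];

static const char *demo_lock_names[DEMO_TYPE_MAX] = {
	"demo-type-a-lock",
	"demo-type-b-lock",
};

struct demo_pdata {
	struct mutex lock;
};

/* Give each device type its own lockdep class so A/B nesting is not flagged. */
static void demo_pdata_init(struct demo_pdata *pdata, enum demo_dev_type type)
{
	mutex_init(&pdata->lock);
	lockdep_set_class_and_name(&pdata->lock, &demo_lock_keys[type],
				   demo_lock_names[type]);
}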
After calling platform_device_unregister(), @@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) pdata->num = binfo->feature_num; pdata->dfl_cdev = binfo->cdev; mutex_init(&pdata->lock); + lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], + dfl_pdata_key_strings[type]); /* * the count should be initialized to 0 to make sure @@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) ret = platform_device_add(binfo->feature_dev); if (!ret) { - if (feature_dev_id_type(binfo->feature_dev) == PORT_ID) + if (type == PORT_ID) dfl_fpga_cdev_add_port_dev(binfo->cdev, binfo->feature_dev); else diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 9f318f763c3af2..1691fae679d00f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -796,6 +796,7 @@ config GPIO_ADP5588 config GPIO_ADP5588_IRQ bool "Interrupt controller support for ADP5588" depends on GPIO_ADP5588=y + select GPIOLIB_IRQCHIP help Say yes here to enable the adp5588 to be used as an interrupt controller. It requires the driver to be built in the kernel. diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 6c1acf642c8e92..6fa430d98517cc 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) } } +/* + * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain. + * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs + * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none + * are capable waking up the system from off mode. + */ +static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask) +{ + u32 no_wake = bank->non_wakeup_gpios; + + if (no_wake) + return !!(~no_wake & gpio_mask); + + return false; +} + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { @@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, } /* This part needs to be executed always for OMAP{34xx, 44xx} */ - if (!bank->regs->irqctrl) { - /* On omap24xx proceed only when valid GPIO bit is set */ - if (bank->non_wakeup_gpios) { - if (!(bank->non_wakeup_gpios & gpio_bit)) - goto exit; - } - + if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) { /* * Log the edge gpio and manually trigger the IRQ * after resume if the input level changes @@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, bank->enabled_non_wakeup_gpios &= ~gpio_bit; } -exit: bank->level_mask = readl_relaxed(bank->base + bank->regs->leveldetect0) | readl_relaxed(bank->base + bank->regs->leveldetect1); diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 7e09ce75ffb2a5..a9cb5571de5459 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -37,6 +37,7 @@ struct fsl_gpio_soc_data { struct vf610_gpio_port { struct gpio_chip gc; + struct irq_chip ic; void __iomem *base; void __iomem *gpio_base; const struct fsl_gpio_soc_data *sdata; @@ -66,8 +67,6 @@ struct vf610_gpio_port { #define PORT_INT_EITHER_EDGE 0xb #define PORT_INT_LOGIC_ONE 0xc -static struct irq_chip vf610_gpio_irq_chip; - static const struct fsl_gpio_soc_data imx_data = { .have_paddr = true, }; @@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) return 0; } -static struct irq_chip vf610_gpio_irq_chip = { - .name = 
"gpio-vf610", - .irq_ack = vf610_gpio_irq_ack, - .irq_mask = vf610_gpio_irq_mask, - .irq_unmask = vf610_gpio_irq_unmask, - .irq_set_type = vf610_gpio_irq_set_type, - .irq_set_wake = vf610_gpio_irq_set_wake, -}; - static int vf610_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -259,6 +249,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + struct irq_chip *ic; int i; int ret; @@ -295,6 +286,14 @@ static int vf610_gpio_probe(struct platform_device *pdev) gc->direction_output = vf610_gpio_direction_output; gc->set = vf610_gpio_set; + ic = &port->ic; + ic->name = "gpio-vf610"; + ic->irq_ack = vf610_gpio_irq_ack; + ic->irq_mask = vf610_gpio_irq_mask; + ic->irq_unmask = vf610_gpio_irq_unmask; + ic->irq_set_type = vf610_gpio_irq_set_type; + ic->irq_set_wake = vf610_gpio_irq_set_wake; + ret = gpiochip_add_data(gc, port); if (ret < 0) return ret; @@ -306,14 +305,13 @@ static int vf610_gpio_probe(struct platform_device *pdev) /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, port->base + PORT_ISFR); - ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0, - handle_edge_irq, IRQ_TYPE_NONE); + ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add irqchip\n"); gpiochip_remove(gc); return ret; } - gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq, + gpiochip_set_chained_irqchip(gc, ic, port->irq, vf610_gpio_irq_handler); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index f008804f0b9759..bbd927e800af0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -416,8 +416,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, } } if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { - if ((adev->flags & AMD_IS_PX) && - amdgpu_atpx_dgpu_req_power_for_displays()) { + if (adev->flags & AMD_IS_PX) { pm_runtime_get_sync(adev->ddev->dev); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(adev->ddev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7056925eb38606..869ff624b108c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct dma_fence *old, **ptr; + struct dma_fence __rcu **ptr; uint32_t seq; + int r; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); if (fence == NULL) @@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, seq, flags | AMDGPU_FENCE_FLAG_INT); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; + if (unlikely(rcu_dereference_protected(*ptr, 1))) { + struct dma_fence *old; + + rcu_read_lock(); + old = dma_fence_get_rcu_safe(ptr); + rcu_read_unlock(); + + if (old) { + r = dma_fence_wait(old, false); + dma_fence_put(old); + if (r) + return r; + } + } + /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. 
*/ - old = rcu_dereference_protected(*ptr, 1); - if (old && !dma_fence_is_signaled(old)) { - DRM_INFO("rcu slot is busy\n"); - dma_fence_wait(old, false); - } - rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); *f = &fence->base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 5b39d139963046..5be82e4fd1da60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev); static int psp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; psp_set_funcs(adev); - return 0; -} - -static int psp_sw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct psp_context *psp = &adev->psp; - int ret; - switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: @@ -67,6 +59,15 @@ static int psp_sw_init(void *handle) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) return 0; + return 0; +} + +static int psp_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; + int ret; + ret = psp_init_microcode(psp); if (ret) { DRM_ERROR("Failed to load psp firmware!\n"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 76ee2de43ea667..dac7978f5ee1f9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4369,8 +4369,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, struct dc_stream_state *stream_state) { - stream_state->mode_changed = - crtc_state->mode_changed || crtc_state->active_changed; + stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } static int amdgpu_dm_atomic_commit(struct drm_device *dev, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 87bf422f16be74..e0a96abb3c46c7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1401,10 +1401,12 @@ bool dc_remove_plane_from_context( * For head pipe detach surfaces from pipe for tail * pipe just zero it out */ - if (!pipe_ctx->top_pipe) { + if (!pipe_ctx->top_pipe || + (!pipe_ctx->top_pipe->top_pipe && + pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) { pipe_ctx->plane_state = NULL; pipe_ctx->bottom_pipe = NULL; - } else { + } else { memset(pipe_ctx, 0, sizeof(*pipe_ctx)); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index bf8b68f8db4f7f..bce5741f2952ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -388,6 +388,10 @@ void dpp1_cnv_setup ( default: break; } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? 
input_color_space : color_space; + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, CNVC_SURFACE_PIXEL_FORMAT, pixel_format); REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); @@ -399,7 +403,7 @@ void dpp1_cnv_setup ( for (i = 0; i < 12; i++) tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - tbl_entry.color_space = input_color_space; + tbl_entry.color_space = color_space; if (color_space >= COLOR_SPACE_YCBCR601) select = INPUT_CSC_SELECT_ICSC; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 4a863a5dab4178..321af9af95e861 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions( int *num_part_y, int *num_part_c) { + int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, + lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; + int line_size = scl_data->viewport.width < scl_data->recout.width ? scl_data->viewport.width : scl_data->recout.width; int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? scl_data->viewport_c.width : scl_data->recout.width; - int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); - int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ - int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ - int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + + lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); + memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ if (lb_config == LB_MEMORY_CONFIG_1) { lb_memory_size = 816; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a0355709abd15c..7736ef123e9be0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1890,7 +1890,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, - COLOR_SPACE_YCBCR601_LIMITED); + plane_state->color_space); //set scale and bias registers build_prescale_params(&bns_params, plane_state); diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index e4d67b70244d57..e69d996eabadce 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, clk_disable_unprepare(hdlcd->clk); } -static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - struct drm_display_mode *mode = &state->adjusted_mode; long rate, clk_rate = mode->clock * 1000; rate = clk_round_rate(hdlcd->clk, clk_rate); - if (rate != clk_rate) { + /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */ + if (abs(rate - clk_rate) * 1000 > clk_rate) { /* clock required by mode 
not supported by hardware */ - return -EINVAL; + return MODE_NOCLOCK; } - return 0; + return MODE_OK; } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, @@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .atomic_check = hdlcd_crtc_atomic_check, + .mode_valid = hdlcd_crtc_mode_valid, .atomic_begin = hdlcd_crtc_atomic_begin, .atomic_enable = hdlcd_crtc_atomic_enable, .atomic_disable = hdlcd_crtc_atomic_disable, diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 94d6dabec2dc80..1ab511e3324321 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -190,6 +190,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; struct malidp_drm *malidp = drm->dev_private; + int loop = 5; malidp->event = malidp->crtc.state->event; malidp->crtc.state->event = NULL; @@ -204,8 +205,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) drm_crtc_vblank_get(&malidp->crtc); /* only set config_valid if the CRTC is enabled */ - if (malidp_set_and_wait_config_valid(drm) < 0) + if (malidp_set_and_wait_config_valid(drm) < 0) { + /* + * make a loop around the second CVAL setting and + * try 5 times before giving up. + */ + while (loop--) { + if (!malidp_set_and_wait_config_valid(drm)) + break; + } DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + } + } else if (malidp->event) { /* CRTC inactive means vblank IRQ is disabled, send event directly */ spin_lock_irq(&drm->event_lock); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 85c2d407a52e1a..e7ddd3e3db9209 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511, vsync_polarity = 1; } - if (mode->vrefresh <= 24000) + if (drm_mode_vrefresh(mode) <= 24) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ; - else if (mode->vrefresh <= 25000) + else if (drm_mode_vrefresh(mode) <= 25) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ; - else if (mode->vrefresh <= 30000) + else if (drm_mode_vrefresh(mode) <= 30) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ; else low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 1f2c73e59446b6..56ae858b4538e7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1611,15 +1611,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev, if (old_plane_state->fb != new_plane_state->fb) return -EINVAL; - /* - * FIXME: Since prepare_fb and cleanup_fb are always called on - * the new_plane_state for async updates we need to block framebuffer - * changes. This prevents use of a fb that's been cleaned up and - * double cleanups from occuring. - */ - if (old_plane_state->fb != new_plane_state->fb) - return -EINVAL; - funcs = plane->helper_private; if (!funcs->atomic_async_update) return -EINVAL; @@ -1650,6 +1641,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check); * drm_atomic_async_check() succeeds. Async commits are not supposed to swap * the states like normal sync commits, but just do in-place changes on the * current state. + * + * TODO: Implement full swap instead of doing in-place changes. 
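In the adv7511 hunk above, the low-refresh-rate setting is now derived from drm_mode_vrefresh(mode) compared against 24/25/30 Hz; the old code compared mode->vrefresh, which is stored in Hz, against 24000/25000/30000, so the low-refresh paths were effectively unreachable. drm_mode_vrefresh() boils down to pixel clock over total pixels per frame; a simplified stand-in (ignoring interlace, doublescan and vscan corrections) shows the arithmetic:

#include <stdio.h>

/*
 * Simplified version of what drm_mode_vrefresh() computes: the pixel clock
 * is in kHz, so refresh [Hz] = clock * 1000 / (htotal * vtotal), rounded.
 */
static int mode_vrefresh(long clock_khz, int htotal, int vtotal)
{
	long total = (long)htotal * vtotal;

	return (int)((clock_khz * 1000 + total / 2) / total);
}

int main(void)
{
	/* CEA 1920x1080p24: 74.25 MHz pixel clock, 2750 x 1125 total pixels */
	printf("1080p24 refresh: %d Hz\n", mode_vrefresh(74250, 2750, 1125));
	/* CEA 1920x1080p60: 148.5 MHz pixel clock, 2200 x 1125 total pixels */
	printf("1080p60 refresh: %d Hz\n", mode_vrefresh(148500, 2200, 1125));
	return 0;
}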
*/ void drm_atomic_helper_async_commit(struct drm_device *dev, struct drm_atomic_state *state) @@ -1660,6 +1653,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, int i; for_each_new_plane_in_state(state, plane, plane_state, i) { + struct drm_framebuffer *new_fb = plane_state->fb; + struct drm_framebuffer *old_fb = plane->state->fb; + funcs = plane->helper_private; funcs->atomic_async_update(plane, plane_state); @@ -1668,11 +1664,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, * plane->state in-place, make sure at least common * properties have been properly updated. */ - WARN_ON_ONCE(plane->state->fb != plane_state->fb); + WARN_ON_ONCE(plane->state->fb != new_fb); WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x); WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y); WARN_ON_ONCE(plane->state->src_x != plane_state->src_x); WARN_ON_ONCE(plane->state->src_y != plane_state->src_y); + + /* + * Make sure the FBs have been swapped so that cleanups in the + * new_state performs a cleanup in the old FB. + */ + WARN_ON_ONCE(plane_state->fb != old_fb); } } EXPORT_SYMBOL(drm_atomic_helper_async_commit); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9cbe8f5c9acafe..6e241a3c31ee3a 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -595,6 +595,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, plane = crtc->primary; + /* allow disabling with the primary plane leased */ + if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) + return -EACCES; + mutex_lock(&crtc->dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); retry: diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 0201ccb22f4cad..d8ae4ca129c701 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -499,7 +499,7 @@ int drm_dev_init(struct drm_device *dev, } kref_init(&dev->ref); - dev->dev = parent; + dev->dev = get_device(parent); dev->driver = driver; INIT_LIST_HEAD(&dev->filelist); @@ -568,6 +568,7 @@ int drm_dev_init(struct drm_device *dev, drm_minor_free(dev, DRM_MINOR_RENDER); drm_fs_inode_free(dev->anon_inode); err_free: + put_device(dev->dev); mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); @@ -603,6 +604,8 @@ void drm_dev_fini(struct drm_device *dev) drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); + put_device(dev->dev); + mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index b506e3622b08f6..5965f6383ada34 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -172,6 +172,25 @@ static const struct edid_quirk { /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, + /* Valve Index Headset */ + { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bb, 
EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP }, + /* HTC Vive and Vive Pro VR Headsets */ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, @@ -193,6 +212,12 @@ static const struct edid_quirk { /* Sony PlayStation VR Headset */ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, + + /* Sensics VR Headsets */ + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP }, + + /* OSVR HDK and HDK2 VR Headsets */ + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP }, }; /* @@ -1555,6 +1580,50 @@ static void connector_bad_edid(struct drm_connector *connector, } } +/* Get override or firmware EDID */ +static struct edid *drm_get_override_edid(struct drm_connector *connector) +{ + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate(connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + return IS_ERR(override) ? NULL : override; +} + +/** + * drm_add_override_edid_modes - add modes from override/firmware EDID + * @connector: connector we're probing + * + * Add modes from the override/firmware EDID, if available. Only to be used from + * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe + * failed during drm_get_edid() and caused the override/firmware EDID to be + * skipped. + * + * Return: The number of modes added or 0 if we couldn't find any. + */ +int drm_add_override_edid_modes(struct drm_connector *connector) +{ + struct edid *override; + int num_modes = 0; + + override = drm_get_override_edid(connector); + if (override) { + drm_connector_update_edid_property(connector, override); + num_modes = drm_add_edid_modes(connector, override); + kfree(override); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n", + connector->base.id, connector->name, num_modes); + } + + return num_modes; +} +EXPORT_SYMBOL(drm_add_override_edid_modes); + /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1582,15 +1651,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; - struct edid *override = NULL; - - if (connector->override_edid) - override = drm_edid_duplicate(connector->edid_blob_ptr->data); - - if (!override) - override = drm_load_edid_firmware(connector); + struct edid *override; - if (!IS_ERR_OR_NULL(override)) + override = drm_get_override_edid(connector); + if (override) return override; if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index e4ccb52c67ea47..334addaca9c545 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -567,6 +567,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, file_priv->event_space -= length; list_add(&e->link, &file_priv->event_list); spin_unlock_irq(&dev->event_lock); + wake_up_interruptible(&file_priv->event_wait); break; } diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index d36b1be632d963..2411b6de055e22 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -940,6 +940,11 @@ static int drm_mode_cursor_common(struct drm_device *dev, if (ret) goto out; + if (!drm_lease_held(file_priv, crtc->cursor->base.id)) { + ret = -EACCES; + goto out; + } + ret = 
drm_mode_cursor_universal(crtc, req, file_priv, &ctx); goto out; } @@ -1042,6 +1047,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, plane = crtc->primary; + if (!drm_lease_held(file_priv, plane->base.id)) + return -EACCES; + if (crtc->funcs->page_flip_target) { u32 current_vblank; int r; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a1bb157bfdfaeb..d18b7e27ef64c9 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -479,6 +479,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, count = (*connector_funcs->get_modes)(connector); + /* + * Fallback for when DDC probe failed in drm_get_edid() and thus skipped + * override/firmware EDID. + */ + if (count == 0 && connector->status == connector_status_connected) + count = drm_add_override_edid_modes(connector); + if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); count += drm_helper_probe_add_cmdline_mode(connector); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 83c1f46670bfea..00675fcbffa2d9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -527,6 +527,9 @@ static int etnaviv_bind(struct device *dev) } drm->dev_private = priv; + dev->dma_parms = &priv->dma_parms; + dma_set_max_seg_size(dev, SZ_2G); + mutex_init(&priv->gem_lock); INIT_LIST_HEAD(&priv->gem_list); priv->num_gpus = 0; @@ -564,6 +567,8 @@ static void etnaviv_unbind(struct device *dev) component_unbind_all(dev, drm); + dev->dma_parms = NULL; + drm->dev_private = NULL; kfree(priv); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 8d02d1b7dcf5a5..b2930d1fe97c04 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -43,6 +43,7 @@ struct etnaviv_file_private { struct etnaviv_drm_private { int num_gpus; + struct device_dma_parameters dma_parms; struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; /* list of GEM objects: */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 9146e30e24a6de..468dff2f79040e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) return; etnaviv_dump_core = false; + mutex_lock(&gpu->mmu->lock); + mmu_size = etnaviv_iommu_dump_size(gpu->mmu); /* We always dump registers, mmu, ring and end marker */ @@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); if (!iter.start) { + mutex_unlock(&gpu->mmu->lock); dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); return; } @@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) obj->base.size); } + mutex_unlock(&gpu->mmu->lock); + etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index de9531caaca0e0..9c8446184b177a 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_device *dev, int pipe; u8 pin; + if (!dev_priv->lvds_enabled_in_vbt) + return; + pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, 
&pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 63bde4e86c6a11..e019ea271ffc43 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv, if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) dev_priv->edp.support = 1; + dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0; + DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config); + /* This bit means to use 96Mhz for DPLL_A or not */ if (driver->primary_lfp_id) dev_priv->dplla_96mhz = true; diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 93d2f4000d2f90..be3cf9b348bd4c 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -538,6 +538,7 @@ struct drm_psb_private { int lvds_ssc_freq; bool is_lvds_on; bool is_mipi_on; + bool lvds_enabled_in_vbt; u32 mipi_ctrl_display; unsigned int core_freq; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 542f31ce108fd4..40b32b4d1d98be 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2161,7 +2161,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; unsigned long gma, gfn; - struct intel_gvt_gtt_entry e, m; + struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; dma_addr_t dma_addr; int ret; @@ -2237,7 +2238,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); - m = e; + m.val64 = e.val64; + m.type = e.type; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5b544cb3814809..16f5d2d9380149 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -32,7 +32,7 @@ * macros. Do **not** mass change existing definitions just to update the style. * * Layout - * '''''' + * ~~~~~~ * * Keep helper macros near the top. For example, _PIPE() and friends. * @@ -78,7 +78,7 @@ * style. Use lower case in hexadecimal values. * * Naming - * '''''' + * ~~~~~~ * * Try to name registers according to the specs. If the register name changes in * the specs from platform to another, stick to the original name. @@ -96,7 +96,7 @@ * suffix to the name. For example, ``_SKL`` or ``_GEN8``. * * Examples - * '''''''' + * ~~~~~~~~ * * (Note that the values in the example are indented using spaces instead of * TABs to avoid misalignment in generated documentation. 
Use TABs in the diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 01d1d2088f0488..728a20e1f638c7 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -1267,6 +1267,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (!HAS_FBC(dev_priv)) return 0; + /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */ + if (IS_GEMINILAKE(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) return 1; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 812fe7b06f8738..1817a5c0c80fd1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -925,6 +925,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, + u8 audio_state) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, + &audio_state, 1); +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -1371,11 +1378,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else sdvox |= SDVO_PIPE_SEL(crtc->pipe); - if (crtc_state->has_audio) { - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4); - sdvox |= SDVO_AUDIO_ENABLE; - } - if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || @@ -1515,8 +1517,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - if (sdvox & SDVO_AUDIO_ENABLE) - pipe_config->has_audio = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, + &val, 1)) { + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT; + + if ((val & mask) == mask) + pipe_config->has_audio = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { @@ -1529,6 +1536,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier, encoder_pixel_multiplier); } +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +{ + intel_sdvo_set_audio_state(intel_sdvo, 0); +} + +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + u8 *eld = connector->eld; + + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; + + intel_sdvo_set_audio_state(intel_sdvo, 0); + + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, + SDVO_HBUF_TX_DISABLED, + eld, drm_eld_size(eld)); + + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | + SDVO_AUDIO_PRESENCE_DETECT); +} + static void intel_disable_sdvo(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) @@ -1538,6 +1571,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; + if (old_crtc_state->has_audio) + intel_sdvo_disable_audio(intel_sdvo); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1623,6 +1659,9 @@ static void intel_enable_sdvo(struct 
intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + + if (pipe_config->has_audio) + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ -2514,7 +2553,6 @@ static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; - struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; @@ -2551,9 +2589,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - /* gen3 doesn't do the hdmi bits in the SDVO register */ - if (INTEL_GEN(dev_priv) >= 4 && - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index db0ed499268ae2..e9ba3b047f9321 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 + #define SDVO_AUDIO_ELD_VALID (1 << 0) + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1) + #define SDVO_AUDIO_CP_READY (1 << 2) #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_HBUF_INDEX_ELD 0 #define SDVO_HBUF_INDEX_AVI_IF 1 diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4bcdeaf8d98fa3..c44bb37e434c18 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -37,7 +37,7 @@ * costly and simplifies things. We can revisit this in the future. * * Layout - * '''''' + * ~~~~~~ * * Keep things in this file ordered by WA type, as per the above (context, GT, * display, register whitelist, batchbuffer). 
Then, inside each type, keep the diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ab1d9308c31146..ba6f3c14495c0c 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -35,7 +35,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname) { struct device *dev = &gpu->pdev->dev; const struct firmware *fw; - struct device_node *np; + struct device_node *np, *mem_np; struct resource r; phys_addr_t mem_phys; ssize_t mem_size; @@ -49,11 +49,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname) if (!np) return -ENODEV; - np = of_parse_phandle(np, "memory-region", 0); - if (!np) + mem_np = of_parse_phandle(np, "memory-region", 0); + of_node_put(np); + if (!mem_np) return -EINVAL; - ret = of_address_to_resource(np, 0, &r); + ret = of_address_to_resource(mem_np, 0, &r); + of_node_put(mem_np); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 273cbbe27c2e5e..1ddf07514de6d9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -503,6 +503,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane, static void mdp5_plane_atomic_async_update(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct drm_framebuffer *old_fb = plane->state->fb; + plane->state->src_x = new_state->src_x; plane->state->src_y = new_state->src_y; plane->state->crtc_x = new_state->crtc_x; @@ -525,6 +527,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane, *to_mdp5_plane_state(plane->state) = *to_mdp5_plane_state(new_state); + + new_state->fb = old_fb; } static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 4b75ad40dd80e5..00d9d77f583a7b 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -16,10 +16,21 @@ config DRM_NOUVEAU select INPUT if ACPI && X86 select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 - select DRM_VM help Choose this option for open-source NVIDIA support. +config NOUVEAU_LEGACY_CTX_SUPPORT + bool "Nouveau legacy context support" + depends on DRM_NOUVEAU + select DRM_VM + default y + help + There was a version of the nouveau DDX that relied on legacy + ctx ioctls not erroring out. But that was back in time a long + ways, so offer a way to disable it now. For uapi compat with + old nouveau ddx this should be on by default, but modern distros + should consider turning it off. 
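(Editorial note, not part of the patch: the option is consumed through ordinary
CONFIG_* preprocessor checks, as the nouveau_drm.c and nouveau_ttm.c hunks
further down show. A distribution that no longer cares about the legacy DDX
could carry a .config fragment along the lines of:

	CONFIG_DRM_NOUVEAU=m
	# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set

which drops DRIVER_KMS_LEGACY_CONTEXT and the drm_legacy_mmap() fallback from
the build.)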
+ config NOUVEAU_PLATFORM_DRIVER bool "Nouveau (NVIDIA) SoC GPUs" depends on DRM_NOUVEAU && ARCH_TEGRA diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index e48c5eb35b4988..66c125a6b0b3c3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -41,6 +41,7 @@ struct nv50_disp_interlock { NV50_DISP_INTERLOCK__SIZE } type; u32 data; + u32 wimm; }; void corec37d_ntfy_init(struct nouveau_bo *, u32); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 4f57e53797968e..d81a99bb2ac319 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) asyh->set.or = head->func->or != NULL; } - if (asyh->state.mode_changed) + if (asyh->state.mode_changed || asyh->state.connectors_changed) nv50_head_atomic_check_mode(head, asyh); if (asyh->state.color_mgmt_changed || diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c index 9103b8494279c6..f7dbd965e4e729 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c @@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, return ret; } + wndw->interlock.wimm = wndw->interlock.data; wndw->immd = func; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 2187922e8dc28d..b3db4553098d57 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -151,7 +151,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock, if (asyw->set.point) { if (asyw->set.point = false, asyw->set.mask) interlock[wndw->interlock.type] |= wndw->interlock.data; - interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data; + interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm; wndw->immd->point(wndw, asyw); wndw->immd->update(wndw, interlock); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h index eef54e9b5d77d2..7957eafa5f0edd 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h @@ -38,6 +38,7 @@ struct nvkm_i2c_bus { struct mutex mutex; struct list_head head; struct i2c_adapter i2c; + u8 enabled; }; int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *); @@ -57,6 +58,7 @@ struct nvkm_i2c_aux { struct mutex mutex; struct list_head head; struct i2c_adapter i2c; + u8 enabled; u32 intr; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 74d2283f2c28e7..2b7a54cc3c9ef4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1015,8 +1015,11 @@ nouveau_driver_fops = { static struct drm_driver driver_stub = { .driver_features = - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | - DRIVER_KMS_LEGACY_CONTEXT, + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) + | DRIVER_KMS_LEGACY_CONTEXT +#endif + , .load = nouveau_drm_load, .unload = nouveau_drm_unload, diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 8edb9f2a426945..e4b977cc845289 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -169,7 +169,11 @@ 
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) return drm_legacy_mmap(filp, vma); +#else + return -EINVAL; +#endif return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 5f301e632599b4..818d21bd28d312 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) * and it's better to have a failed modeset than that. */ for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) - failsafe = cfg; + if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { + /* Try to respect sink limits too when selecting + * lowest link configuration. + */ + if (!failsafe || + (cfg->nr <= sink_nr && cfg->bw <= sink_bw)) + failsafe = cfg; + } + if (failsafe && cfg[1].rate < dataKBps) break; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index 157b076a127230..38c9c086754b68 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c @@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) struct nvkm_device *device = bar->base.subdev.device; static struct lock_class_key bar1_lock; static struct lock_class_key bar2_lock; - u64 start, limit; + u64 start, limit, size; int ret; ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem); @@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ start = 0x0100000000ULL; - limit = start + device->func->resource_size(device, 3); + size = device->func->resource_size(device, 3); + if (!size) + return -ENOMEM; + limit = start + size; ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, &bar2_lock, "bar2", &bar->bar2_vmm); @@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR1 */ start = 0x0000000000ULL; - limit = start + device->func->resource_size(device, 1); + size = device->func->resource_size(device, 1); + if (!size) + return -ENOMEM; + limit = start + size; ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, &bar1_lock, "bar1", &bar->bar1_vmm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index 4c1f547da463af..b4e7404fe660e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -105,9 +105,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux) { struct nvkm_i2c_pad *pad = aux->pad; int ret; + AUX_TRACE(aux, "acquire"); mutex_lock(&aux->mutex); - ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX); + + if (aux->enabled) + ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX); + else + ret = -EIO; + if (ret) mutex_unlock(&aux->mutex); return ret; @@ -145,6 +151,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux) } } +void +nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux) +{ + AUX_TRACE(aux, "init"); + mutex_lock(&aux->mutex); + aux->enabled = true; + mutex_unlock(&aux->mutex); +} + +void +nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux) +{ + AUX_TRACE(aux, "fini"); + mutex_lock(&aux->mutex); + aux->enabled = false; + mutex_unlock(&aux->mutex); +} + int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func, struct nvkm_i2c_pad *pad, int id, diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 7d56c4ba693cf6..08f6b2ee64abf0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h @@ -16,6 +16,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, int id, struct nvkm_i2c_aux **); void nvkm_i2c_aux_del(struct nvkm_i2c_aux **); +void nvkm_i2c_aux_init(struct nvkm_i2c_aux *); +void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *); int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 4f197b15acf613..ecacb22834d76d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_i2c *i2c = nvkm_i2c(subdev); struct nvkm_i2c_pad *pad; + struct nvkm_i2c_bus *bus; + struct nvkm_i2c_aux *aux; u32 mask; + list_for_each_entry(aux, &i2c->aux, head) { + nvkm_i2c_aux_fini(aux); + } + + list_for_each_entry(bus, &i2c->bus, head) { + nvkm_i2c_bus_fini(bus); + } + if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) { i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0); i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask); @@ -180,6 +190,7 @@ nvkm_i2c_init(struct nvkm_subdev *subdev) struct nvkm_i2c *i2c = nvkm_i2c(subdev); struct nvkm_i2c_bus *bus; struct nvkm_i2c_pad *pad; + struct nvkm_i2c_aux *aux; list_for_each_entry(pad, &i2c->pad, head) { nvkm_i2c_pad_init(pad); @@ -189,6 +200,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev) nvkm_i2c_bus_init(bus); } + list_for_each_entry(aux, &i2c->aux, head) { + nvkm_i2c_aux_init(aux); + } + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c index 807a2b67bd641e..ed50cc3736b925 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c @@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus) BUS_TRACE(bus, "init"); if (bus->func->init) bus->func->init(bus); + + mutex_lock(&bus->mutex); + bus->enabled = true; + mutex_unlock(&bus->mutex); +} + +void +nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus) +{ + BUS_TRACE(bus, "fini"); + mutex_lock(&bus->mutex); + bus->enabled = false; + mutex_unlock(&bus->mutex); } void @@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus) { struct nvkm_i2c_pad *pad = bus->pad; int ret; + BUS_TRACE(bus, "acquire"); mutex_lock(&bus->mutex); - ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C); + + if (bus->enabled) + ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C); + else + ret = -EIO; + if (ret) mutex_unlock(&bus->mutex); return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h index bea0dd33961e1e..465464bba58b6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h @@ -18,6 +18,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *, int id, struct nvkm_i2c_bus **); void nvkm_i2c_bus_del(struct nvkm_i2c_bus **); void nvkm_i2c_bus_init(struct nvkm_i2c_bus *); +void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *); int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int); diff --git 
a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 74467b30872186..8160954ebc2571 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1386,12 +1386,9 @@ static int dsi_pll_enable(struct dss_pll *pll) */ dsi_enable_scp_clk(dsi); - if (!dsi->vdds_dsi_enabled) { - r = regulator_enable(dsi->vdds_dsi_reg); - if (r) - goto err0; - dsi->vdds_dsi_enabled = true; - } + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; /* XXX PLL does not come out of reset without this... */ dispc_pck_free_enable(dsi->dss->dispc, 1); @@ -1416,36 +1413,25 @@ static int dsi_pll_enable(struct dss_pll *pll) return 0; err1: - if (dsi->vdds_dsi_enabled) { - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + regulator_disable(dsi->vdds_dsi_reg); err0: dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); return r; } -static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes) +static void dsi_pll_disable(struct dss_pll *pll) { + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + dsi_pll_power(dsi, DSI_PLL_POWER_OFF); - if (disconnect_lanes) { - WARN_ON(!dsi->vdds_dsi_enabled); - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + + regulator_disable(dsi->vdds_dsi_reg); dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); - DSSDBG("PLL uninit done\n"); -} - -static void dsi_pll_disable(struct dss_pll *pll) -{ - struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); - - dsi_pll_uninit(dsi, true); + DSSDBG("PLL disable done\n"); } static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) @@ -4195,11 +4181,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) r = dss_pll_enable(&dsi->pll); if (r) - goto err0; + return r; r = dsi_configure_dsi_clocks(dsi); if (r) - goto err1; + goto err0; dss_select_dsi_clk_source(dsi->dss, dsi->module_id, dsi->module_id == 0 ? 
@@ -4207,6 +4193,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) DSSDBG("PLL OK\n"); + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err1; + + dsi->vdds_dsi_enabled = true; + } + r = dsi_cio_init(dsi); if (r) goto err2; @@ -4235,10 +4229,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) err3: dsi_cio_uninit(dsi); err2: - dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; err1: - dss_pll_disable(&dsi->pll); + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); err0: + dss_pll_disable(&dsi->pll); + return r; } @@ -4257,7 +4254,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsi); - dsi_pll_uninit(dsi, disconnect_lanes); + dss_pll_disable(&dsi->pll); + + if (disconnect_lanes) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } } static int dsi_display_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index 87fa316e1d7b09..58ccf648b70fbd 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx) /* Send Command GRAM memory write (no parameters) */ dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START); + /* Wait a short while to let the panel be ready before the 1st frame */ + mdelay(10); + return 0; } diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 754f6b25f2652e..6d9f78612deeb8 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm) dev_err(drm->dev, "CLCD: unable to get clcdclk.\n"); return PTR_ERR(parent); } + + spin_lock_init(&priv->tim2_lock); + /* If the clock divider is broken, use the parent directly */ if (priv->variant->broken_clockdivider) { priv->clk = parent; return 0; } parent_name = __clk_get_name(parent); - - spin_lock_init(&priv->tim2_lock); div->init = &init; ret = devm_clk_hw_register(drm->dev, div); diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index b9baefdba38a17..1c318ad32a8cd3 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) ret = vexpress_muxfpga_init(); if (ret) { dev_err(dev, "unable to initialize muxfpga driver\n"); + of_node_put(np); return ret; } @@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) pdev = of_find_device_by_node(np); if (!pdev) { dev_err(dev, "can't find the sysreg device, deferring\n"); + of_node_put(np); return -EPROBE_DEFER; } map = dev_get_drvdata(&pdev->dev); if (!map) { dev_err(dev, "sysreg has not yet probed\n"); platform_device_put(pdev); + of_node_put(np); return -EPROBE_DEFER; } } else { map = syscon_node_to_regmap(np); } + of_node_put(np); if (IS_ERR(map)) { dev_err(dev, "no Versatile syscon regmap\n"); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 9d3ac8b981dab3..d8e2d7b3b83658 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ 
b/drivers/gpu/drm/radeon/radeon_display.c @@ -921,12 +921,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, ref_div_max = max(min(100 / post_div, ref_div_max), 1u); /* get matching reference and feedback divider */ - *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); + *ref_div = min(max(den/post_div, 1u), ref_div_max); *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); /* limit fb divider to its maximum */ if (*fb_div > fb_div_max) { - *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); + *ref_div = (*ref_div * fb_div_max)/(*fb_div); *fb_div = fb_div_max; } } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index f814d37b1db21d..00a06768edb201 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -442,6 +442,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev) return 0; } +static void rockchip_drm_platform_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + if (drm) + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id rockchip_drm_dt_ids[] = { { .compatible = "rockchip,display-subsystem", }, { /* sentinel */ }, @@ -451,6 +459,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); static struct platform_driver rockchip_drm_platform_driver = { .probe = rockchip_drm_platform_probe, .remove = rockchip_drm_platform_remove, + .shutdown = rockchip_drm_platform_shutdown, .driver = { .name = "rockchip-drm", .of_match_table = rockchip_drm_dt_ids, diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index e3b34a3455460f..97a0573cc51459 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -357,7 +357,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi, static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { - return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1; + u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); + u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; + + if (delay > mode->vtotal) + delay = delay % mode->vtotal; + + return max_t(u16, delay, 1); } static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index a564b5dfe08283..dc9b1398adb91f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -177,7 +177,8 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi, SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW | SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4); ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(9) | - SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13); + SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13) | + SUN8I_HDMI_PHY_ANA_CFG3_REG_EMP(3); } regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, @@ -501,22 +502,13 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node) goto err_put_clk_pll0; } } - - ret = sun8i_phy_clk_create(phy, dev, - phy->variant->has_second_pll); - if (ret) { - dev_err(dev, "Couldn't create the PHY clock\n"); - goto err_put_clk_pll1; - } - - clk_prepare_enable(phy->clk_phy); } phy->rst_phy = of_reset_control_get_shared(node, "phy"); if (IS_ERR(phy->rst_phy)) { dev_err(dev, "Could not get phy reset control\n"); ret = PTR_ERR(phy->rst_phy); - goto err_disable_clk_phy; + goto 
err_put_clk_pll1; } ret = reset_control_deassert(phy->rst_phy); @@ -537,18 +529,29 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node) goto err_disable_clk_bus; } + if (phy->variant->has_phy_clk) { + ret = sun8i_phy_clk_create(phy, dev, + phy->variant->has_second_pll); + if (ret) { + dev_err(dev, "Couldn't create the PHY clock\n"); + goto err_disable_clk_mod; + } + + clk_prepare_enable(phy->clk_phy); + } + hdmi->phy = phy; return 0; +err_disable_clk_mod: + clk_disable_unprepare(phy->clk_mod); err_disable_clk_bus: clk_disable_unprepare(phy->clk_bus); err_deassert_rst_phy: reset_control_assert(phy->rst_phy); err_put_rst_phy: reset_control_put(phy->rst_phy); -err_disable_clk_phy: - clk_disable_unprepare(phy->clk_phy); err_put_clk_pll1: clk_put(phy->clk_pll1); err_put_clk_pll0: diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 4f80100ff5f34b..4cce11fd8836f9 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -204,7 +204,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); @@ -230,7 +230,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) } err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (err == 0) { err = -EFAULT; goto free_sgt; diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c index 455fefe012f591..6044a01069ce2d 100644 --- a/drivers/gpu/drm/tinydrm/ili9225.c +++ b/drivers/gpu/drm/tinydrm/ili9225.c @@ -278,7 +278,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) mipi->enabled = false; } -static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, +static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -288,11 +288,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) + if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c index cb3441e51d5f03..e772a8a9da80ec 100644 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c @@ -144,16 +144,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read); */ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) { + u8 *cmdbuf; int ret; + /* SPI requires dma-safe buffers */ + cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL); + if (!cmdbuf) + return -ENOMEM; + mutex_lock(&mipi->cmdlock); - ret = mipi->command(mipi, cmd, data, len); + ret = mipi->command(mipi, cmdbuf, data, len); mutex_unlock(&mipi->cmdlock); + kfree(cmdbuf); + return ret; } EXPORT_SYMBOL(mipi_dbi_command_buf); +/* This should only be used by mipi_dbi_command() */ +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) +{ + u8 *buf; + int ret; + + buf = kmemdup(data, len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mipi_dbi_command_buf(mipi, cmd, 
buf, len); + + kfree(buf); + + return ret; +} +EXPORT_SYMBOL(mipi_dbi_command_stackbuf); + /** * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary * @dst: The destination buffer @@ -741,18 +767,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc, return 0; } -static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd, u8 *parameters, size_t num) { - unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; + unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return -ENOTSUPP; - MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); - ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8); + ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8); if (ret || !num) return ret; @@ -761,7 +787,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, /* MIPI DBI Type C Option 3 */ -static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd, u8 *data, size_t len) { struct spi_device *spi = mipi->spi; @@ -770,7 +796,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, struct spi_transfer tr[2] = { { .speed_hz = speed_hz, - .tx_buf = &cmd, + .tx_buf = cmd, .len = 1, }, { .speed_hz = speed_hz, @@ -788,8 +814,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, * Support non-standard 24-bit and 32-bit Nokia read commands which * start with a dummy clock, so we need to read an extra byte. */ - if (cmd == MIPI_DCS_GET_DISPLAY_ID || - cmd == MIPI_DCS_GET_DISPLAY_STATUS) { + if (*cmd == MIPI_DCS_GET_DISPLAY_ID || + *cmd == MIPI_DCS_GET_DISPLAY_STATUS) { if (!(len == 3 || len == 4)) return -EINVAL; @@ -819,7 +845,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); } - MIPI_DBI_DEBUG_COMMAND(cmd, data, len); + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); err_free: kfree(buf); @@ -827,7 +853,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, return ret; } -static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -835,18 +861,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, u32 speed_hz; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return mipi_dbi_typec3_command_read(mipi, cmd, par, num); - MIPI_DBI_DEBUG_COMMAND(cmd, par, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, par, num); gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 2a85fa68ffea51..2a4c6187e675f7 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -305,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) goto dev_destroy; - v3d_irq_init(v3d); + ret = 
v3d_irq_init(v3d); + if (ret) + goto gem_destroy; ret = drm_dev_register(drm, 0); if (ret) - goto gem_destroy; + goto irq_disable; return 0; +irq_disable: + v3d_irq_disable(v3d); gem_destroy: v3d_gem_destroy(drm); dev_destroy: diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index e6fed696ad869e..0ad73f4b7509a0 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -284,7 +284,7 @@ void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_flush_caches(struct v3d_dev *v3d); /* v3d_irq.c */ -void v3d_irq_init(struct v3d_dev *v3d); +int v3d_irq_init(struct v3d_dev *v3d); void v3d_irq_enable(struct v3d_dev *v3d); void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index e07514eb11b511..22be0f2dff99c7 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -137,7 +137,7 @@ v3d_hub_irq(int irq, void *arg) return status; } -void +int v3d_irq_init(struct v3d_dev *v3d) { int ret, core; @@ -154,13 +154,22 @@ v3d_irq_init(struct v3d_dev *v3d) ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d); + if (ret) + goto fail; + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), v3d_irq, IRQF_SHARED, "v3d_core0", v3d); if (ret) - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + goto fail; v3d_irq_enable(v3d); + return 0; + +fail: + if (ret != -EPROBE_DEFER) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + return ret; } void diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index ab39315c9078bb..39e608271263db 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -818,6 +818,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, drm_atomic_set_fb_for_plane(plane->state, state->fb); } + swap(plane->state->fb, state->fb); /* Set the cursor's position on the screen. This is the * expected change from the drm_mode_cursor_universal() * helper. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c72b942f2bdffe..82ae687166968a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1291,7 +1291,13 @@ static int vmw_master_set(struct drm_device *dev, } dev_priv->active_master = vmaster; - drm_sysfs_hotplug_event(dev); + + /* + * Inform a new master that the layout may have changed while + * it was gone. 
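 * (Editorial note: from_open is set when this callback runs as part of
 * opening the device file; presumably such a master was never "gone" in the
 * sense above, so the hotplug event below is skipped for that path.)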
+ */ + if (!from_open) + drm_sysfs_hotplug_event(dev); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c3e2022bda5d62..3834aa71c9c4c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || + cmd->body.type < SVGA3D_SHADERTYPE_MIN) { DRM_ERROR("Illegal shader type %u.\n", (unsigned) cmd->body.type); return -EINVAL; @@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, if (view_type == vmw_view_max) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); + if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { + DRM_ERROR("Invalid surface id.\n"); + return -EINVAL; + } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->sid, &srf_node); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 8b9270f314091a..e4e09d47c5c0e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel) return 0; } +/** + * vmw_port_hb_out - Send the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @msg: NULL-terminated message. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. + */ +static unsigned long vmw_port_hb_out(struct rpc_channel *channel, + const char *msg, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + unsigned long msg_len = strlen(msg); + + if (hb) { + unsigned long bp = channel->cookie_high; + + si = (uintptr_t) msg; + di = channel->cookie_low; + + VMW_PORT_HB_OUT( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + msg_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Send the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) { + unsigned int bytes = min_t(size_t, msg_len, 4); + unsigned long word = 0; + + memcpy(&word, msg, bytes); + msg_len -= bytes; + msg += bytes; + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16), + word, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + } + + return ecx; +} + +/** + * vmw_port_hb_in - Receive the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @reply: Pointer to buffer holding reply. + * @reply_len: Length of the reply. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. 
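 *
 * (Illustrative sketch, not part of the patch: when the high-bandwidth port
 * is unavailable, both helpers fall back to moving the payload through the
 * backdoor port one register-sized chunk at a time, conceptually:
 *
 *	while (len) {
 *		unsigned int bytes = min_t(unsigned long, len, 4);
 *
 *		memcpy(&word, buf, bytes);	// reversed on the receive side
 *		// ... one VMW_PORT() transaction per word ...
 *		buf += bytes;
 *		len -= bytes;
 *	}
 *
 * mirroring the loops in vmw_port_hb_out() above and vmw_port_hb_in() below.)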
+ */ +static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, + unsigned long reply_len, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + + if (hb) { + unsigned long bp = channel->cookie_low; + + si = channel->cookie_high; + di = (uintptr_t) reply; + + VMW_PORT_HB_IN( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + reply_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Retrieve the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (reply_len) { + unsigned int bytes = min_t(unsigned long, reply_len, 4); + + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16), + MESSAGE_STATUS_SUCCESS, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) + break; + + memcpy(reply, &ebx, bytes); + reply_len -= bytes; + reply += bytes; + } + + return ecx; +} /** @@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel) */ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; size_t msg_len = strlen(msg); int retries = 0; - while (retries < RETRIES) { retries++; @@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { - /* Expected success + high-bandwidth. Give up. */ + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { + /* Expected success. Give up. 
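 * (Editorial note: the removed check also insisted on MESSAGE_STATUS_HB and
 * bailed out when the high-bandwidth port was absent; with the
 * vmw_port_hb_out()/vmw_port_hb_in() fallbacks above, that bit now merely
 * selects which transfer path is used.)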
*/ return -EINVAL; } /* Send msg */ - si = (uintptr_t) msg; - di = channel->cookie_low; - bp = channel->cookie_high; - - VMW_PORT_HB_OUT( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - msg_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); + ebx = vmw_port_hb_out(channel, msg, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) { return 0; @@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg); static int vmw_recv_msg(struct rpc_channel *channel, void **msg, size_t *msg_len) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; char *reply; size_t reply_len; int retries = 0; @@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { DRM_ERROR("Failed to get reply size for host message.\n"); return -EINVAL; } @@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, /* Receive buffer */ - si = channel->cookie_high; - di = (uintptr_t) reply; - bp = channel->cookie_low; - - VMW_PORT_HB_IN( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - reply_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); - + ebx = vmw_port_hb_in(channel, reply, reply_len, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 44564f61e9cc3c..861375561156cd 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -215,13 +215,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) * Add a usage to the temporary parser table. */ -static int hid_add_usage(struct hid_parser *parser, unsigned usage) +static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) { if (parser->local.usage_index >= HID_MAX_USAGES) { hid_err(parser->device, "usage index exceeded\n"); return -1; } parser->local.usage[parser->local.usage_index] = usage; + parser->local.usage_size[parser->local.usage_index] = size; parser->local.collection_index[parser->local.usage_index] = parser->collection_stack_ptr ? 
parser->collection_stack[parser->collection_stack_ptr - 1] : 0; @@ -482,10 +483,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - - return hid_add_usage(parser, data); + return hid_add_usage(parser, data, item->size); case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: @@ -494,9 +492,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - parser->local.usage_minimum = data; return 0; @@ -507,9 +502,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - count = data - parser->local.usage_minimum; if (count + parser->local.usage_index >= HID_MAX_USAGES) { /* @@ -529,7 +521,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) } for (n = parser->local.usage_minimum; n <= data; n++) - if (hid_add_usage(parser, n)) { + if (hid_add_usage(parser, n, item->size)) { dbg_hid("hid_add_usage failed\n"); return -1; } @@ -543,6 +535,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } +/* + * Concatenate Usage Pages into Usages where relevant: + * As per specification, 6.2.2.8: "When the parser encounters a main item it + * concatenates the last declared Usage Page with a Usage to form a complete + * usage value." + */ + +static void hid_concatenate_usage_page(struct hid_parser *parser) +{ + int i; + + for (i = 0; i < parser->local.usage_index; i++) + if (parser->local.usage_size[i] <= 2) + parser->local.usage[i] += parser->global.usage_page << 16; +} + /* * Process a main item. 
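 * (Editorial example, not part of the patch: hid_concatenate_usage_page()
 * above promotes every pending usage that was declared with a 1- or 2-byte
 * item to a full 32-bit value before the main item is handled, e.g. usage
 * page 0x0001 (Generic Desktop) with usage 0x0031 (Y axis) yields 0x00010031,
 * while a 4-byte usage item already carries its page and is left untouched.)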
*/ @@ -552,6 +560,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int ret; + hid_concatenate_usage_page(parser); + data = item_udata(item); switch (item->tag) { @@ -761,6 +771,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int i; + hid_concatenate_usage_page(parser); + data = item_udata(item); switch (item->tag) { diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 8425d3548a4141..e642cfaf303b49 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -725,13 +725,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature, static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) { + const u8 ping_byte = 0x5a; + u8 ping_data[3] = { 0, 0, ping_byte }; struct hidpp_report response; int ret; - ret = hidpp_send_fap_command_sync(hidpp, + ret = hidpp_send_rap_command_sync(hidpp, + REPORT_ID_HIDPP_SHORT, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_PROTOCOL_VERSION, - NULL, 0, &response); + ping_data, sizeof(ping_data), &response); if (ret == HIDPP_ERROR_INVALID_SUBID) { hidpp->protocol_major = 1; @@ -751,8 +754,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) if (ret) return ret; - hidpp->protocol_major = response.fap.params[0]; - hidpp->protocol_minor = response.fap.params[1]; + if (response.rap.params[2] != ping_byte) { + hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n", + __func__, response.rap.params[2], ping_byte); + return -EPROTO; + } + + hidpp->protocol_major = response.rap.params[0]; + hidpp->protocol_minor = response.rap.params[1]; return ret; } @@ -901,7 +910,11 @@ static int hidpp_map_battery_level(int capacity) { if (capacity < 11) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; - else if (capacity < 31) + /* + * The spec says this should be < 31 but some devices report 30 + * with brand new batteries and Windows reports 30 as "Good". 
+ */ + else if (capacity < 30) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (capacity < 81) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 2faf5421fdd0c6..184e49036e1dce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -641,6 +641,13 @@ static void mt_store_field(struct hid_device *hdev, if (*target != DEFAULT_TRUE && *target != DEFAULT_FALSE && *target != DEFAULT_ZERO) { + if (usage->contactid == DEFAULT_ZERO || + usage->x == DEFAULT_ZERO || + usage->y == DEFAULT_ZERO) { + hid_dbg(hdev, + "ignoring duplicate usage on incomplete"); + return; + } usage = mt_allocate_usage(hdev, application); if (!usage) return; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 5dd3a8245f0fd5..d7c3f4ac2c045d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1234,13 +1234,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) /* Add back in missing bits of ID for non-USI pens */ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF; } - wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0])); for (i = 0; i < pen_frames; i++) { unsigned char *frame = &data[i*pen_frame_len + 1]; bool valid = frame[0] & 0x80; bool prox = frame[0] & 0x40; bool range = frame[0] & 0x20; + bool invert = frame[0] & 0x10; if (!valid) continue; @@ -1249,9 +1249,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) wacom->shared->stylus_in_proximity = false; wacom_exit_report(wacom); input_sync(pen_input); + + wacom->tool[0] = 0; + wacom->id[0] = 0; + wacom->serial[0] = 0; return; } + if (range) { + if (!wacom->tool[0]) { /* first in range */ + /* Going into range select tool */ + if (invert) + wacom->tool[0] = BTN_TOOL_RUBBER; + else if (wacom->id[0]) + wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]); + else + wacom->tool[0] = BTN_TOOL_PEN; + } + input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); @@ -1273,23 +1288,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) get_unaligned_le16(&frame[11])); } } - input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); - if (wacom->features.type == INTUOSP2_BT) { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[13] : wacom->features.distance_max); - } else { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[7] : wacom->features.distance_max); - } - input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01); - input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); - input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + if (wacom->tool[0]) { + input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); + if (wacom->features.type == INTUOSP2_BT) { + input_report_abs(pen_input, ABS_DISTANCE, + range ? frame[13] : wacom->features.distance_max); + } else { + input_report_abs(pen_input, ABS_DISTANCE, + range ? 
frame[7] : wacom->features.distance_max); + } - input_report_key(pen_input, wacom->tool[0], prox); - input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); - input_report_abs(pen_input, ABS_MISC, - wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09); + input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); + input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + + input_report_key(pen_input, wacom->tool[0], prox); + input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); + input_report_abs(pen_input, ABS_MISC, + wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + } wacom->shared->stylus_in_proximity = prox; @@ -1351,11 +1369,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom) if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); + input_sync(touch_input); } } - input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); - input_sync(touch_input); + if (wacom->num_contacts_left == 0) { + // Be careful that we don't accidentally call input_sync with + // only a partial set of fingers of processed + input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); + input_sync(touch_input); + } + } static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) @@ -1363,7 +1387,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; - int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); + int buttons = data[282] | ((data[281] & 0x40) << 2); int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; bool prox = buttons || ringstatus; @@ -3832,7 +3856,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group) static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, int mask, int group) { - int button_per_group; + int group_button; /* * 21UX2 has LED group 1 to the left and LED group 0 @@ -3842,9 +3866,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, if (wacom->wacom_wac.features.type == WACOM_21UX2) group = 1 - group; - button_per_group = button_count/wacom->led.count; + group_button = group * (button_count/wacom->led.count); + + if (wacom->wacom_wac.features.type == INTUOSP2_BT) + group_button = 8; - return mask & (1 << (group * button_per_group)); + return mask & (1 << group_button); } static void wacom_update_led(struct wacom *wacom, int button_count, int mask, diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c index 73c681162653bf..623736d2a7c1d7 100644 --- a/drivers/hwmon/f71805f.c +++ b/drivers/hwmon/f71805f.c @@ -96,17 +96,23 @@ superio_select(int base, int ld) outb(ld, base + 1); } -static inline void +static inline int superio_enter(int base) { + if (!request_muxed_region(base, 2, DRVNAME)) + return -EBUSY; + outb(0x87, base); outb(0x87, base); + + return 0; } static inline void superio_exit(int base) { outb(0xaa, base); + release_region(base, 2); } /* @@ -1561,7 +1567,7 @@ static int __init f71805f_device_add(unsigned short address, static int __init f71805f_find(int sioaddr, unsigned short *address, struct f71805f_sio_data *sio_data) { - int err = -ENODEV; + int err; u16 devid; static const char * const names[] = { @@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address, "F71872F/FG or F71806F/FG", }; - superio_enter(sioaddr); + err = superio_enter(sioaddr); + if 
(err) + return err; + err = -ENODEV; devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) goto exit; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index fcdbac4a56e390..6b3559f58b67d8 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -619,7 +619,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (err) goto free_hwmon; - if (dev && chip && chip->ops->read && + if (dev && dev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip && (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { const struct hwmon_channel_info **info = chip->info; diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c index dc5a9d5ada516c..81a05cd1a5121a 100644 --- a/drivers/hwmon/pc87427.c +++ b/drivers/hwmon/pc87427.c @@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" }; #define LD_IN 1 #define LD_TEMP 1 +static inline int superio_enter(int sioaddr) +{ + if (!request_muxed_region(sioaddr, 2, DRVNAME)) + return -EBUSY; + return 0; +} + static inline void superio_outb(int sioaddr, int reg, int val) { outb(reg, sioaddr); @@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr) { outb(0x02, sioaddr); outb(0x02, sioaddr + 1); + release_region(sioaddr, 2); } /* @@ -1220,7 +1228,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data) { u16 val; u8 cfg, cfg_b; - int i, err = 0; + int i, err; + + err = superio_enter(sioaddr); + if (err) + return err; /* Identify device */ val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID); diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 2e2b5851139c2f..cd24b375df1eea 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1230,7 +1230,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, const struct pmbus_driver_info *info, const char *name, int index, int page, - const struct pmbus_sensor_attr *attr) + const struct pmbus_sensor_attr *attr, + bool paged) { struct pmbus_sensor *base; bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */ @@ -1238,7 +1239,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, if (attr->label) { ret = pmbus_add_label(data, name, index, attr->label, - attr->paged ? page + 1 : 0); + paged ? page + 1 : 0); if (ret) return ret; } @@ -1271,6 +1272,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, return 0; } +static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info, + const struct pmbus_sensor_attr *attr) +{ + int p; + + if (attr->paged) + return true; + + /* + * Some attributes may be present on more than one page despite + * not being marked with the paged attribute. If that is the case, + * then treat the sensor as being paged and add the page suffix to the + * attribute name. + * We don't just add the paged attribute to all such attributes, in + * order to maintain the un-suffixed labels in the case where the + * attribute is only on page 0. + */ + for (p = 1; p < info->pages; p++) { + if (info->func[p] & attr->func) + return true; + } + return false; +} + static int pmbus_add_sensor_attrs(struct i2c_client *client, struct pmbus_data *data, const char *name, @@ -1284,14 +1309,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client, index = 1; for (i = 0; i < nattrs; i++) { int page, pages; + bool paged = pmbus_sensor_is_paged(info, attrs); - pages = attrs->paged ? info->pages : 1; + pages = paged ? 
info->pages : 1; for (page = 0; page < pages; page++) { if (!(info->func[page] & attrs->func)) continue; ret = pmbus_add_sensor_attrs_one(client, data, info, name, index, page, - attrs); + attrs, paged); if (ret) return ret; index++; diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c index 6bd2007565603e..cbdb5c4991ae31 100644 --- a/drivers/hwmon/smsc47b397.c +++ b/drivers/hwmon/smsc47b397.c @@ -72,14 +72,19 @@ static inline void superio_select(int ld) superio_outb(0x07, ld); } -static inline void superio_enter(void) +static inline int superio_enter(void) { + if (!request_muxed_region(REG, 2, DRVNAME)) + return -EBUSY; + outb(0x55, REG); + return 0; } static inline void superio_exit(void) { outb(0xAA, REG); + release_region(REG, 2); } #define SUPERIO_REG_DEVID 0x20 @@ -300,8 +305,12 @@ static int __init smsc47b397_find(void) u8 id, rev; char *name; unsigned short addr; + int err; + + err = superio_enter(); + if (err) + return err; - superio_enter(); id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID); switch (id) { diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index c7b6a425e2c027..5eeac9853d0ae8 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c @@ -73,16 +73,21 @@ superio_inb(int reg) /* logical device for fans is 0x0A */ #define superio_select() superio_outb(0x07, 0x0A) -static inline void +static inline int superio_enter(void) { + if (!request_muxed_region(REG, 2, DRVNAME)) + return -EBUSY; + outb(0x55, REG); + return 0; } static inline void superio_exit(void) { outb(0xAA, REG); + release_region(REG, 2); } #define SUPERIO_REG_ACT 0x30 @@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data) { u8 val; unsigned short addr; + int err; + + err = superio_enter(); + if (err) + return err; - superio_enter(); val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID); /* @@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data) static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) { if ((sio_data->activate & 0x01) == 0) { - superio_enter(); - superio_select(); - - pr_info("Disabling device\n"); - superio_outb(SUPERIO_REG_ACT, sio_data->activate); - - superio_exit(); + if (!superio_enter()) { + superio_select(); + pr_info("Disabling device\n"); + superio_outb(SUPERIO_REG_ACT, sio_data->activate); + superio_exit(); + } else { + pr_warn("Failed to disable device\n"); + } } } diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c index 3a6bfa51cb94f6..95d5e8ec8b7fcb 100644 --- a/drivers/hwmon/vt1211.c +++ b/drivers/hwmon/vt1211.c @@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn) outb(ldn, sio_cip + 1); } -static inline void superio_enter(int sio_cip) +static inline int superio_enter(int sio_cip) { + if (!request_muxed_region(sio_cip, 2, DRVNAME)) + return -EBUSY; + outb(0x87, sio_cip); outb(0x87, sio_cip); + + return 0; } static inline void superio_exit(int sio_cip) { outb(0xaa, sio_cip); + release_region(sio_cip, 2); } /* --------------------------------------------------------------------- @@ -1282,11 +1288,14 @@ static int __init vt1211_device_add(unsigned short address) static int __init vt1211_find(int sio_cip, unsigned short *address) { - int err = -ENODEV; + int err; int devid; - superio_enter(sio_cip); + err = superio_enter(sio_cip); + if (err) + return err; + err = -ENODEV; devid = force_id ? 
force_id : superio_inb(sio_cip, SIO_VT1211_DEVID); if (devid != SIO_VT1211_ID) goto EXIT; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index ba7aaf421f36c4..8ff326c0c406c9 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -84,6 +84,7 @@ struct msc_iter { * @reg_base: register window base address * @thdev: intel_th_device pointer * @win_list: list of windows in multiblock mode + * @single_sgt: single mode buffer * @nr_pages: total number of pages allocated for this buffer * @single_sz: amount of data in single mode * @single_wrap: single mode wrap occurred @@ -104,6 +105,7 @@ struct msc { struct intel_th_device *thdev; struct list_head win_list; + struct sg_table single_sgt; unsigned long nr_pages; unsigned long single_sz; unsigned int single_wrap : 1; @@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev) */ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) { + unsigned long nr_pages = size >> PAGE_SHIFT; unsigned int order = get_order(size); struct page *page; + int ret; if (!size) return 0; + ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL); + if (ret) + goto err_out; + + ret = -ENOMEM; page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!page) - return -ENOMEM; + goto err_free_sgt; split_page(page, order); - msc->nr_pages = size >> PAGE_SHIFT; + sg_set_buf(msc->single_sgt.sgl, page_address(page), size); + + ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1, + DMA_FROM_DEVICE); + if (ret < 0) + goto err_free_pages; + + msc->nr_pages = nr_pages; msc->base = page_address(page); - msc->base_addr = page_to_phys(page); + msc->base_addr = sg_dma_address(msc->single_sgt.sgl); return 0; + +err_free_pages: + __free_pages(page, order); + +err_free_sgt: + sg_free_table(&msc->single_sgt); + +err_out: + return ret; } /** @@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc) { unsigned long off; + dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, + 1, DMA_FROM_DEVICE); + sg_free_table(&msc->single_sgt); + for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { struct page *page = virt_to_page(msc->base + off); diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 9d55e104400c86..9ec9197edffaf6 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx) static int stp_master_alloc(struct stm_device *stm, unsigned int idx) { struct stp_master *master; - size_t size; - size = ALIGN(stm->data->sw_nchannels, 8) / 8; - size += sizeof(struct stp_master); - master = kzalloc(size, GFP_ATOMIC); + master = kzalloc(struct_size(master, chan_map, + BITS_TO_LONGS(stm->data->sw_nchannels)), + GFP_ATOMIC); if (!master) return -ENOMEM; @@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) bitmap_release_region(&master->chan_map[0], output->channel, ilog2(output->nr_chans)); - output->nr_chans = 0; master->nr_free += output->nr_chans; + output->nr_chans = 0; } /* diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c index f4a5ae69bf6a48..fa3763e4b3ee26 100644 --- a/drivers/i2c/busses/i2c-acorn.c +++ b/drivers/i2c/busses/i2c-acorn.c @@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = { static struct i2c_adapter ioc_ops = { .nr = 0, + .name = "ioc", .algo_data = &ioc_data, }; diff --git a/drivers/i2c/busses/i2c-mlxcpld.c 
b/drivers/i2c/busses/i2c-mlxcpld.c index 745ed43a22d65c..2fd717d8dd30ec 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -503,6 +503,7 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->dev = &pdev->dev; + priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR; /* Register with i2c layer */ mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO); @@ -518,7 +519,6 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev) mlxcpld_i2c_adapter.nr = pdev->id; priv->adap = mlxcpld_i2c_adapter; priv->adap.dev.parent = &pdev->dev; - priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR; i2c_set_adapdata(&priv->adap, priv); err = i2c_add_numbered_adapter(&priv->adap); diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index e6c554e6ba588e..e47380b96b1d78 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -356,7 +356,7 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c, /* wait 2 clock periods to ensure the stop has been through the bus */ udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz)); - return 0; + return ret; } static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id) diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 0c51c0ffdda9d9..8d6b6eeef71c47 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_algorithm = { .functionality = xiic_func, }; +static const struct i2c_adapter_quirks xiic_quirks = { + .max_read_len = 255, +}; + static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, + .quirks = &xiic_quirks, }; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index ccd76c71af098d..cb07651f4b467f 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { + i++; res = -EINVAL; break; } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 4a754921fb6f9d..9421c1ec86f735 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -696,6 +696,7 @@ config STM32_DFSDM_ADC depends on (ARCH_STM32 && OF) || COMPILE_TEST select STM32_DFSDM_CORE select REGMAP_MMIO + select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER help Select this option to support ADCSigma delta modulator for diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index ae2a5097f44934..25af4c76b57fe1 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, struct spi_transfer t = { .tx_buf = data, .len = size + 1, - .cs_change = sigma_delta->bus_locked, + .cs_change = sigma_delta->keep_cs_asserted, }; struct spi_message m; int ret; @@ -218,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; reinit_completion(&sigma_delta->completion); ret = ad_sigma_delta_set_mode(sigma_delta, mode); @@ -235,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, ret = 0; } out: + sigma_delta->keep_cs_asserted = false; + ad_sigma_delta_set_mode(sigma_delta, 
AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->master); - ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); return ret; } @@ -289,6 +291,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; reinit_completion(&sigma_delta->completion); ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE); @@ -298,9 +301,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, ret = wait_for_completion_interruptible_timeout( &sigma_delta->completion, HZ); - sigma_delta->bus_locked = false; - spi_bus_unlock(sigma_delta->spi->master); - if (ret == 0) ret = -EIO; if (ret < 0) @@ -316,7 +316,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, sigma_delta->irq_dis = true; } + sigma_delta->keep_cs_asserted = false; ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); + sigma_delta->bus_locked = false; + spi_bus_unlock(sigma_delta->spi->master); mutex_unlock(&indio_dev->mlock); if (ret) @@ -353,6 +356,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; + ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS); if (ret) goto err_unlock; @@ -381,6 +386,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev) sigma_delta->irq_dis = true; } + sigma_delta->keep_cs_asserted = false; ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index a5bd5944bc6608..c9cd7e5c1b6148 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -56,6 +56,9 @@ struct ti_ads7950_state { struct spi_message ring_msg; struct spi_message scan_single_msg; + /* Lock to protect the spi xfer buffers */ + struct mutex slock; + struct regulator *reg; unsigned int vref_mv; @@ -277,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p) struct ti_ads7950_state *st = iio_priv(indio_dev); int ret; + mutex_lock(&st->slock); ret = spi_sync(st->spi, &st->ring_msg); if (ret < 0) goto out; @@ -285,6 +289,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p) iio_get_time_ns(indio_dev)); out: + mutex_unlock(&st->slock); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -295,7 +300,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch) struct ti_ads7950_state *st = iio_priv(indio_dev); int ret, cmd; - mutex_lock(&indio_dev->mlock); + mutex_lock(&st->slock); cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings; st->single_tx = cpu_to_be16(cmd); @@ -307,7 +312,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch) ret = be16_to_cpu(st->single_rx); out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->slock); return ret; } @@ -423,16 +428,19 @@ static int ti_ads7950_probe(struct spi_device *spi) if (ACPI_COMPANION(&spi->dev)) st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT; + mutex_init(&st->slock); + st->reg = devm_regulator_get(&spi->dev, "vref"); if (IS_ERR(st->reg)) { dev_err(&spi->dev, "Failed get get regulator \"vref\"\n"); - return PTR_ERR(st->reg); + ret = PTR_ERR(st->reg); + goto error_destroy_mutex; } ret = regulator_enable(st->reg); if (ret) { dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n"); - return ret; + goto error_destroy_mutex; } ret = 
iio_triggered_buffer_setup(indio_dev, NULL, @@ -454,6 +462,8 @@ static int ti_ads7950_probe(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); error_disable_reg: regulator_disable(st->reg); +error_destroy_mutex: + mutex_destroy(&st->slock); return ret; } @@ -466,6 +476,7 @@ static int ti_ads7950_remove(struct spi_device *spi) iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); + mutex_destroy(&st->slock); return 0; } diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 8b4568edd5cb6e..7f16c77b99fb4a 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -397,7 +397,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, buffer, - pf->timestamp); + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c index 645f2e3975db45..e38f704d88b7e7 100644 --- a/drivers/iio/common/ssp_sensors/ssp_iio.c +++ b/drivers/iio/common/ssp_sensors/ssp_iio.c @@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf, unsigned int len, int64_t timestamp) { __le32 time; - int64_t calculated_time; + int64_t calculated_time = 0; struct ssp_sensor_data *spd = iio_priv(indio_dev); if (indio_dev->scan_bytes == 0) diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c index 883a475620550d..714a97f9131997 100644 --- a/drivers/iio/dac/ds4424.c +++ b/drivers/iio/dac/ds4424.c @@ -166,7 +166,7 @@ static int ds4424_verify_chip(struct iio_dev *indio_dev) { int ret, val; - ret = ds4424_get_value(indio_dev, &val, DS4424_DAC_ADDR(0)); + ret = ds4424_get_value(indio_dev, &val, 0); if (ret < 0) dev_err(&indio_dev->dev, "%s failed. 
ret: %d\n", __func__, ret); diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c index 3de7f4426ac409..86abba5827a257 100644 --- a/drivers/iio/magnetometer/hmc5843_i2c.c +++ b/drivers/iio/magnetometer/hmc5843_i2c.c @@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = { static int hmc5843_i2c_probe(struct i2c_client *cli, const struct i2c_device_id *id) { + struct regmap *regmap = devm_regmap_init_i2c(cli, + &hmc5843_i2c_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + return hmc5843_common_probe(&cli->dev, - devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config), + regmap, id->driver_data, id->name); } diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c index 535f03a70d630f..79b2b707f90e70 100644 --- a/drivers/iio/magnetometer/hmc5843_spi.c +++ b/drivers/iio/magnetometer/hmc5843_spi.c @@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = { static int hmc5843_spi_probe(struct spi_device *spi) { int ret; + struct regmap *regmap; const struct spi_device_id *id = spi_get_device_id(spi); spi->mode = SPI_MODE_3; @@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi) if (ret) return ret; + regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + return hmc5843_common_probe(&spi->dev, - devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config), + regmap, id->driver_data, id->name); } diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c index 9851311aa3fdc9..2d54d9cac61dc0 100644 --- a/drivers/iio/temperature/mlx90632.c +++ b/drivers/iio/temperature/mlx90632.c @@ -81,6 +81,8 @@ /* Magic constants */ #define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */ #define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */ +#define MLX90632_DSP_VERSION 5 /* DSP version */ +#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */ #define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */ #define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */ #define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */ @@ -666,10 +668,13 @@ static int mlx90632_probe(struct i2c_client *client, } else if (read == MLX90632_ID_CONSUMER) { dev_dbg(&client->dev, "Detected Consumer EEPROM calibration %x\n", read); + } else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) { + dev_dbg(&client->dev, + "Detected Unknown EEPROM calibration %x\n", read); } else { dev_err(&client->dev, - "EEPROM version mismatch %x (expected %x or %x)\n", - read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL); + "Wrong DSP version %x (expected %x)\n", + read, MLX90632_DSP_VERSION); return -EPROTONOSUPPORT; } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6f5be780247621..39dc7be56884ab 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1078,18 +1078,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr) return cma_zero_addr(addr) || cma_loopback_addr(addr); } -static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) +static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) { if (src->sa_family != dst->sa_family) return -1; switch (src->sa_family) { case AF_INET: - return ((struct sockaddr_in *) src)->sin_addr.s_addr != - ((struct sockaddr_in *) dst)->sin_addr.s_addr; - case AF_INET6: - return ipv6_addr_cmp(&((struct 
sockaddr_in6 *) src)->sin6_addr, - &((struct sockaddr_in6 *) dst)->sin6_addr); + return ((struct sockaddr_in *)src)->sin_addr.s_addr != + ((struct sockaddr_in *)dst)->sin_addr.s_addr; + case AF_INET6: { + struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; + struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; + bool link_local; + + if (ipv6_addr_cmp(&src_addr6->sin6_addr, + &dst_addr6->sin6_addr)) + return 1; + link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & + IPV6_ADDR_LINKLOCAL; + /* Link local must match their scope_ids */ + return link_local ? (src_addr6->sin6_scope_id != + dst_addr6->sin6_scope_id) : + 0; + } + default: return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, &((struct sockaddr_ib *) dst)->sib_addr); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index a68569ec86bf97..3be6405d9855ec 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -458,6 +458,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) skb_reset_transport_header(skb); } else { skb = alloc_skb(len, gfp); + if (!skb) + return NULL; } t4_set_arp_err_handler(skb, NULL, NULL); return skb; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b12c8ff8ed6669..d8eb4dc04d6903 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9849,6 +9849,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) /* disable the port */ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); + cancel_work_sync(&ppd->freeze_work); } static inline int init_cpu_counters(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index e2290f32c8d90c..7eaff4dcbfd77d 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, char *dash; unsigned long range_start, range_end, i; bool remove = false; + unsigned long bound = 1U << BITS_PER_BYTE; end = strchr(ptr, ','); if (end) @@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, BITS_PER_BYTE); break; } + /* Check the inputs */ + if (range_start >= bound || range_end >= bound) + break; + for (i = range_start; i <= range_end; i++) { if (remove) clear_bit(i, fault->opcodes); diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index da786eb18558d6..368f4f08b68666 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -798,7 +798,8 @@ static int create_workqueues(struct hfi1_devdata *dd) ppd->hfi1_wq = alloc_workqueue( "hfi%d_%d", - WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, + WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES, dd->unit, pidx); if (!ppd->hfi1_wq) diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index dbe7d14a5c76d1..4e986ca4dd3544 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, u32 *tidlist = NULL; struct tid_user_buf *tidbuf; + if (!PAGE_ALIGNED(tinfo->vaddr)) + return -EINVAL; + tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 48692adbe811ee..27d9c4cefdc7a5 100644 --- 
a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1418,8 +1418,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd) rdi->dparms.props.max_cq = hfi1_max_cqs; rdi->dparms.props.max_ah = hfi1_max_ahs; rdi->dparms.props.max_cqe = hfi1_max_cqes; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_pd = hfi1_max_pds; rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index c4ab2d5b4502ee..8f766dd3f61c89 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (tx) goto out; priv = qp->priv; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1c19bbc764b2d6..b1a78985b4ecad 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -72,6 +72,7 @@ struct hfi1_ibdev; struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp); +#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN) static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->slock) @@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct verbs_txreq *tx; struct hfi1_qp_priv *priv = qp->priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 0d96c5bb38cdf5..d2d4ab9ab071ca 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -66,7 +66,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, HNS_ROCE_VLAN_SL_BIT_MASK) << HNS_ROCE_VLAN_SL_SHIFT; - ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | + ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn | (rdma_ah_get_port_num(ah_attr) << HNS_ROCE_PORT_NUM_SHIFT)); ah->av.gid_index = grh->sgid_index; diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 41babbc0db583c..803c3544c75b5f 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1495,8 +1495,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd) rdi->dparms.props.max_cq = ib_qib_max_cqs; rdi->dparms.props.max_cqe = ib_qib_max_cqes; rdi->dparms.props.max_ah = ib_qib_max_ahs; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; rdi->dparms.props.max_qp_init_rd_atom = 255; diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 5819c9d6ffdc8a..39d101df229d15 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi) for (i = 0; i < rdi->lkey_table.max; i++) RCU_INIT_POINTER(rdi->lkey_table.table[i], 
NULL); + rdi->dparms.props.max_mr = rdi->lkey_table.max; + rdi->dparms.props.max_fmr = rdi->lkey_table.max; return 0; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5ce403c6cddba4..7d03680afd9145 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -412,7 +412,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, offset = qpt->incr | ((offset & 1) ^ 1); } /* there can be no set bits in low-order QoS bits */ - WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); + WARN_ON(rdi->dparms.qos_shift > 1 && + offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1)); qpn = mk_qpn(qpt, map, offset); } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 26ec603fe22085..83d1499fe02152 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -1051,13 +1051,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT -#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +/* + * These IOCTLs change their size and thus their numbers between + * 32 and 64 bits. + */ +#define UI_SET_PHYS_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +#define UI_BEGIN_FF_UPLOAD_COMPAT \ + _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat) +#define UI_END_FF_UPLOAD_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat) static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - if (cmd == UI_SET_PHYS_COMPAT) + switch (cmd) { + case UI_SET_PHYS_COMPAT: cmd = UI_SET_PHYS; + break; + case UI_BEGIN_FF_UPLOAD_COMPAT: + cmd = UI_BEGIN_FF_UPLOAD; + break; + case UI_END_FF_UPLOAD_COMPAT: + cmd = UI_END_FF_UPLOAD; + break; + } return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index b6da0c1267e36e..8e6077d8e434a3 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = { "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ NULL diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index d196ac3d8b8cda..e5c3b066bd2a19 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -604,6 +604,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = { { "MSSL1680", 0 }, { "MSSL0001", 0 }, { "MSSL0002", 0 }, + { "MSSL0017", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 9ae3678844ebf0..40fbf20d69e5a5 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2414,13 +2414,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Clear CR0 and sync (disables SMMU and queue processing) */ reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); if (reg & CR0_SMMUEN) { - if (is_kdump_kernel()) { - arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); - arm_smmu_device_disable(smmu); - return -EBUSY; - } - dev_warn(smmu->dev, "SMMU currently enabled! 
Resetting...\n"); + WARN_ON(is_kdump_kernel() && !disable_bypass); + arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); } ret = arm_smmu_device_disable(smmu); @@ -2513,6 +2509,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) return ret; } + if (is_kdump_kernel()) + enables &= ~(CR0_EVTQEN | CR0_PRIQEN); /* Enable the SMMU interface, or ensure bypass */ if (!bypass || disable_bypass) { diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ce119cb279c3f3..0c3b8f1c7225e8 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -56,6 +56,15 @@ #include "io-pgtable.h" #include "arm-smmu-regs.h" +/* + * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU + * global register space are still, in fact, using a hypervisor to mediate it + * by trapping and emulating register accesses. Sadly, some deployed versions + * of said trapping code have bugs wherein they go horribly wrong for stores + * using r31 (i.e. XZR/WZR) as the source register. + */ +#define QCOM_DUMMY_VAL -1 + #define ARM_MMU500_ACTLR_CPRE (1 << 1) #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) @@ -398,7 +407,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, { unsigned int spin_cnt, delay; - writel_relaxed(0, sync); + writel_relaxed(QCOM_DUMMY_VAL, sync); for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) @@ -1637,8 +1646,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) } /* Invalidate the TLB, just in case */ - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 603bf5233a998b..c1439019dd127e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4033,9 +4033,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. 
Either bypass it or set the gfx_mapped flag, as appropriate */ - if (dmar_map_gfx) { - intel_iommu_gfx_mapped = 1; - } else { + if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) @@ -4831,6 +4829,9 @@ int __init intel_iommu_init(void) goto out_free_reserved_range; } + if (dmar_map_gfx) + intel_iommu_gfx_mapped = 1; + init_no_remapping_devices(); ret = init_dmars(); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 0d03341317c466..121d3cb7ddd1de 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) -#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24) #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ SMMU_TLB_FLUSH_VA_MATCH_SECTION) #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ @@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_MATCH_ALL; + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL; smmu_writel(smmu, value, SMMU_TLB_FLUSH); } @@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_SECTION(iova); + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova); smmu_writel(smmu, value, SMMU_TLB_FLUSH); } @@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_GROUP(iova); + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova); smmu_writel(smmu, value, SMMU_TLB_FLUSH); } diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c index 0390603170b405..aa729078463601 100644 --- a/drivers/irqchip/irq-ath79-misc.c +++ b/drivers/irqchip/irq-ath79-misc.c @@ -22,15 +22,6 @@ #define AR71XX_RESET_REG_MISC_INT_ENABLE 4 #define ATH79_MISC_IRQ_COUNT 32 -#define ATH79_MISC_PERF_IRQ 5 - -static int ath79_perfcount_irq; - -int get_c0_perfcount_int(void) -{ - return ath79_perfcount_irq; -} -EXPORT_SYMBOL_GPL(get_c0_perfcount_int); static void ath79_misc_irq_handler(struct irq_desc *desc) { @@ -122,8 +113,6 @@ static void __init ath79_misc_intc_domain_init( { void __iomem *base = domain->host_data; - ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ); - /* Disable and clear all interrupts */ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index b2abc44fa5cb8b..a73337b74f410c 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = 
dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = -EFAULT; break; } + dn.name[sizeof(dn.name) - 1] = '\0'; dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c index 533b0da5235d43..ca1f993c0de306 100644 --- a/drivers/mailbox/stm32-ipcc.c +++ b/drivers/mailbox/stm32-ipcc.c @@ -8,9 +8,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* irq */ for (i = 0; i < IPCC_IRQ_NUM; i++) { - ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]); + ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]); if (ipcc->irqs[i] < 0) { - dev_err(dev, "no IRQ specified %s\n", irq_name[i]); + if (ipcc->irqs[i] != -EPROBE_DEFER) + dev_err(dev, "no IRQ specified %s\n", + irq_name[i]); ret = ipcc->irqs[i]; goto err_clk; } @@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* wakeup */ if (of_property_read_bool(np, "wakeup-source")) { - ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup"); + ipcc->wkp = platform_get_irq_byname(pdev, "wakeup"); if (ipcc->wkp < 0) { - dev_err(dev, "could not get wakeup IRQ\n"); + if (ipcc->wkp != -EPROBE_DEFER) + dev_err(dev, "could not get wakeup IRQ\n"); ret = ipcc->wkp; goto err_clk; } diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 7a28232d868bd1..de85b3af3b39dd 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg) * possibly issue discards to them, then we add the bucket to * the free list: */ - while (!fifo_empty(&ca->free_inc)) { + while (1) { long bucket; - fifo_pop(&ca->free_inc, bucket); + if (!fifo_pop(&ca->free_inc, bucket)) + break; if (ca->discard) { mutex_unlock(&ca->set->bucket_lock); diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8f07fa6e17394b..268f1b6850840a 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; struct btree_iter iter; + struct bkey preceding_key_on_stack = ZERO_KEY; + struct bkey *preceding_key_p = &preceding_key_on_stack; BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); - m = bch_btree_iter_init(b, &iter, b->ops->is_extents - ? PRECEDING_KEY(&START_KEY(k)) - : PRECEDING_KEY(k)); + /* + * If k has preceding key, preceding_key_p will be set to address + * of k's preceding key; otherwise preceding_key_p will be set + * to NULL inside preceding_key(). 
+ */ + if (b->ops->is_extents) + preceding_key(&START_KEY(k), &preceding_key_p); + else + preceding_key(k, &preceding_key_p); + + m = bch_btree_iter_init(b, &iter, preceding_key_p); if (b->ops->insert_fixup(b, k, &iter, replace_key)) return status; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index bac76aabca6d79..c71365e7c1fac7 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) return __bch_cut_back(where, k); } -#define PRECEDING_KEY(_k) \ -({ \ - struct bkey *_ret = NULL; \ - \ - if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \ - _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \ - \ - if (!_ret->low) \ - _ret->high--; \ - _ret->low--; \ - } \ - \ - _ret; \ -}) +/* + * Pointer '*preceding_key_p' points to a memory object to store preceding + * key of k. If the preceding key does not exist, set '*preceding_key_p' to + * NULL. So the caller of preceding_key() needs to take care of memory + * which '*preceding_key_p' pointed to before calling preceding_key(). + * Currently the only caller of preceding_key() is bch_btree_insert_key(), + * and it points to an on-stack variable, so the memory release is handled + * by stackframe itself. + */ +static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p) +{ + if (KEY_INODE(k) || KEY_OFFSET(k)) { + (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0); + if (!(*preceding_key_p)->low) + (*preceding_key_p)->high--; + (*preceding_key_p)->low--; + } else { + (*preceding_key_p) = NULL; + } +} static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k) { diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 522c7426f3a05c..f880e5eba8dd9a 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) } } +bool is_discard_enabled(struct cache_set *s) +{ + struct cache *ca; + unsigned int i; + + for_each_cache(ca, s, i) + if (ca->discard) + return true; + + return false; +} + int bch_journal_replay(struct cache_set *s, struct list_head *list) { int ret = 0, keys = 0, entries = 0; @@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) list_for_each_entry(i, list, list) { BUG_ON(i->pin && atomic_read(i->pin) != 1); - cache_set_err_on(n != i->j.seq, s, -"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", - n, i->j.seq - 1, start, end); + if (n != i->j.seq) { + if (n == start && is_discard_enabled(s)) + pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + else { + pr_err("bcache: journal entries %llu-%llu missing! 
(replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + ret = -EIO; + goto err; + } + } for (k = i->j.start; k < bset_bkey_last(&i->j); @@ -540,11 +560,11 @@ static void journal_reclaim(struct cache_set *c) ca->sb.nr_this_dev); } - bkey_init(k); - SET_KEY_PTRS(k, n); - - if (n) + if (n) { + bkey_init(k); + SET_KEY_PTRS(k, n); c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; + } out: if (!journal_full(&c->journal)) __closure_wake_up(&c->journal.wait); @@ -671,6 +691,9 @@ static void journal_write_unlocked(struct closure *cl) ca->journal.seq[ca->journal.cur_idx] = w->data->seq; } + /* If KEY_PTRS(k) == 0, this jset gets lost in air */ + BUG_ON(i == 0); + atomic_dec_bug(&fifo_back(&c->journal.pin)); bch_journal_next(&c->journal); journal_reclaim(c); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 03bb5cee2b8356..2409507d7bff85 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1511,6 +1511,7 @@ static void cache_set_free(struct closure *cl) bch_btree_cache_free(c); bch_journal_free(c); + mutex_lock(&bch_register_lock); for_each_cache(ca, c, i) if (ca) { ca->set = NULL; @@ -1529,7 +1530,6 @@ static void cache_set_free(struct closure *cl) mempool_exit(&c->search); kfree(c->devices); - mutex_lock(&bch_register_lock); list_del(&c->list); mutex_unlock(&bch_register_lock); @@ -1770,13 +1770,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) return NULL; } -static void run_cache_set(struct cache_set *c) +static int run_cache_set(struct cache_set *c) { const char *err = "cannot allocate memory"; struct cached_dev *dc, *t; struct cache *ca; struct closure cl; unsigned int i; + LIST_HEAD(journal); + struct journal_replay *l; closure_init_stack(&cl); @@ -1864,7 +1866,9 @@ static void run_cache_set(struct cache_set *c) if (j->version < BCACHE_JSET_VERSION_UUID) __uuid_write(c); - bch_journal_replay(c, &journal); + err = "bcache: replay journal failed"; + if (bch_journal_replay(c, &journal)) + goto err; } else { pr_notice("invalidating existing data"); @@ -1932,11 +1936,19 @@ static void run_cache_set(struct cache_set *c) flash_devs_run(c); set_bit(CACHE_SET_RUNNING, &c->flags); - return; + return 0; err: + while (!list_empty(&journal)) { + l = list_first_entry(&journal, struct journal_replay, list); + list_del(&l->list); + kfree(l); + } + closure_sync(&cl); /* XXX: test this, it's broken */ bch_cache_set_error(c, "%s", err); + + return -EIO; } static bool can_attach_cache(struct cache *ca, struct cache_set *c) @@ -2000,8 +2012,11 @@ static const char *register_cache_set(struct cache *ca) ca->set->cache[ca->sb.nr_this_dev] = ca; c->cache_by_alloc[c->caches_loaded++] = ca; - if (c->caches_loaded == c->sb.nr_in_set) - run_cache_set(c); + if (c->caches_loaded == c->sb.nr_in_set) { + err = "failed to run cache set"; + if (run_cache_set(c) < 0) + goto err; + } return NULL; err: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index d9481640b3e116..541454b4f479ca 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -393,8 +393,13 @@ STORE(bch_cached_dev) if (attr == &sysfs_writeback_running) bch_writeback_queue(dc); + /* + * Only set BCACHE_DEV_WB_RUNNING when cached device attached to + * a cache set, otherwise it doesn't make sense. 
+ */ if (attr == &sysfs_writeback_percent) - if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + if ((dc->disk.c != NULL) && + (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 6fc93834da4464..151aa95775be2d 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd, if (r) return r; - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { + for (b = 0; ; b++) { r = fn(context, cmd->discard_block_size, to_dblock(b), dm_bitset_cursor_get_value(&c)); if (r) break; + + if (b >= (from_dblock(cmd->discard_nr_blocks) - 1)) + break; + + r = dm_bitset_cursor_next(&c); + if (r) + break; } dm_bitset_cursor_end(&c); diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index fddffe251bf6bf..f496213f8b6753 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; - destroy_workqueue(dc->kdelayd_wq); + if (dc->kdelayd_wq) + destroy_workqueue(dc->kdelayd_wq); if (dc->read.dev) dm_put_device(ti, dc->read.dev); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index bc6ef2303f0bdd..dbdcc543832dfe 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2557,7 +2557,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic) if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) return -EINVAL; } else { - __u64 meta_size = ic->provided_data_sectors * ic->tag_size; + __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) >> (ic->log2_buffer_sectors + SECTOR_SHIFT); meta_size <<= ic->log2_buffer_sectors; @@ -3428,7 +3428,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); DEBUG_print(" journal_entries %u\n", ic->journal_entries); DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); - DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors); + DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors); DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 419362c2d8aca1..baa966e2778c0b 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -892,6 +892,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps if (attached_handler_name || m->hw_handler_name) { INIT_DELAYED_WORK(&p->activate_path, activate_path_work); r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); + kfree(attached_handler_name); if (r) { dm_put_device(ti, p->path.dev); goto bad; @@ -906,7 +907,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps return p; bad: - kfree(attached_handler_name); free_pgpath(p); return ERR_PTR(r); } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index fa68336560c34d..d8334cd45d7cb5 100644 --- 
a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd) goto out; } + if (!nr_blkz) + break; + /* Process report */ for (i = 0; i < nr_blkz; i++) { ret = dmz_init_zone(zmd, zone, &blkz[i]); @@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) /* Get zone information from disk */ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), &blkz, &nr_blkz, GFP_NOIO); + if (!nr_blkz) + ret = -EIO; if (ret) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8668793262d099..b924f62e2cd58f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } -static void * flush_info_alloc(gfp_t gfp_flags, void *data) -{ - return kzalloc(sizeof(struct flush_info), gfp_flags); -} -static void flush_info_free(void *flush_info, void *data) -{ - kfree(flush_info); -} - -static void * flush_bio_alloc(gfp_t gfp_flags, void *data) -{ - return kzalloc(sizeof(struct flush_bio), gfp_flags); -} -static void flush_bio_free(void *flush_bio, void *data) -{ - kfree(flush_bio); -} - static struct ctl_table_header *raid_table_header; static struct ctl_table raid_table[] = { @@ -429,54 +411,31 @@ static int md_congested(void *data, int bits) /* * Generic flush handling for md */ -static void submit_flushes(struct work_struct *ws) -{ - struct flush_info *fi = container_of(ws, struct flush_info, flush_work); - struct mddev *mddev = fi->mddev; - struct bio *bio = fi->bio; - - bio->bi_opf &= ~REQ_PREFLUSH; - md_handle_request(mddev, bio); - - mempool_free(fi, mddev->flush_pool); -} -static void md_end_flush(struct bio *fbio) +static void md_end_flush(struct bio *bio) { - struct flush_bio *fb = fbio->bi_private; - struct md_rdev *rdev = fb->rdev; - struct flush_info *fi = fb->fi; - struct bio *bio = fi->bio; - struct mddev *mddev = fi->mddev; + struct md_rdev *rdev = bio->bi_private; + struct mddev *mddev = rdev->mddev; rdev_dec_pending(rdev, mddev); - if (atomic_dec_and_test(&fi->flush_pending)) { - if (bio->bi_iter.bi_size == 0) { - /* an empty barrier - all done */ - bio_endio(bio); - mempool_free(fi, mddev->flush_pool); - } else { - INIT_WORK(&fi->flush_work, submit_flushes); - queue_work(md_wq, &fi->flush_work); - } + if (atomic_dec_and_test(&mddev->flush_pending)) { + /* The pre-request flush has finished */ + queue_work(md_wq, &mddev->flush_work); } - - mempool_free(fb, mddev->flush_bio_pool); - bio_put(fbio); + bio_put(bio); } -void md_flush_request(struct mddev *mddev, struct bio *bio) +static void md_submit_flush_data(struct work_struct *ws); + +static void submit_flushes(struct work_struct *ws) { + struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct md_rdev *rdev; - struct flush_info *fi; - - fi = mempool_alloc(mddev->flush_pool, GFP_NOIO); - - fi->bio = bio; - fi->mddev = mddev; - atomic_set(&fi->flush_pending, 1); + mddev->start_flush = ktime_get_boottime(); + INIT_WORK(&mddev->flush_work, md_submit_flush_data); + atomic_set(&mddev->flush_pending, 1); rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && @@ -486,37 +445,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) * we reclaim rcu_read_lock */ struct bio *bi; - struct flush_bio *fb; atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - - fb = 
mempool_alloc(mddev->flush_bio_pool, GFP_NOIO); - fb->fi = fi; - fb->rdev = rdev; - bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); - bio_set_dev(bi, rdev->bdev); bi->bi_end_io = md_end_flush; - bi->bi_private = fb; + bi->bi_private = rdev; + bio_set_dev(bi, rdev->bdev); bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - - atomic_inc(&fi->flush_pending); + atomic_inc(&mddev->flush_pending); submit_bio(bi); - rcu_read_lock(); rdev_dec_pending(rdev, mddev); } rcu_read_unlock(); + if (atomic_dec_and_test(&mddev->flush_pending)) + queue_work(md_wq, &mddev->flush_work); +} + +static void md_submit_flush_data(struct work_struct *ws) +{ + struct mddev *mddev = container_of(ws, struct mddev, flush_work); + struct bio *bio = mddev->flush_bio; + + /* + * must reset flush_bio before calling into md_handle_request to avoid a + * deadlock, because other bios passed md_handle_request suspend check + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ + mddev->last_flush = mddev->start_flush; + mddev->flush_bio = NULL; + wake_up(&mddev->sb_wait); - if (atomic_dec_and_test(&fi->flush_pending)) { - if (bio->bi_iter.bi_size == 0) { + if (bio->bi_iter.bi_size == 0) { + /* an empty barrier - all done */ + bio_endio(bio); + } else { + bio->bi_opf &= ~REQ_PREFLUSH; + md_handle_request(mddev, bio); + } +} + +void md_flush_request(struct mddev *mddev, struct bio *bio) +{ + ktime_t start = ktime_get_boottime(); + spin_lock_irq(&mddev->lock); + wait_event_lock_irq(mddev->sb_wait, + !mddev->flush_bio || + ktime_after(mddev->last_flush, start), + mddev->lock); + if (!ktime_after(mddev->last_flush, start)) { + WARN_ON(mddev->flush_bio); + mddev->flush_bio = bio; + bio = NULL; + } + spin_unlock_irq(&mddev->lock); + + if (!bio) { + INIT_WORK(&mddev->flush_work, submit_flushes); + queue_work(md_wq, &mddev->flush_work); + } else { + /* flush was performed for some other bio while we waited. */ + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio); - mempool_free(fi, mddev->flush_pool); - } else { - INIT_WORK(&fi->flush_work, submit_flushes); - queue_work(md_wq, &fi->flush_work); + else { + bio->bi_opf &= ~REQ_PREFLUSH; + mddev->pers->make_request(mddev, bio); } } } @@ -566,6 +562,7 @@ void mddev_init(struct mddev *mddev) atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); spin_lock_init(&mddev->lock); + atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; @@ -2863,8 +2860,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = 0; } } else if (cmd_match(buf, "re-add")) { - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && - rdev->saved_raid_disk >= 0) { + if (!rdev->mddev->pers) + err = -EINVAL; + else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && + rdev->saved_raid_disk >= 0) { /* clear_bit is performed _after_ all the devices * have their local Faulty bit cleared. 
If any writes * happen in the meantime in the local node, they @@ -5519,22 +5518,6 @@ int md_run(struct mddev *mddev) if (err) return err; } - if (mddev->flush_pool == NULL) { - mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc, - flush_info_free, mddev); - if (!mddev->flush_pool) { - err = -ENOMEM; - goto abort; - } - } - if (mddev->flush_bio_pool == NULL) { - mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc, - flush_bio_free, mddev); - if (!mddev->flush_bio_pool) { - err = -ENOMEM; - goto abort; - } - } spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -5694,15 +5677,8 @@ int md_run(struct mddev *mddev) return 0; abort: - if (mddev->flush_bio_pool) { - mempool_destroy(mddev->flush_bio_pool); - mddev->flush_bio_pool = NULL; - } - if (mddev->flush_pool){ - mempool_destroy(mddev->flush_pool); - mddev->flush_pool = NULL; - } - + bioset_exit(&mddev->bio_set); + bioset_exit(&mddev->sync_set); return err; } EXPORT_SYMBOL_GPL(md_run); @@ -5906,14 +5882,6 @@ static void __md_stop(struct mddev *mddev) mddev->to_remove = &md_redundancy_group; module_put(pers->owner); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (mddev->flush_bio_pool) { - mempool_destroy(mddev->flush_bio_pool); - mddev->flush_bio_pool = NULL; - } - if (mddev->flush_pool) { - mempool_destroy(mddev->flush_pool); - mddev->flush_pool = NULL; - } } void md_stop(struct mddev *mddev) diff --git a/drivers/md/md.h b/drivers/md/md.h index 8afd6bfdbfb9b5..325cb2136a49e3 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -252,19 +252,6 @@ enum mddev_sb_flags { MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define NR_FLUSH_INFOS 8 -#define NR_FLUSH_BIOS 64 -struct flush_info { - struct bio *bio; - struct mddev *mddev; - struct work_struct flush_work; - atomic_t flush_pending; -}; -struct flush_bio { - struct flush_info *fi; - struct md_rdev *rdev; -}; - struct mddev { void *private; struct md_personality *pers; @@ -470,8 +457,16 @@ struct mddev { * metadata and bitmap writes */ - mempool_t *flush_pool; - mempool_t *flush_bio_pool; + /* Generic flush handling. + * The last to finish preflush schedules a worker to submit + * the rest of the request (without the REQ_PREFLUSH flag). + */ + struct bio *flush_bio; + atomic_t flush_pending; + ktime_t start_flush, last_flush; /* last_flush is when the last completed + * flush was started. 
+ */ + struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 828d86605fb659..f237d6f307529a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4185,7 +4185,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, /* now write out any block on a failed drive, * or P or Q if they were recomputed */ - BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ + dev = NULL; if (s->failed == 2) { dev = &sh->dev[s->failed_num[1]]; s->locked++; @@ -4210,6 +4210,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); } + if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags), + "%s: disk%td not up to date\n", + mdname(conf->mddev), + dev - (struct r5dev *) &sh->dev)) { + clear_bit(R5_LOCKED, &dev->flags); + clear_bit(R5_Wantwrite, &dev->flags); + s->locked--; + } clear_bit(STRIPE_DEGRADED, &sh->state); set_bit(STRIPE_INSYNC, &sh->state); @@ -4221,15 +4229,26 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, case check_state_check_result: sh->check_state = check_state_idle; - if (s->failed > 1) - break; /* handle a successful check operation, if parity is correct * we are done. Otherwise update the mismatch count and repair * parity if !MD_RECOVERY_CHECK */ if (sh->ops.zero_sum_result == 0) { - /* Any parity checked was correct */ - set_bit(STRIPE_INSYNC, &sh->state); + /* both parities are correct */ + if (!s->failed) + set_bit(STRIPE_INSYNC, &sh->state); + else { + /* in contrast to the raid5 case we can validate + * parity, but still have a failure to write + * back + */ + sh->check_state = check_state_compute_result; + /* Returning at this point means that we may go + * off and bring p and/or q uptodate again so + * we make sure to check zero_sum_result again + * to verify if p or q need writeback + */ + } } else { atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 6889c25c62cbd7..9226dca44e907d 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -668,6 +668,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, return -EBUSY; } + if (q->waiting_in_dqbuf && *count) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + if (*count == 0 || q->num_buffers != 0 || (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) { /* @@ -797,6 +802,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } if (!q->num_buffers) { + if (q->waiting_in_dqbuf && *count) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; q->waiting_for_buffers = !q->is_output; @@ -1466,6 +1475,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) for (;;) { int ret; + if (q->waiting_in_dqbuf) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + if (!q->streaming) { dprintk(1, "streaming off, will not wait for buffers\n"); return -EINVAL; @@ -1493,6 +1507,7 @@ static int 
__vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) return -EAGAIN; } + q->waiting_in_dqbuf = 1; /* * We are streaming and blocking, wait for another buffer to * become ready or for streamoff. Driver's lock is released to @@ -1513,6 +1528,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) * the locks or return an error if one occurred. */ call_void_qop(q, wait_finish, q); + q->waiting_in_dqbuf = 0; if (ret) { dprintk(1, "sleep was interrupted\n"); return ret; @@ -2361,6 +2377,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ if (!data) return -EINVAL; + if (q->waiting_in_dqbuf) { + dprintk(3, "another dup()ped fd is %s\n", + read ? "reading" : "writing"); + return -EBUSY; + } + /* * Initialize emulator on first call. */ diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index dffd2d4bf1c8b9..c25c9279740890 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u16 u16tmp; u32 tuner_frequency_khz, target_mclk; s32 s32tmp; + static const struct reg_sequence reset_buf[] = { + {0x07, 0x80}, {0x07, 0x00} + }; dev_dbg(&client->dev, "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", @@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) } /* reset */ - ret = regmap_write(dev->regmap, 0x07, 0x80); - if (ret) - goto err; - - ret = regmap_write(dev->regmap, 0x07, 0x00); + ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2); if (ret) goto err; diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c index feacd8da421daf..d55d8f169dca64 100644 --- a/drivers/media/dvb-frontends/si2165.c +++ b/drivers/media/dvb-frontends/si2165.c @@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state) static int si2165_wait_init_done(struct si2165_state *state) { - int ret = -EINVAL; + int ret; u8 val = 0; int i; for (i = 0; i < 3; ++i) { - si2165_readreg8(state, REG_INIT_DONE, &val); + ret = si2165_readreg8(state, REG_INIT_DONE, &val); + if (ret < 0) + return ret; if (val == 0x01) return 0; usleep_range(1000, 50000); } dev_err(&state->client->dev, "init_done was not set\n"); - return ret; + return -EINVAL; } static int si2165_upload_firmware_block(struct si2165_state *state, diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index 4715edc8ca33e2..e6a8b5669b9ccc 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd, if (ov2659_formats[index].code == mf->code) break; - if (index < 0) - return -EINVAL; + if (index < 0) { + index = 0; + mf->code = ov2659_formats[index].code; + } mf->colorspace = V4L2_COLORSPACE_SRGB; mf->field = V4L2_FIELD_NONE; diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index 17a34b4a819d32..edded869d7920d 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -811,9 +811,18 @@ static int ov6650_video_probe(struct i2c_client *client) u8 pidh, pidl, midh, midl; int ret; + priv->clk = v4l2_clk_get(&client->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + dev_err(&client->dev, "v4l2_clk request err: %d\n", ret); + return ret; + } + ret = ov6650_s_power(&priv->subdev, 1); if (ret < 0) - return ret; + goto eclkput; + + msleep(20); /* * check and show product ID and 
manufacturer ID @@ -848,6 +857,11 @@ static int ov6650_video_probe(struct i2c_client *client) done: ov6650_s_power(&priv->subdev, 0); + if (!ret) + return 0; +eclkput: + v4l2_clk_put(priv->clk); + return ret; } @@ -990,18 +1004,9 @@ static int ov6650_probe(struct i2c_client *client, priv->code = MEDIA_BUS_FMT_YUYV8_2X8; priv->colorspace = V4L2_COLORSPACE_JPEG; - priv->clk = v4l2_clk_get(&client->dev, NULL); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto eclkget; - } - ret = ov6650_video_probe(client); - if (ret) { - v4l2_clk_put(priv->clk); -eclkget: + if (ret) v4l2_ctrl_handler_free(&priv->hdl); - } return ret; } diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index 5817d9cde4d0c0..6d8e4afe9673af 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d /* enable i2c-port pins */ saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26)); - hexium->i2c_adapter = (struct i2c_adapter) { - .name = "hexium gemini", - }; + strscpy(hexium->i2c_adapter.name, "hexium gemini", + sizeof(hexium->i2c_adapter.name)); saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if (i2c_add_adapter(&hexium->i2c_adapter) < 0) { DEB_S("cannot register i2c-device. skipping.\n"); diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 0a05176c18ab6e..a794f9e5f99087 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev) saa7146_write(dev, DD1_STREAM_B, 0x00000000); saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); - hexium->i2c_adapter = (struct i2c_adapter) { - .name = "hexium orion", - }; + strscpy(hexium->i2c_adapter.name, "hexium orion", + sizeof(hexium->i2c_adapter.name)); saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if (i2c_add_adapter(&hexium->i2c_adapter) < 0) { DEB_S("cannot register i2c-device. 
skipping.\n"); diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index d20d3df5778bcf..a3cfefdbee1275 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -1999,6 +1999,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx) /* Clear decode success flag */ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS); + /* Clear error return value */ + coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB); + trace_coda_dec_pic_run(ctx, meta); coda_command_async(ctx, CODA_COMMAND_PIC_RUN); diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 721564176d8c07..d3868226589223 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -808,6 +808,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f, sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); if (!sd_fmt) { + if (!dcmi->num_of_sd_formats) + return -ENODATA; + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; pix->pixelformat = sd_fmt->fourcc; } @@ -986,6 +989,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi, sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); if (!sd_fmt) { + if (!dcmi->num_of_sd_formats) + return -ENODATA; + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; pix->pixelformat = sd_fmt->fourcc; } @@ -1645,7 +1651,7 @@ static int dcmi_probe(struct platform_device *pdev) dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(dcmi->rstc)) { dev_err(&pdev->dev, "Could not get reset control\n"); - return -ENODEV; + return PTR_ERR(dcmi->rstc); } /* Get bus characteristics from devicetree */ @@ -1660,7 +1666,7 @@ static int dcmi_probe(struct platform_device *pdev) of_node_put(np); if (ret) { dev_err(&pdev->dev, "Could not parse the endpoint\n"); - return -ENODEV; + return ret; } if (ep.bus_type == V4L2_MBUS_CSI2) { @@ -1673,8 +1679,9 @@ static int dcmi_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(&pdev->dev, "Could not get irq\n"); - return -ENODEV; + if (irq != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get irq\n"); + return irq; } dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1694,12 +1701,13 @@ static int dcmi_probe(struct platform_device *pdev) dev_name(&pdev->dev), dcmi); if (ret) { dev_err(&pdev->dev, "Unable to request irq %d\n", irq); - return -ENODEV; + return ret; } mclk = devm_clk_get(&pdev->dev, "mclk"); if (IS_ERR(mclk)) { - dev_err(&pdev->dev, "Unable to get mclk\n"); + if (PTR_ERR(mclk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get mclk\n"); return PTR_ERR(mclk); } diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c index c01e1592ad0a8f..c8ffe7bff77f16 100644 --- a/drivers/media/platform/video-mux.c +++ b/drivers/media/platform/video-mux.c @@ -365,9 +365,14 @@ static int video_mux_probe(struct platform_device *pdev) vmux->active = -1; vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads), GFP_KERNEL); + if (!vmux->pads) + return -ENOMEM; + vmux->format_mbus = devm_kcalloc(dev, num_pads, sizeof(*vmux->format_mbus), GFP_KERNEL); + if (!vmux->format_mbus) + return -ENOMEM; for (i = 0; i < num_pads; i++) { vmux->pads[i].flags = (i < num_pads - 1) ? 
MEDIA_PAD_FL_SINK diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c index 9246f265de31b9..27db8835c2410b 100644 --- a/drivers/media/platform/vimc/vimc-core.c +++ b/drivers/media/platform/vimc/vimc-core.c @@ -303,6 +303,8 @@ static int vimc_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "probe"); + memset(&vimc->mdev, 0, sizeof(vimc->mdev)); + /* Create platform_device for each entity in the topology*/ vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents, sizeof(*vimc->subdevs), GFP_KERNEL); diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c index fcc897fb247bc2..392754c18046cb 100644 --- a/drivers/media/platform/vimc/vimc-streamer.c +++ b/drivers/media/platform/vimc/vimc-streamer.c @@ -120,7 +120,6 @@ static int vimc_streamer_thread(void *data) int i; set_freezable(); - set_current_state(TASK_UNINTERRUPTIBLE); for (;;) { try_to_freeze(); @@ -137,6 +136,7 @@ static int vimc_streamer_thread(void *data) break; } //wait for 60hz + set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ / 60); } diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index baa7c83ee6e020..3b09ffceefd56a 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -992,7 +992,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); if (dev->bitmap_cap && (compose->width != s->r.width || compose->height != s->r.height)) { - kfree(dev->bitmap_cap); + vfree(dev->bitmap_cap); dev->bitmap_cap = NULL; } *compose = s->r; diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 800d69c3f80b86..1cf4019689a566 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload, return -EIO; } /* Send response data to caller */ - if (response != NULL && response_len != NULL && evt_hdr->dlen) { + if (response != NULL && response_len != NULL && evt_hdr->dlen && + evt_hdr->dlen <= payload_len) { /* Skip header info and copy only response data */ skb_pull(skb, sizeof(struct fm_event_msg_hdr)); memcpy(response, skb->data, evt_hdr->dlen); @@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev) return; fm_evt_hdr = (void *)skb->data; + if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag)) + return; /* Skip header info and copy only response data */ skb_pull(skb, sizeof(struct fm_event_msg_hdr)); @@ -1308,7 +1311,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev) static int fm_power_up(struct fmdev *fmdev, u8 mode) { u16 payload; - __be16 asic_id, asic_ver; + __be16 asic_id = 0, asic_ver = 0; int resp_len, ret; u8 fw_name[50]; diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 8bf5637b3a69ac..e613c0175591ec 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -773,8 +773,6 @@ static void serial_ir_exit(void) static int __init serial_ir_init_module(void) { - int result; - switch (type) { case IR_HOMEBREW: case IR_IRDEO: @@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void) if (sense != -1) sense = !!sense; - result = serial_ir_init(); - if (!result) - return 0; - - serial_ir_exit(); - return result; + return serial_ir_init(); } static void __exit 
serial_ir_exit_module(void) diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 62b45062b1e685..3e111f7f56dfee 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d) dprintk(1, "au0828_analog_stream_enable called\n"); + if (test_bit(DEV_DISCONNECTED, &d->dev_state)) + return -ENODEV; + iface = usb_ifnum_to_if(d->usbdev, 0); if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) { dprintk(1, "Changing intf#0 to alt 5\n"); @@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count) return rc; } + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1); + if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { - v4l2_device_call_all(&dev->v4l2_dev, 0, video, - s_stream, 1); dev->vid_timeout_running = 1; mod_timer(&dev->vid_timeout, jiffies + (HZ / 10)); } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) { @@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq) dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users); - if (dev->streaming_users-- == 1) + if (dev->streaming_users-- == 1) { au0828_uninit_isoc(dev); + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); + } - v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); dev->vid_timeout_running = 0; del_timer_sync(&dev->vid_timeout); @@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq) dprintk(1, "au0828_stop_vbi_streaming called %d\n", dev->streaming_users); - if (dev->streaming_users-- == 1) + if (dev->streaming_users-- == 1) { au0828_uninit_isoc(dev); + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); + } spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.vbi_buf != NULL) { diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 99f106b13280f1..d47318958fe566 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -1244,8 +1244,7 @@ static int __init cpia2_init(void) LOG("%s v%s\n", ABOUT, CPIA_VERSION); check_parameters(); - cpia2_usb_init(); - return 0; + return cpia2_usb_init(); } diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index e28bd8836751e4..ae0814dd202a6f 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d) return 0; } -static void dvbsky_exit(struct dvb_usb_device *d) +static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap) { + struct dvb_usb_device *d = adap_to_d(adap); struct dvbsky_state *state = d_to_priv(d); - struct dvb_usb_adapter *adap = &d->adapter[0]; + + dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id); dvb_module_release(state->i2c_client_tuner); dvb_module_release(state->i2c_client_demod); dvb_module_release(state->i2c_client_ci); - adap->fe[0] = NULL; + return 0; } /* DVB USB Driver stuff */ @@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_s960_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -667,11 +669,11 @@ static struct 
dvb_usb_device_properties dvbsky_s960c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_s960c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_t680c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_t330_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_mygica_t230c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .num_adapters = 1, .adapter = { diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c index 24f5b615dc7af7..dfa9f899d0c255 100644 --- a/drivers/media/usb/go7007/go7007-fw.c +++ b/drivers/media/usb/go7007/go7007-fw.c @@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space) return cnt; } -static int do_special(struct go7007 *go, u16 type, __le16 *code, int space, - int *framelen) +static noinline_for_stack int do_special(struct go7007 *go, u16 type, + __le16 *code, int space, int *framelen) { switch (type) { case SPECIAL_FRM_HEAD: diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 405a6a76d820f7..b12356c533a656 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev, /* check the packet status and length */ st = urb->iso_frame_desc[i].status; if (st) { - pr_err("ISOC data error: [%d] len=%d, status=%d\n", + gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n", i, len, st); gspca_dev->last_packet_type = DISCARD_PACKET; continue; @@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev, } resubmit: + if (!gspca_dev->streaming) + return; /* resubmit the URB */ st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) @@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb) struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; gspca_dbg(gspca_dev, D_PACK, "isoc irq\n"); - if (!vb2_start_streaming_called(&gspca_dev->queue)) + if (!gspca_dev->streaming) return; fill_frame(gspca_dev, urb); } @@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb) int st; gspca_dbg(gspca_dev, D_PACK, "bulk irq\n"); - if (!vb2_start_streaming_called(&gspca_dev->queue)) + if (!gspca_dev->streaming) return; switch (urb->status) { case 0: @@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb) 
urb->actual_length); resubmit: + if (!gspca_dev->streaming) + return; /* resubmit the URB */ if (gspca_dev->cam.bulk_nurbs != 0) { st = usb_submit_urb(urb, GFP_ATOMIC); @@ -1630,6 +1634,8 @@ void gspca_disconnect(struct usb_interface *intf) mutex_lock(&gspca_dev->usb_lock); gspca_dev->present = false; + destroy_urbs(gspca_dev); + gspca_input_destroy_urb(gspca_dev); vb2_queue_error(&gspca_dev->queue); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index a8519da0020bf8..673fdca8d2dac0 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp) static int ctrl_check_input(struct pvr2_ctrl *cptr,int v) { + if (v < 0 || v > PVR2_CVAL_INPUT_MAX) + return 0; return ((1 << v) & cptr->hdw->input_allowed_mask) != 0; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h index 25648add77e58c..bd2b7a67b7322d 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h @@ -50,6 +50,7 @@ #define PVR2_CVAL_INPUT_COMPOSITE 2 #define PVR2_CVAL_INPUT_SVIDEO 3 #define PVR2_CVAL_INPUT_RADIO 4 +#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO enum pvr2_config { pvr2_config_empty, /* No configuration */ diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index be3634407f1f36..3071d9bc77f488 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -401,6 +401,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) struct smsusb_device_t *dev; void *mdev; int i, rc; + int align = 0; /* create device object */ dev = kzalloc(sizeof(struct smsusb_device_t), GFP_KERNEL); @@ -412,6 +413,24 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) dev->udev = interface_to_usbdev(intf); dev->state = SMSUSB_DISCONNECTED; + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + struct usb_endpoint_descriptor *desc = + &intf->cur_altsetting->endpoint[i].desc; + + if (desc->bEndpointAddress & USB_DIR_IN) { + dev->in_ep = desc->bEndpointAddress; + align = usb_endpoint_maxp(desc) - sizeof(struct sms_msg_hdr); + } else { + dev->out_ep = desc->bEndpointAddress; + } + } + + pr_debug("in_ep = %02x, out_ep = %02x\n", dev->in_ep, dev->out_ep); + if (!dev->in_ep || !dev->out_ep || align < 0) { /* Missing endpoints? */ + smsusb_term_device(intf); + return -ENODEV; + } + params.device_type = sms_get_board(board_id)->type; switch (params.device_type) { @@ -426,24 +445,12 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) /* fall-thru */ default: dev->buffer_size = USB2_BUFFER_SIZE; - dev->response_alignment = - le16_to_cpu(dev->udev->ep_in[1]->desc.wMaxPacketSize) - - sizeof(struct sms_msg_hdr); + dev->response_alignment = align; params.flags |= SMS_DEVICE_FAMILY2; break; } - for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { - if (intf->cur_altsetting->endpoint[i].desc. 
bEndpointAddress & USB_DIR_IN) - dev->in_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress; - else - dev->out_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress; - } - - pr_debug("in_ep = %02x, out_ep = %02x\n", - dev->in_ep, dev->out_ep); - params.device = &dev->udev->dev; params.usb_device = dev->udev; params.buffer_size = dev->buffer_size; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 6f1fd40fce1030..c3ddbf6c202a23 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -914,7 +914,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, unsigned int size; unsigned int i; - extra_size = ALIGN(extra_size, sizeof(*entity->pads)); + extra_size = roundup(extra_size, sizeof(*entity->pads)); num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index bd25faf6d13d31..c8f16666256cc3 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc) u32 value; /* compute the number of MC clock cycles per tick */ - tick = mc->tick * clk_get_rate(mc->clk); + tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk); do_div(tick, NSEC_PER_SEC); value = readl(mc->regs + MC_EMEM_ARB_CFG); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 50bffc3382d77f..ff3fba16e7359c 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) { u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; + /* Set the device in reset state */ + writel(0, lpss->priv + LPSS_PRIV_RESETS); + intel_lpss_deassert_reset(lpss); intel_lpss_set_remap_addr(lpss); diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index 3bd75061f77768..f78be039e46376 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = { { .compatible = "ti,tps65912", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table); static int tps65912_spi_probe(struct spi_device *spi) { diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index dd19f17a1b6375..2b8c479dbfa6e1 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on) } } + /* + * Register access can produce errors after power-up unless we + * wait at least 8ms based on measurements on duovero. 
+ */ + usleep_range(10000, 12000); + /* Sync with the HW */ - regcache_sync(twl6040->regmap); + ret = regcache_sync(twl6040->regmap); + if (ret) { + dev_err(twl6040->dev, "Failed to sync with the HW: %i\n", + ret); + goto out; + } /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 8c1b63a4337be3..d2098b4d29451e 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -780,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) if ((m->addr == 0x0) || (m->size == 0)) return -EINVAL; + if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK)) + return -EINVAL; map_addr = (m->addr & PAGE_MASK); map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index f4f8ab6024428e..f68435df76d48b 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -587,6 +587,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, /* determine space needed for page_list. */ data = (unsigned long)uaddr; offs = offset_in_page(data); + if (size > ULONG_MAX - PAGE_SIZE - offs) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); m->page_list = kcalloc(m->nr_pages, diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 6193270e7b3dca..eb4d90b7d99e16 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1139,7 +1139,7 @@ static void kgdbts_put_char(u8 chr) static int param_set_kgdbts_var(const char *kmessage, const struct kernel_param *kp) { - int len = strlen(kmessage); + size_t len = strlen(kmessage); if (len >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); @@ -1159,7 +1159,7 @@ static int param_set_kgdbts_var(const char *kmessage, strcpy(config, kmessage); /* Chop out \n char as a result of echo */ - if (config[len - 1] == '\n') + if (len && config[len - 1] == '\n') config[len - 1] = '\0'; /* Go and configure with the new params. 
*/ diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 896e2df9400fd2..fd33a3b9c66f62 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -662,6 +662,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, data = (struct pci_endpoint_test_data *)ent->driver_data; if (data) { test_reg_bar = data->test_reg_bar; + test->test_reg_bar = test_reg_bar; test->alignment = data->alignment; irq_type = data->irq_type; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 6600b3466dfbc6..0a74785e575ba9 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && - cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + if (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 && + !host->retune_crc_disable && (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || (mrq->stop && mrq->stop->error == -EILSEQ))) diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c index efb8a7965dd4a9..154f4204d58cbd 100644 --- a/drivers/mmc/core/pwrseq_emmc.c +++ b/drivers/mmc/core/pwrseq_emmc.c @@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc { #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq) -static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq) -{ - gpiod_set_value(pwrseq->reset_gpio, 1); - udelay(1); - gpiod_set_value(pwrseq->reset_gpio, 0); - udelay(200); -} - static void mmc_pwrseq_emmc_reset(struct mmc_host *host) { struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq); - __mmc_pwrseq_emmc_reset(pwrseq); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); + udelay(1); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); + udelay(200); } static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this, @@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this, { struct mmc_pwrseq_emmc *pwrseq = container_of(this, struct mmc_pwrseq_emmc, reset_nb); + gpiod_set_value(pwrseq->reset_gpio, 1); + udelay(1); + gpiod_set_value(pwrseq->reset_gpio, 0); + udelay(200); - __mmc_pwrseq_emmc_reset(pwrseq); return NOTIFY_DONE; } @@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev) if (IS_ERR(pwrseq->reset_gpio)) return PTR_ERR(pwrseq->reset_gpio); - /* - * register reset handler to ensure emmc reset also from - * emergency_reboot(), priority 255 is the highest priority - * so it will be executed before any system reboot handler. - */ - pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb; - pwrseq->reset_nb.priority = 255; - register_restart_handler(&pwrseq->reset_nb); + if (!gpiod_cansleep(pwrseq->reset_gpio)) { + /* + * register reset handler to ensure emmc reset also from + * emergency_reboot(), priority 255 is the highest priority + * so it will be executed before any system reboot handler. 
+ */ + pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb; + pwrseq->reset_nb.priority = 255; + register_restart_handler(&pwrseq->reset_nb); + } else { + dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n"); + } pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops; pwrseq->pwrseq.dev = dev; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 6edffeed995349..18aae28845ec99 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -494,6 +494,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq) blk_mq_unquiesce_queue(q); blk_cleanup_queue(q); + blk_mq_free_tag_set(&mq->tag_set); /* * A request can be completed before the next request, potentially diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d0d9f90e7cdfb7..cfb8ee24eaba15 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -216,6 +216,14 @@ static int mmc_decode_scr(struct mmc_card *card) if (scr->sda_spec3) scr->cmds = UNSTUFF_BITS(resp, 32, 2); + + /* SD Spec says: any SD Card shall set at least bits 0 and 2 */ + if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) || + !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) { + pr_err("%s: invalid bus width\n", mmc_hostname(card->host)); + return -EINVAL; + } + return 0; } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d8e17ea6126de8..0aa99694b93798 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -934,6 +934,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host) */ static int mmc_sdio_suspend(struct mmc_host *host) { + /* Prevent processing of SDIO IRQs in suspended state. */ + mmc_card_set_suspended(host->card); + cancel_delayed_work_sync(&host->sdio_irq_work); + mmc_claim_host(host); if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) @@ -982,13 +986,20 @@ static int mmc_sdio_resume(struct mmc_host *host) err = sdio_enable_4bit_bus(host->card); } - if (!err && host->sdio_irqs) { + if (err) + goto out; + + /* Allow SDIO IRQs to be processed again. */ + mmc_card_clr_suspended(host->card); + + if (host->sdio_irqs) { if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) wake_up_process(host->sdio_irq_thread); else if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); } +out: mmc_release_host(host); host->pm_flags &= ~MMC_PM_KEEP_POWER; diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9a9..ed2d8c48ea1780 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -18,6 +18,7 @@ #include "sdio_ops.h" #include "core.h" #include "card.h" +#include "host.h" /** * sdio_claim_host - exclusively claim a bus for a certain SDIO function @@ -725,3 +726,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags) return 0; } EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags); + +/** + * sdio_retune_crc_disable - temporarily disable retuning on CRC errors + * @func: SDIO function attached to host + * + * If the SDIO card is known to be in a state where it might produce + * CRC errors on the bus in response to commands (like if we know it is + * transitioning between power states), an SDIO function driver can + * call this function to temporarily disable the SD/MMC core behavior of + * triggering an automatic retuning. + * + * This function should be called while the host is claimed and the host + * should remain claimed until sdio_retune_crc_enable() is called. 
+ * Specifically, the expected sequence of calls is: + * - sdio_claim_host() + * - sdio_retune_crc_disable() + * - some number of calls like sdio_writeb() and sdio_readb() + * - sdio_retune_crc_enable() + * - sdio_release_host() + */ +void sdio_retune_crc_disable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = true; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_disable); + +/** + * sdio_retune_crc_enable - re-enable retuning on CRC errors + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_crc_disable(). + */ +void sdio_retune_crc_enable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = false; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_enable); + +/** + * sdio_retune_hold_now - start deferring retuning requests till release + * @func: SDIO function attached to host + * + * This function can be called if it's currently a bad time to do + * a retune of the SDIO card. Retune requests made during this time + * will be held and we'll actually do the retune sometime after the + * release. + * + * This function could be useful if an SDIO card is in a power state + * where it can respond to a small subset of commands that doesn't + * include the retuning command. Care should be taken when using + * this function since (presumably) the retuning request we might be + * deferring was made for a good reason. + * + * This function should be called while the host is claimed. + */ +void sdio_retune_hold_now(struct sdio_func *func) +{ + mmc_retune_hold_now(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_hold_now); + +/** + * sdio_retune_release - signal that it's OK to retune now + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_hold_now(). Calling this + * function won't make a retune happen right away but will allow + * them to be scheduled normally. + * + * This function should be called while the host is claimed. + */ +void sdio_retune_release(struct sdio_func *func) +{ + mmc_retune_release(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_release); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 7ca7b99413f0d9..b299a24d33f965 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -38,6 +38,10 @@ static int process_sdio_pending_irqs(struct mmc_host *host) unsigned char pending; struct sdio_func *func; + /* Don't process SDIO IRQs if the card is suspended. */ + if (mmc_card_suspended(card)) + return 0; + /* * Optimization, if there is only 1 function interrupt registered * and we know an IRQ was signaled then call irq handler directly. diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 67f6bd24a9d0c1..ea254d00541f12 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, } status = spi_sync_locked(spi, &host->m); + if (status < 0) { + dev_dbg(&spi->dev, "read error %d\n", status); + return status; + } if (host->dma_dev) { dma_sync_single_for_cpu(host->dma_dev, diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 1841d250e9e2c6..eb1a65cb878f03 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1295,9 +1295,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) } /* - * Don't poll for busy completion in irq context. + * Busy detection has been handled by mmci_cmd_irq() above. + * Clear the status bit to prevent polling in IRQ context.
*/ - if (host->variant->busy_detect && host->busy_status) + if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; ret = 1; diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 94eeed2a1b5331..f903ab96aa21e4 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -185,7 +185,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { }; static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, .ops = &sdhci_iproc_32only_ops, }; @@ -208,7 +209,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = { static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, .ops = &sdhci_iproc_ops, }; diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index a40bcc27f18789..7fdac277e382f0 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -814,7 +814,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev) host->mmc_host_ops.start_signal_voltage_switch = sdhci_arasan_voltage_switch; sdhci_arasan->has_cqe = true; - host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + host->mmc->caps2 |= MMC_CAP2_CQE; + + if (!of_property_read_bool(np, "disable-cqe-dcmd")) + host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } ret = sdhci_arasan_add_host(sdhci_arasan); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index a7bf8515116fdb..e5c598ae5f2441 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -643,6 +643,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) + mdelay(5); + if (mask & SDHCI_RESET_ALL) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; @@ -917,6 +920,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) if (esdhc->vendor_ver > VENDOR_V_22) host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { + host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + } + if (of_device_is_compatible(np, "fsl,p5040-esdhc") || of_device_is_compatible(np, "fsl,p5020-esdhc") || of_device_is_compatible(np, "fsl,p4080-esdhc") || diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index cc3ffeffd7a2e6..fa8d9da2ab7f6b 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -117,6 +117,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) */ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { current_bus_width = mmc->ios.bus_width; + mmc->ios.bus_width = MMC_BUS_WIDTH_4; sdhci_set_bus_width(host, MMC_BUS_WIDTH_4); } @@ -128,8 +129,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_end_tuning(host); - if (current_bus_width == MMC_BUS_WIDTH_8) + if (current_bus_width == MMC_BUS_WIDTH_8) { + mmc->ios.bus_width = MMC_BUS_WIDTH_8; sdhci_set_bus_width(host, current_bus_width); + } host->flags &= 
~SDHCI_HS400_TUNING; return 0; diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 98f6b9c4b68403..d16b57081c95ab 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -10,6 +10,7 @@ #include #define SPINAND_MFR_MACRONIX 0xC2 +#define MACRONIX_ECCSR_MASK 0x0F static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) SPI_MEM_OP_DUMMY(1, 1), SPI_MEM_OP_DATA_IN(1, eccsr, 1)); - return spi_mem_exec_op(spinand->spimem, &op); + int ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + *eccsr &= MACRONIX_ECCSR_MASK; + return 0; } static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index af0a220195163c..d60cbf23d9aa05 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, while (len > 0) { block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + /* Read cannot cross 4K boundary */ + block_size = min_t(loff_t, from + block_size, + round_up(from + 1, SZ_4K)) - from; + writel(from, ispi->base + FADDR); val = readl(ispi->base + HSFSTS_CTL); @@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, while (len > 0) { block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + /* Write cannot cross 4K boundary */ + block_size = min_t(loff_t, to + block_size, + round_up(to + 1, SZ_4K)) - to; + writel(to, ispi->base + FADDR); val = readl(ispi->base + HSFSTS_CTL); diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 21cde7e7862168..0d3ba056cda3ca 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ -obj-$(CONFIG_NET_DSA) += dsa/ +obj-y += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 091b454e83fccf..039beb5e0fa278 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3107,13 +3107,18 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_CHANGE: /* For 802.3ad mode only: * Getting invalid Speed/Duplex values here will put slave - * in weird state. So mark it as link-fail for the time - * being and let link-monitoring (miimon) set it right when - * correct speeds/duplex are available. + * in weird state. Mark it as link-fail if the link was + * previously up or link-down if it hasn't yet come up, and + * let link-monitoring (miimon) set it right when correct + * speeds/duplex are available. 
*/ if (bond_update_speed_duplex(slave) && - BOND_MODE(bond) == BOND_MODE_8023AD) - slave->link = BOND_LINK_FAIL; + BOND_MODE(bond) == BOND_MODE_8023AD) { + if (slave->last_link_up) + slave->link = BOND_LINK_FAIL; + else + slave->link = BOND_LINK_DOWN; + } if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 2646faffd36e9a..6f265d2e647ba8 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -165,7 +165,7 @@ #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) -#define FLEXCAN_TIMEOUT_US (50) +#define FLEXCAN_TIMEOUT_US (250) /* FLEXCAN hardware feature flags * diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 045f0845e665e4..3df23487487f78 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1424,7 +1424,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, - .bittiming_const = &xcan_bittiming_const, + .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d3035e0eacf078..181d39e564a165 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -877,7 +877,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); if (err) return U64_MAX; - high = reg; + low |= ((u32)reg) << 16; } break; case STATS_TYPE_BANK1: @@ -1484,7 +1484,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, int err; if (!vid) - return -EINVAL; + return -EOPNOTSUPP; entry->vid = vid - 1; entry->valid = false; diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 6dedd43442cc57..35b767baf21f7c 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) struct rtl8366_vlan_4k vlan4k; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) + /* Use VLAN nr port + 1 since VLAN0 is not valid */ + if (!smi->ops->is_vlan_valid(smi, port + 1)) return -EINVAL; dev_info(smi->dev, "%s filtering on port %d\n", @@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) * The hardware support filter ID (FID) 0..7, I have no clue how to * support this in the driver when the callback only says on/off. 
*/ - ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); if (ret) return ret; /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port, + ret = rtl8366_set_vlan(smi, port + 1, vlan4k.member, vlan4k.untag, 1); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 1b5f591cf0a23b..b5d72815776cb0 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2223,7 +2223,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; - strncpy(host_info->kernel_ver_str, utsname()->version, + strlcpy(host_info->kernel_ver_str, utsname()->version, sizeof(host_info->kernel_ver_str) - 1); host_info->os_dist = 0; strncpy(host_info->os_dist_str, utsname()->release, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 6f3312350cac4c..b3c7994d73eb15 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) bool aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); - unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; + unsigned int budget; - for (; self->sw_head != self->hw_head && budget--; - self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + for (budget = AQ_CFG_TX_CLEAN_BUDGET; + budget && self->sw_head != self->hw_head; budget--) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; if (likely(buff->is_mapped)) { @@ -167,6 +167,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) buff->pa = 0U; buff->eop_index = 0xffffU; + self->sw_head = aq_ring_next_dx(self, self->sw_head); } return !!budget; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 56363ff5c89199..51cd1f98bcf07e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -695,38 +695,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { /* MAC error or DMA error */ buff->is_error = 1U; - } else { - if (self->aq_nic_cfg->is_rss) { - /* last 4 byte */ - u16 rss_type = rxd_wb->type & 0xFU; - - if (rss_type && rss_type < 0x8U) { - buff->is_hash_l4 = (rss_type == 0x4 || - rss_type == 0x5); - buff->rss_hash = rxd_wb->rss_hash; - } + } + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; } + } - if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { - buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; - buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; - buff->next = 0U; - buff->is_eop = 1U; + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + buff->len = + rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? 
+ AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; } else { - if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & - rxd_wb->status) { - /* LRO */ - buff->next = rxd_wb->next_desc_ptr; - ++ring->stats.rx.lro_packets; - } else { - /* jumbo */ - buff->next = - aq_ring_next_dx(ring, - ring->hw_head); - ++ring->stats.rx.jumbo_packets; - } + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; } } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index de46331aefc16c..c54a74de7b0881 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1599,6 +1599,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); rc = -ENOMEM; goto next_rx; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c2fd323c407828..ea75f275023ffa 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -75,8 +75,8 @@ struct l2t_data { struct l2t_entry *rover; /* starting point for next allocation */ atomic_t nfree; /* number of free entries */ rwlock_t lock; - struct l2t_entry l2tab[0]; struct rcu_head rcu_head; /* to handle rcu cleanup */ + struct l2t_entry l2tab[]; }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 961e3087d1d38c..bb04c695ab9fdc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6010,15 +6010,24 @@ static int __init cxgb4_init_module(void) ret = pci_register_driver(&cxgb4_driver); if (ret < 0) - debugfs_remove(cxgb4_debugfs_root); + goto err_pci; #if IS_ENABLED(CONFIG_IPV6) if (!inet6addr_registered) { - register_inet6addr_notifier(&cxgb4_inet6addr_notifier); - inet6addr_registered = true; + ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier); + if (ret) + pci_unregister_driver(&cxgb4_driver); + else + inet6addr_registered = true; } #endif + if (ret == 0) + return ret; + +err_pci: + debugfs_remove(cxgb4_debugfs_root); + return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index c116f96956fe93..f2aba5b160c2d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->val.ivlan = vlan_tci; fs->mask.ivlan = vlan_tci_mask; + fs->val.ivlan_vld = 1; + fs->mask.ivlan_vld = 1; + /* Chelsio adapters use ivlan_vld bit to match vlan packets * as 802.1Q. Also, when vlan tag is present in packets, * ethtype match is used then to match on ethtype of inner @@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(struct net_device *dev, * ethtype value with ethtype of inner header. 
*/ if (fs->val.ethtype == ETH_P_8021Q) { - fs->val.ivlan_vld = 1; - fs->mask.ivlan_vld = 1; fs->val.ethtype = 0; fs->mask.ethtype = 0; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 66535d1653f6d8..f16853c3c851a7 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = { .remove = de4x5_eisa_remove, } }; -MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); #endif #ifdef CONFIG_PCI diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3f6749fc889f97..bfb16a47449017 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1105,7 +1105,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); break; case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_qs - 1; + cmd->data = adapter->num_rx_qs; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3d1c611be54065..56c492b4c94fc6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3572,7 +3572,7 @@ fec_probe(struct platform_device *pdev) if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_reset: - pm_runtime_put(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_regulator: clk_disable_unprepare(fep->clk_ahb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index e2710ff48fb0fb..1fa0cd527ead3d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -339,6 +339,7 @@ static int __lb_setup(struct net_device *ndev, static int __lb_up(struct net_device *ndev, enum hnae_loop loop_mode) { +#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300 struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; int speed, duplex; @@ -365,6 +366,9 @@ static int __lb_up(struct net_device *ndev, h->dev->ops->adjust_link(h, speed, duplex); + /* wait adjust link done and phy ready */ + msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 340baf6a470cc1..4648c6a9d9e865 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4300,8 +4300,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, return hclge_add_mac_vlan_tbl(vport, &req, NULL); /* check if we just hit the duplicate */ - if (!ret) - ret = -EINVAL; + if (!ret) { + dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", + vport->vport_id, addr); + return 0; + } dev_err(&hdev->pdev->dev, "PF failed to add unicast entry(%pM) in the MAC table\n", diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8b11682ebba22d..8cd339c92c1afb 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7329,7 +7329,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); - if (pci_dev_run_wake(pdev)) + if (pci_dev_run_wake(pdev) && hw->mac.type < 
e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f81ad0aa8b0953..4e04985fb4307a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2654,6 +2654,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) @@ -2684,6 +2688,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == @@ -6750,10 +6758,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; + int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; + old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; @@ -6854,6 +6864,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } ret = i40e_configure_queue_channels(vsi); if (ret) { + vsi->num_queue_pairs = old_queue_pairs; netdev_info(netdev, "Failed configuring queue channels\n"); need_reset = true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c6d24eaede184f..d86f3fa7aa6a4f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2399,8 +2399,10 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) (u8 *)&stats, sizeof(stats)); } -/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ -#define I40E_VC_MAX_MAC_ADDR_PER_VF 12 +/* If the VF is not trusted restrict the number of MAC/VLAN it can program + * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast + */ +#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) #define I40E_VC_MAX_VLAN_PER_VF 8 /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index db1543bca7013d..875f97aba6e0dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -652,6 +652,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) case ICE_FC_RX_PAUSE: fc = "RX"; break; + case ICE_FC_NONE: + fc = "None"; + break; default: fc = "Unknown"; break; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index aa39a068858e9c..5aa083d9a6c9ac 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3468,6 +3468,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2ba0d89aaf3c4e..28762314353f91 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4611,7 +4611,7 
@@ static int mvneta_probe(struct platform_device *pdev) err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); - goto err_free_stats; + goto err_netdev; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, @@ -4622,14 +4622,12 @@ static int mvneta_probe(struct platform_device *pdev) return 0; err_netdev: - unregister_netdev(dev); if (pp->bm_priv) { mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); mvneta_bm_put(pp->bm_priv); } -err_free_stats: free_percpu(pp->stats); err_free_ports: free_percpu(pp->ports); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9b10abb604cb26..df5b74f289e192 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1310,8 +1310,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, int i; for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - memcpy(data + i * ETH_GSTRING_LEN, - &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + strscpy(data + i * ETH_GSTRING_LEN, + mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); } } @@ -1404,7 +1404,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) /* Set defaults to the MVPP2 port */ static void mvpp2_defaults_set(struct mvpp2_port *port) { - int tx_port_num, val, queue, ptxq, lrxq; + int tx_port_num, val, queue, lrxq; if (port->priv->hw_version == MVPP21) { /* Update TX FIFO MIN Threshold */ @@ -1422,11 +1422,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); /* Close bandwidth for all queues */ - for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { - ptxq = mvpp2_txq_phys(port->id, queue); + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); - } + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); /* Set refill period to 1 usec, refill tokens * and bucket size to maximum @@ -2271,7 +2269,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, txq->descs_dma = 0; /* Set minimum bandwidth for disabled TXQs */ - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); /* Set Tx descriptors queue starting address and size */ cpu = get_cpu(); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 392fd895f27826..ae2240074d8ef8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) } /* Find tcam entry with matched pair */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) { unsigned char byte[2], enable[2]; struct mvpp2_prs_entry pe; @@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, int tid; /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (!port->priv->prs_shadow[tid].valid || + 
port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + tid = mvpp2_prs_vid_range_find(port, vid, mask); reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) @@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) int tid; /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); /* No such entry */ if (tid < 0) @@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } } } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c7c4432de543b5..cb8a21ce17a4d7 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1784,6 +1784,7 @@ static void mtk_poll_controller(struct net_device *dev) static int mtk_start_dma(struct mtk_eth *eth) { + u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; int err; err = mtk_dma_init(eth); @@ -1800,7 +1801,7 @@ static int mtk_start_dma(struct mtk_eth *eth) MTK_QDMA_GLO_CFG); mtk_w32(eth, - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, MTK_PDMA_GLO_CFG); @@ -2304,13 +2305,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { cmd->data = MTK_MAX_RX_RING_NUM; ret = 0; } break; case ETHTOOL_GRXCLSRLCNT: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { struct mtk_mac *mac = netdev_priv(dev); cmd->rule_cnt = mac->hwlro_ip_cnt; @@ -2318,11 +2319,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } break; case ETHTOOL_GRXCLSRULE: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_entry(dev, cmd); break; case ETHTOOL_GRXCLSRLALL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_all(dev, cmd, rule_locs); break; @@ -2339,11 +2340,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_add_ipaddr(dev, cmd); break; case ETHTOOL_SRXCLSRLDEL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_del_ipaddr(dev, cmd); break; default: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d290f0787dfbb2..94c59939a8cff3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2010,6 +2010,8 @@ static int mlx4_en_set_tunable(struct net_device *dev, return ret; } +#define MLX4_EEPROM_PAGE_LEN 256 + static int mlx4_en_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { @@ -2044,7 +2046,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, break; case MLX4_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ffed2d4c940302..9c481823b3e86f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, rule.port = port; rule.qpn = qpn; INIT_LIST_HEAD(&rule.list); - mlx4_err(dev, "going promisc on %x\n", port); + mlx4_info(dev, "going promisc on %x\n", port); return mlx4_flow_attach(dev, &rule, regid_p); } diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 10fcc22f45908b..ba6ac31a339dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -2077,11 +2077,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, size -= offset + size - I2C_PAGE_SIZE; i2c_addr = I2C_ADDR_LOW; - if (offset >= I2C_PAGE_SIZE) { - /* Reset offset to high page */ - i2c_addr = I2C_ADDR_HIGH; - offset -= I2C_PAGE_SIZE; - } cable_info = (struct mlx4_cable_info *)inmad->data; cable_info->dev_mem_address = cpu_to_be16(offset); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 37a551436e4ab7..b7e3b8902e7e54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -8,6 +8,7 @@ config MLX5_CORE depends on PCI imply PTP_1588_CLOCK imply VXLAN + imply MLXFW default n ---help--- Core driver for low level functionality of the ConnectX-4 and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 37ba7c78859db1..1c225be9c7db99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +/* Must be called with intf_mutex held */ +static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_interface *intf; + bool found = false; + + list_for_each_entry(intf, &intf_list, list) { + if (intf->protocol == protocol) { + dev_ctx = mlx5_get_device(intf, &mdev->priv); + if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) + found = true; + break; + } + } + + return found; +} + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) { mutex_lock(&mlx5_intf_mutex); - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + } mutex_unlock(&mlx5_intf_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 9ca4f88d7cf6b4..792bb8bc0cd34b 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1609,6 +1609,22 @@ static int mlx5e_flash_device(struct net_device *dev, return mlx5e_ethtool_flash_device(priv, flash); } +#ifndef CONFIG_MLX5_EN_RXNFC +/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS + * otherwise this function will be defined from en_fs_ethtool.c + */ +static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (info->cmd != ETHTOOL_GRXRINGS) + return -EOPNOTSUPP; + /* ring_count is needed by ethtool -x */ + info->data = priv->channels.params.num_channels; + return 0; +} +#endif + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1627,8 +1643,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, -#ifdef CONFIG_MLX5_EN_RXNFC .get_rxnfc = mlx5e_get_rxnfc, +#ifdef CONFIG_MLX5_EN_RXNFC .set_rxnfc = mlx5e_set_rxnfc, #endif .flash_device = mlx5e_flash_device, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b190c447aeb067..0f1c296c3ce4ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3734,6 +3734,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); } + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + features &= ~NETIF_F_RXHASH; + if (netdev->features & NETIF_F_RXHASH) + netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); + } + mutex_unlock(&priv->state_lock); return features; @@ -3860,6 +3866,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) memcpy(&priv->tstamp, &config, sizeof(config)); mutex_unlock(&priv->state_lock); + /* might need to fix some features */ + netdev_update_features(priv->netdev); + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } @@ -4702,6 +4711,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (!priv->channels.params.scatter_fcs_en) netdev->features &= ~NETIF_F_RXFCS; + /* prefere CQE compression over rxhash */ + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) + netdev->features &= ~NETIF_F_RXHASH; + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d181645fd968ee..c079f85593d608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2220,7 +2220,7 @@ static struct mlx5_flow_root_namespace cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); /* Create the root namespace */ - root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); + root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) return NULL; @@ -2363,6 +2363,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_egress_root_ns[i]); kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; } static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) @@ -2377,6 +2378,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_ingress_root_ns[i]); kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; } void mlx5_cleanup_fs(struct mlx5_core_dev *dev) @@ -2505,6 +2507,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) for (i--; i >= 0; i--) cleanup_root_ns(steering->esw_egress_root_ns[i]); kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; return err; } @@ -2532,6 +2535,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) for (i--; i >= 0; i--) cleanup_root_ns(steering->esw_ingress_root_ns[i]); kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c5b82e283d1349..ff2f6b8e2fabe5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2488,6 +2488,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + if (!autoneg && cmd->base.speed == SPEED_56000) { + netdev_err(dev, "56G not supported with autoneg off\n"); + return -EINVAL; + } eth_proto_new = autoneg ? 
mlxsw_sp_to_ptys_advert_link(cmd) : mlxsw_sp_to_ptys_speed(cmd->base.speed); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 382bb93cb0900f..ff5c74120c123b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -194,6 +194,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) return; } + rcu_read_lock(); for (i = 0; i < count; i++) { ipv4_addr = payload->tun_info[i].ipv4; port = be32_to_cpu(payload->tun_info[i].egress_port); @@ -209,6 +210,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) neigh_event_send(n, NULL); neigh_release(n); } + rcu_read_unlock(); } static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) @@ -404,9 +406,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) payload = nfp_flower_cmsg_get_data(skb); + rcu_read_lock(); netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port)); if (!netdev) - goto route_fail_warning; + goto fail_rcu_unlock; flow.daddr = payload->ipv4_addr; flow.flowi4_proto = IPPROTO_UDP; @@ -416,21 +419,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) rt = ip_route_output_key(dev_net(netdev), &flow); err = PTR_ERR_OR_ZERO(rt); if (err) - goto route_fail_warning; + goto fail_rcu_unlock; #else - goto route_fail_warning; + goto fail_rcu_unlock; #endif /* Get the neighbour entry for the lookup */ n = dst_neigh_lookup(&rt->dst, &flow.daddr); ip_rt_put(rt); if (!n) - goto route_fail_warning; - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL); + goto fail_rcu_unlock; + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); neigh_release(n); + rcu_read_unlock(); return; -route_fail_warning: +fail_rcu_unlock: + rcu_read_unlock(); nfp_flower_cmsg_warn(app, "Requested route not found.\n"); } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f27a0dc8c56331..5e3e6e262ba374 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev) sh_eth_get_stats(ndev); mdp->cd->soft_reset(ndev); + /* Set the RMII mode again if required */ + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* Set MAC address again */ update_mac_address(ndev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 96014bde429f1a..53c51c2e8d9a22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2198,6 +2198,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { rx_q = &priv->rx_queue[chan]; @@ -2223,10 +2227,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) tx_q->tx_tail_addr, chan); } - /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) - stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); - return ret; } @@ -3322,6 +3322,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) entry = STMMAC_GET_ENTRY(entry, 
DMA_RX_SIZE); } rx_q->dirty_rx = entry; + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index bdd351597b5525..093a223fe40882 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus) of_property_read_u32_array(np, "snps,reset-delays-us", data->delays, 3); - if (gpio_request(data->reset_gpio, "mdio-reset")) + if (devm_gpio_request(priv->device, data->reset_gpio, + "mdio-reset")) return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 832bce07c38519..1afed85550c0a8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2978,7 +2978,7 @@ static void cpsw_get_ringparam(struct net_device *ndev, struct cpsw_common *cpsw = priv->cpsw; /* not supported */ - ering->tx_max_pending = 0; + ering->tx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index fb12b63439c67f..35413041dcf817 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -872,12 +872,6 @@ static inline int netvsc_send_pkt( } else if (ret == -EAGAIN) { netif_tx_stop_queue(txq); ndev_ctx->eth_stats.stop_queue++; - if (atomic_read(&nvchan->queue_sends) < 1 && - !net_device->tx_disable) { - netif_tx_wake_queue(txq); - ndev_ctx->eth_stats.wake_queue++; - ret = -ENOSPC; - } } else { netdev_err(ndev, "Unable to send packet pages %u len %u, ret %d\n", @@ -885,6 +879,15 @@ static inline int netvsc_send_pkt( ret); } + if (netif_tx_queue_stopped(txq) && + atomic_read(&nvchan->queue_sends) < 1 && + !net_device->tx_disable) { + netif_tx_wake_queue(txq); + ndev_ctx->eth_stats.wake_queue++; + if (ret == -EAGAIN) + ret = -ENOSPC; + } + return ret; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9d699bd5f7151a..cf6b9b1771f12c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2405,7 +2405,7 @@ static struct hv_driver netvsc_drv = { .probe = netvsc_probe, .remove = netvsc_remove, .driver = { - .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 04891429a55423..fe4057fca83d86 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -539,6 +539,8 @@ mcr20a_start(struct ieee802154_hw *hw) dev_dbg(printdev(lp), "no slotted operation\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_SLOTTED, 0x0); + if (ret < 0) + return ret; /* enable irq */ enable_irq(lp->spi->irq); @@ -546,11 +548,15 @@ mcr20a_start(struct ieee802154_hw *hw) /* Unmask SEQ interrupt */ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, DAR_PHY_CTRL2_SEQMSK, 0x0); + if (ret < 0) + return ret; /* Start the RX sequence */ dev_dbg(printdev(lp), "start the RX sequence\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); + if (ret < 0) + return ret; return 0; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 68b8007da82bd2..0115a2868933c6 100644 --- 
a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -178,7 +178,7 @@ static void ipvlan_port_destroy(struct net_device *dev) } #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index b3935778b19fee..e4bf9e7d758384 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -260,10 +260,8 @@ static int dp83867_config_init(struct phy_device *phydev) ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); if (ret) return ret; - } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { + /* Set up RGMII delays */ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index f77a2d9e7f9d85..456a1f882b0978 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -27,6 +27,9 @@ #include enum { + MV_PMA_BOOT = 0xc050, + MV_PMA_BOOT_FATAL = BIT(0), + MV_PCS_BASE_T = 0x0000, MV_PCS_BASE_R = 0x1000, MV_PCS_1000BASEX = 0x2000, @@ -226,6 +229,16 @@ static int mv3310_probe(struct phy_device *phydev) (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) return -ENODEV; + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT); + if (ret < 0) + return ret; + + if (ret & MV_PMA_BOOT_FATAL) { + dev_warn(&phydev->mdio.dev, + "PHY failed to boot firmware, status=%04x\n", ret); + return -ENODEV; + } + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index f6e70f2dfd12b9..e029c7977a562d 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -54,6 +54,10 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; + + /* The current settings */ + phy_interface_t cur_interface; + struct gpio_desc *link_gpio; struct timer_list link_poll; void (*get_fixed_state)(struct net_device *dev, @@ -477,12 +481,12 @@ static void phylink_resolve(struct work_struct *w) if (!link_state.link) { netif_carrier_off(ndev); pl->ops->mac_link_down(ndev, pl->link_an_mode, - pl->phy_state.interface); + pl->cur_interface); netdev_info(ndev, "Link is Down\n"); } else { + pl->cur_interface = link_state.interface; pl->ops->mac_link_up(ndev, pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); netif_carrier_on(ndev); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 68c8fbf099f87c..8807a806cc47f3 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -280,6 +280,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, { struct i2c_msg msgs[2]; u8 bus_addr = a2 ? 0x51 : 0x50; + size_t this_len; int ret; msgs[0].addr = bus_addr; @@ -291,11 +292,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, msgs[1].len = len; msgs[1].buf = buf; - ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - return ret; + while (len) { + this_len = len; + if (this_len > 16) + this_len = 16; - return ret == ARRAY_SIZE(msgs) ? 
len : 0; + msgs[1].len = this_len; + + ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + if (ret != ARRAY_SIZE(msgs)) + break; + + msgs[1].buf += this_len; + dev_addr += this_len; + len -= this_len; + } + + return msgs[1].buf - (u8 *)buf; } static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index b5edc7f96a392d..685e875f51643a 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = { static int __init deflate_init(void) { - int answer = ppp_register_compressor(&ppp_deflate); - if (answer == 0) - printk(KERN_INFO - "PPP Deflate Compression module registered\n"); - ppp_register_compressor(&ppp_deflate_draft); - return answer; + int rc; + + rc = ppp_register_compressor(&ppp_deflate); + if (rc) + return rc; + + rc = ppp_register_compressor(&ppp_deflate_draft); + if (rc) { + ppp_unregister_compressor(&ppp_deflate); + return rc; + } + + pr_info("PPP Deflate Compression module registered\n"); + return 0; } static void __exit deflate_cleanup(void) diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index e0d6760f321951..4b5af24139703a 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1285,6 +1285,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) tbnet_tear_down(net, true); } + tb_unregister_protocol_handler(&net->handler); return 0; } @@ -1293,6 +1294,8 @@ static int __maybe_unused tbnet_resume(struct device *dev) struct tb_service *svc = tb_to_service(dev); struct tbnet *net = tb_service_get_drvdata(svc); + tb_register_protocol_handler(&net->handler); + netif_carrier_off(net->dev); if (netif_running(net->dev)) { netif_device_attach(net->dev); diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 3d8a70d3ea9bd6..3d71f17163902a 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) dev); dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + netif_stop_queue(net); retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", __func__, retval); dev->net->stats.tx_errors++; dev_kfree_skb_any(skb); + netif_wake_queue(net); } else { dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += skb->len; dev_consume_skb_any(skb); - netif_stop_queue(net); } return NETDEV_TX_OK; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9195f3476b1d79..d9a6699abe592d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -63,6 +63,7 @@ enum qmi_wwan_flags { enum qmi_wwan_quirks { QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ + QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. 
endpoints */ }; struct qmimux_hdr { @@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { .data = QMI_WWAN_QUIRK_DTR, }; +static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN | FLAG_SEND_ZLP, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, + .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG, +}; + #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ @@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { #define QMI_GOBI_DEVICE(vend, prod) \ QMI_FIXED_INTF(vend, prod, 0) +/* Quectel does not use fixed interface numbers on at least some of their + * devices. We need to check the number of endpoints to ensure that we bind to + * the correct interface. + */ +#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \ + USB_SUBCLASS_VENDOR_SPEC, 0xff), \ + .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg + static const struct usb_device_id products[] = { /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ @@ -969,20 +989,9 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Quectel EP06/EG06/EM06 */ - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, - 0xff), - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, - }, - { /* Quectel EG12/EM12 */ - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, - 0xff), - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, - }, + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ /* 3. 
Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1122,9 +1131,16 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */ + {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ @@ -1180,6 +1196,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */ {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ @@ -1200,7 +1217,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ + {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ @@ -1240,6 +1259,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ @@ -1271,7 +1292,6 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ @@ -1351,27 +1371,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf) return false; } -static bool quectel_diag_detected(struct usb_interface *intf) 
-{ - struct usb_device *dev = interface_to_usbdev(intf); - struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; - u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor); - u16 id_product = le16_to_cpu(dev->descriptor.idProduct); - - if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2) - return false; - - if (id_product == 0x0306 || id_product == 0x0512) - return true; - else - return false; -} - static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod) { struct usb_device_id *id = (struct usb_device_id *)prod; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; + const struct driver_info *info; /* Workaround to enable dynamic IDs. This disables usbnet * blacklisting functionality. Which, if required, can be @@ -1405,10 +1410,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, * we need to match on class/subclass/protocol. These values are * identical for the diagnostic- and QMI-interface, but bNumEndpoints is * different. Ignore the current interface if the number of endpoints - * the number for the diag interface (two). + * equals the number for the diag interface (two). */ - if (quectel_diag_detected(intf)) - return -ENODEV; + info = (void *)&id->driver_info; + + if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { + if (desc->bNumEndpoints == 2) + return -ENODEV; + } return usbnet_probe(intf, id); } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 770aa624147f17..10854977c55f1a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -506,6 +506,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) if (netif_running (dev->net) && netif_device_present (dev->net) && + test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { @@ -1431,6 +1432,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, spin_unlock_irqrestore(&dev->txq.lock, flags); goto drop; } + if (netif_queue_stopped(net)) { + usb_autopm_put_interface_async(dev->intf); + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } #ifdef CONFIG_PM /* if this triggers the device is still a sleep */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 2daf33342b23da..1fc2bf66845c77 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1131,7 +1131,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, params->wait); out: + /* when the sent packet was not acked by receiver(ACK=0), rc will + * be -EAGAIN. In this case this function needs to return success, + * the ACK=0 will be reflected in tx_status. + */ tx_status = (rc == 0); + rc = (rc == -EAGAIN) ? 0 : rc; cfg80211_mgmt_tx_status(wdev, cookie ? 
*cookie : 0, buf, len, tx_status, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 42c02a20ec97ca..6e3b3031f29bd7 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -3107,8 +3107,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { - wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status); - rc = -EINVAL; + wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n", + evt.evt.status); + rc = -EAGAIN; } kfree(cmd); @@ -3160,9 +3161,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len, rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { - wil_err(wil, "mgmt_tx_ext failed with status %d\n", - evt.evt.status); - rc = -EINVAL; + wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n", + evt.evt.status); + rc = -EAGAIN; } kfree(cmd); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index e99e766a302851..1cabae424839eb 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void) if (result < 0) printk(KERN_ERR DRIVER_NAME ": usb_register failed (status %d)\n", result); - - led_trigger_register_simple("at76_usb-tx", &ledtrig_tx); + else + led_trigger_register_simple("at76_usb-tx", &ledtrig_tx); return result; } diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 6922cbb99a044e..5a0699fb4b9ab3 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -1834,7 +1834,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct lpphy_tx_gains gains, oldgains; + struct lpphy_tx_gains oldgains; int old_txpctl, old_afe_ovr, old_rf, old_bbmult; lpphy_read_tx_pctl_mode_from_hardware(dev); @@ -1848,9 +1848,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0) - lpphy_papd_cal(dev, gains, 0, 1, 30); + lpphy_papd_cal(dev, oldgains, 0, 1, 30); else - lpphy_papd_cal(dev, gains, 0, 1, 65); + lpphy_papd_cal(dev, oldgains, 0, 1, 65); if (old_afe_ovr) lpphy_set_tx_gains(dev, oldgains); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 1068a2a4494c46..144e0b83b24b80 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -490,11 +490,18 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) return -ENOMEM; } -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) +{ + struct brcmf_bcdc *bcdc = drvr->proto->pd; + + brcmf_fws_detach_pre_delif(bcdc->fws); +} + +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc = drvr->proto->pd; drvr->proto->pd = NULL; - brcmf_fws_detach(bcdc->fws); + brcmf_fws_detach_post_delif(bcdc->fws); 
kfree(bcdc); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 3b0e9eff21b582..4bc52240ccea2e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h @@ -18,14 +18,16 @@ #ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr); void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state); void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, bool success); struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr); #else static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } -static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} +static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}; +static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {} #endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 6f3faaf1b1cbba..c7c520f327f2b8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3466,6 +3466,8 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, } netinfo = brcmf_get_netinfo_array(pfn_result); + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; cfg->wowl.nd->n_channels = 1; @@ -5366,6 +5368,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->req_ie = kmemdup(cfg->extra_buf, conn_info->req_ie_len, GFP_KERNEL); + if (!conn_info->req_ie) + conn_info->req_ie_len = 0; } else { conn_info->req_ie_len = 0; conn_info->req_ie = NULL; @@ -5382,6 +5386,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->resp_ie = kmemdup(cfg->extra_buf, conn_info->resp_ie_len, GFP_KERNEL); + if (!conn_info->resp_ie) + conn_info->resp_ie_len = 0; } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 860a4372cb564f..0f56be13c7adf1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -464,7 +464,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event) } else { /* Process special event packets */ if (handle_event) - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, + BCMILCP_SUBTYPE_VENDOR_LONG); brcmf_netif_rx(ifp, skb); } @@ -481,7 +482,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb) if (brcmf_rx_hdrpull(drvr, skb, &ifp)) return; - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); brcmu_pkt_buf_free_skb(skb); } @@ -783,17 +784,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, bool rtnl_locked) { struct brcmf_if *ifp; + int ifidx; ifp = drvr->iflist[bsscfgidx]; - drvr->iflist[bsscfgidx] = NULL; if (!ifp) { brcmf_err("Null interface, bsscfgidx=%d\n", 
bsscfgidx); return; } brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, ifp->ifidx); - if (drvr->if2bss[ifp->ifidx] == bsscfgidx) - drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID; + ifidx = ifp->ifidx; + if (ifp->ndev) { if (bsscfgidx == 0) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { @@ -821,6 +822,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, brcmf_p2p_ifp_removed(ifp, rtnl_locked); kfree(ifp); } + + drvr->iflist[bsscfgidx] = NULL; + if (drvr->if2bss[ifidx] == bsscfgidx) + drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID; } void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked) @@ -1239,6 +1244,8 @@ void brcmf_detach(struct device *dev) brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN); + brcmf_proto_detach_pre_delif(drvr); + /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS-1; i > -1; i--) brcmf_remove_interface(drvr->iflist[i], false); @@ -1248,7 +1255,7 @@ void brcmf_detach(struct device *dev) brcmf_bus_stop(drvr->bus_if); - brcmf_proto_detach(drvr); + brcmf_proto_detach_post_delif(drvr); bus_if->drvr = NULL; wiphy_free(drvr->wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index 816f80ea925b11..ebd66fe0d949c9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -211,7 +211,7 @@ enum brcmf_fweh_event_code { */ #define BRCM_OUI "\x00\x10\x18" #define BCMILCP_BCM_SUBTYPE_EVENT 1 - +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 /** * struct brcm_ethhdr - broadcom specific ether header. @@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing); static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, - struct sk_buff *skb) + struct sk_buff *skb, u16 stype) { struct brcmf_event *event_packet; - u16 usr_stype; + u16 subtype, usr_stype; /* only process events when protocol matches */ if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL)) @@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, if ((skb->len + ETH_HLEN) < sizeof(*event_packet)) return; - /* check for BRCM oui match */ event_packet = (struct brcmf_event *)skb_mac_header(skb); + + /* check subtype if needed */ + if (unlikely(stype)) { + subtype = get_unaligned_be16(&event_packet->hdr.subtype); + if (subtype != stype) + return; + } + + /* check for BRCM oui match */ if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0], sizeof(event_packet->hdr.oui))) return; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index f3cbf78c8899ca..1de8497d92b8a9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -579,24 +579,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg) return ifidx == *(int *)arg; } -static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, - int ifidx) -{ - bool (*matchfn)(struct sk_buff *, void *) = NULL; - struct sk_buff *skb; - int prec; - - if (ifidx != -1) - matchfn = brcmf_fws_ifidx_match; - for (prec = 0; prec < q->num_prec; prec++) { - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - while (skb) { - brcmu_pkt_buf_free_skb(skb); - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - } - } -} - static void brcmf_fws_hanger_init(struct brcmf_fws_hanger 
*hanger) { int i; @@ -668,6 +650,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, return 0; } +static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, + int ifidx) +{ + bool (*matchfn)(struct sk_buff *, void *) = NULL; + struct sk_buff *skb; + int prec; + u32 hslot; + + if (ifidx != -1) + matchfn = brcmf_fws_ifidx_match; + for (prec = 0; prec < q->num_prec; prec++) { + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + while (skb) { + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, + true); + brcmu_pkt_buf_free_skb(skb); + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + } + } +} + static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, u32 slot_id) { @@ -2168,6 +2172,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) brcmf_fws_lock(fws); ifp->fws_desc = NULL; brcmf_dbg(TRACE, "deleting %s\n", entry->name); + brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx], + ifp->ifidx); brcmf_fws_macdesc_deinit(entry); brcmf_fws_cleanup(fws, ifp->ifidx); brcmf_fws_unlock(fws); @@ -2404,17 +2410,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) return fws; fail: - brcmf_fws_detach(fws); + brcmf_fws_detach_pre_delif(fws); + brcmf_fws_detach_post_delif(fws); return ERR_PTR(rc); } -void brcmf_fws_detach(struct brcmf_fws_info *fws) +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws) { if (!fws) return; - - if (fws->fws_wq) + if (fws->fws_wq) { destroy_workqueue(fws->fws_wq); + fws->fws_wq = NULL; + } +} + +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws) +{ + if (!fws) + return; /* cleanup */ brcmf_fws_lock(fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 4e6835766d5d8f..749c06dcdc17dd 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -19,7 +19,8 @@ #define FWSIGNAL_H_ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); -void brcmf_fws_detach(struct brcmf_fws_info *fws); +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws); +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws); void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 4e8397a0cbc8e8..ee922b0525610e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1116,7 +1116,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) skb->protocol = eth_type_trans(skb, ifp->ndev); - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); exit: brcmu_pkt_buf_free_skb(skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index c5ff551ec659e9..74e6fdbd3a2b8d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -67,16 +67,22 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) return -ENOMEM; } -void brcmf_proto_detach(struct brcmf_pub *drvr) +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, 
"Enter\n"); if (drvr->proto) { if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) - brcmf_proto_bcdc_detach(drvr); + brcmf_proto_bcdc_detach_post_delif(drvr); else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) brcmf_proto_msgbuf_detach(drvr); kfree(drvr->proto); drvr->proto = NULL; } } + +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr) +{ + if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) + brcmf_proto_bcdc_detach_pre_delif(drvr); +} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index d3c3b9a815ad7f..72355aea902879 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -54,7 +54,8 @@ struct brcmf_proto { int brcmf_proto_attach(struct brcmf_pub *drvr); -void brcmf_proto_detach(struct brcmf_pub *drvr); +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr); static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, struct sk_buff *skb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index a907d7b065fa8e..53e4962ceb8ae8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -667,6 +667,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) brcmf_dbg(TRACE, "Enter: on=%d\n", on); + sdio_retune_crc_disable(bus->sdiodev->func1); + + /* Cannot re-tune if device is asleep; defer till we're awake */ + if (on) + sdio_retune_hold_now(bus->sdiodev->func1); + wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); /* 1st KSO write goes to AOS wake up core if device is asleep */ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); @@ -719,6 +725,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) if (try_cnt > MAX_KSO_ATTEMPTS) brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err); + if (on) + sdio_retune_release(bus->sdiodev->func1); + + sdio_retune_crc_enable(bus->sdiodev->func1); + return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a4308c6e72d795..44ead0fea7c61d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -160,7 +160,7 @@ struct brcmf_usbdev_info { struct usb_device *usbdev; struct device *dev; - struct mutex dev_init_lock; + struct completion dev_init_done; int ctl_in_pipe, ctl_out_pipe; struct urb *ctl_urb; /* URB for control endpoint */ @@ -684,12 +684,18 @@ static int brcmf_usb_up(struct device *dev) static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo) { + int i; + if (devinfo->ctl_urb) usb_kill_urb(devinfo->ctl_urb); if (devinfo->bulk_urb) usb_kill_urb(devinfo->bulk_urb); - brcmf_usb_free_q(&devinfo->tx_postq, true); - brcmf_usb_free_q(&devinfo->rx_postq, true); + if (devinfo->tx_reqs) + for (i = 0; i < devinfo->bus_pub.ntxq; i++) + usb_kill_urb(devinfo->tx_reqs[i].urb); + if (devinfo->rx_reqs) + for (i = 0; i < devinfo->bus_pub.nrxq; i++) + usb_kill_urb(devinfo->rx_reqs[i].urb); } static void brcmf_usb_down(struct device *dev) @@ -1195,11 +1201,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, if (ret) goto error; - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return; error: brcmf_dbg(TRACE, 
"failed: dev=%s, err=%d\n", dev_name(dev), ret); - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); device_release_driver(dev); } @@ -1267,7 +1273,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; /* we are done */ - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return 0; } bus->chip = bus_pub->devid; @@ -1327,11 +1333,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->usbdev = usb; devinfo->dev = &usb->dev; - /* Take an init lock, to protect for disconnect while still loading. + /* Init completion, to protect for disconnect while still loading. * Necessary because of the asynchronous firmware load construction */ - mutex_init(&devinfo->dev_init_lock); - mutex_lock(&devinfo->dev_init_lock); + init_completion(&devinfo->dev_init_done); usb_set_intfdata(intf, devinfo); @@ -1409,7 +1414,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) return 0; fail: - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); kfree(devinfo); usb_set_intfdata(intf, NULL); return ret; @@ -1424,7 +1429,7 @@ brcmf_usb_disconnect(struct usb_interface *intf) devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); if (devinfo) { - mutex_lock(&devinfo->dev_init_lock); + wait_for_completion(&devinfo->dev_init_done); /* Make sure that devinfo still exists. Firmware probe routines * may have released the device and cleared the intfdata. */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c index 8eff2753abadeb..d493021f603185 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c @@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, struct brcmf_if *ifp; const struct brcmf_vndr_dcmd_hdr *cmdhdr = data; struct sk_buff *reply; - int ret, payload, ret_len; + unsigned int payload, ret_len; void *dcmd_buf = NULL, *wr_pointer; u16 msglen, maxmsglen = PAGE_SIZE - 0x100; + int ret; if (len < sizeof(*cmdhdr)) { brcmf_err("vendor command too short: %d\n", len); @@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, brcmf_err("oversize return buffer %d\n", ret_len); ret_len = BRCMF_DCMD_MAXLEN; } - payload = max(ret_len, len) + 1; + payload = max_t(unsigned int, ret_len, len) + 1; dcmd_buf = vzalloc(payload); if (NULL == dcmd_buf) return -ENOMEM; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index b53148f972a4a4..036d1d82d93e7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, } /* iwl_mvm_create_skb Adds the rxb to a new skb */ -static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, - u16 len, u8 crypt_len, - struct iwl_rx_cmd_buffer *rxb) +static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; @@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, * present before copying packet data. 
*/ hdrlen += crypt_len; + + if (WARN_ONCE(headlen < hdrlen, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len)) { + /* + * We warn and trace because we want to be able to see + * it in trace-cmd as well. + */ + IWL_DEBUG_RX(mvm, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len); + return -EINVAL; + } + skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } + + return 0; } /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ @@ -1425,7 +1441,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->boottime_ns = ktime_get_boot_ns(); } - iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); + if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { + kfree_skb(skb); + goto out; + } + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); out: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index b2905f01b7df3a..6dcd5374d9b4d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1388,10 +1388,15 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; + struct iwl_rxq *rxq; u32 r, i, count = 0; bool emergency = false; + if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) + return; + + rxq = &trans_pcie->rxq[queue]; + restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c index 27a49068d32d0a..57ad56435dda53 100644 --- a/drivers/net/wireless/intersil/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); - return err; + goto err_put; } mem_addr = pci_resource_start(pdev, 0); @@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev, pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); +err_put: pci_dev_put(pdev); return err; } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 2d87ebbfa4dab1..47ec5293c045df 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -4045,16 +4045,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) { dev_err(priv->adapter->dev, "Failed to process hostcmd\n"); + kfree(hostcmd); return -EFAULT; } /* process hostcmd response*/ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len); - if (!skb) + if (!skb) { + kfree(hostcmd); return -ENOMEM; + } err = nla_put(skb, MWIFIEX_TM_ATTR_DATA, hostcmd->len, hostcmd->cmd); if (err) { + kfree(hostcmd); kfree_skb(skb); return -EMSGSIZE; } diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 
bfe84e55df7762..f1522fb1c1e878 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ? rx_rate - 1 : rx_rate; + if (rate_index >= MWIFIEX_MAX_AC_RX_RATES) + rate_index = MWIFIEX_MAX_AC_RX_RATES - 1; + return rate_index; } diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ef9b502ce576b0..a3189294ecb80e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -469,6 +469,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) /* <2> work queue */ rtlpriv->works.hw = hw; rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); + if (unlikely(!rtlpriv->works.rtl_wq)) { + pr_err("Failed to allocate work queue\n"); + return; + } + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, (void *)rtl_watchdog_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 63874512598bb7..b5f91c994c7989 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -622,6 +622,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index f3bff66e85d0c3..81ec0e6e07c1ff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -646,6 +646,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); if (cmd_send_packet) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 84a0d0eb72e1e4..a933490928ba91 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -766,6 +766,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index bf9859f74b6f5e..52f108744e9693 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -470,6 +470,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index f2441fbb92f1e7..307c2bd77f0600 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -584,6 +584,8 
@@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u1rsvdpageloc, sizeof(u1rsvdpageloc)); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index d868a034659fb8..d7235f6165fdff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1645,6 +1645,8 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, &reserved_page_packet_8812[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); @@ -1781,6 +1783,8 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, &reserved_page_packet_8821[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 4e510cbe0a89f0..be59d66585d6d2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common) * @adapter: Pointer to the adapter structure. * @band: Operating band to be set. * - * Return: None. + * Return: int - 0 on success, negative error on failure. */ -static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) +static int rsi_register_rates_channels(struct rsi_hw *adapter, int band) { struct ieee80211_supported_band *sbands = &adapter->sbands[band]; void *channels = NULL; if (band == NL80211_BAND_2GHZ) { - channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL); - memcpy(channels, - rsi_2ghz_channels, - sizeof(rsi_2ghz_channels)); + channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; sbands->band = NL80211_BAND_2GHZ; sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels); sbands->bitrates = rsi_rates; sbands->n_bitrates = ARRAY_SIZE(rsi_rates); } else { - channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL); - memcpy(channels, - rsi_5ghz_channels, - sizeof(rsi_5ghz_channels)); + channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; sbands->band = NL80211_BAND_5GHZ; sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels); sbands->bitrates = &rsi_rates[4]; @@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) sbands->ht_cap.mcs.rx_mask[0] = 0xff; sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; /* sbands->ht_cap.mcs.rx_highest = 0x82; */ + return 0; } /** @@ -1985,11 +1986,16 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->available_antennas_rx = 1; wiphy->available_antennas_tx = 1; - rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); + status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); + if (status) + return status; wiphy->bands[NL80211_BAND_2GHZ] = &adapter->sbands[NL80211_BAND_2GHZ]; if (common->num_supp_bands > 1) { - rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); + status = rsi_register_rates_channels(adapter, + NL80211_BAND_5GHZ); + if (status) + return status; wiphy->bands[NL80211_BAND_5GHZ] = &adapter->sbands[NL80211_BAND_5GHZ]; } 
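The rsi_91x_mac80211.c change above converts rsi_register_rates_channels() from a void function built on an unchecked kmalloc()+memcpy() into one that uses kmemdup() and returns -ENOMEM, which rsi_mac80211_attach() now propagates for both bands. A minimal sketch of that allocation pattern, using made-up names (example_channels, example_register_band) rather than code from the patch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical channel table standing in for rsi_2ghz_channels. */
static const u32 example_channels[] = { 2412, 2437, 2462 };

/*
 * Duplicate a static table into heap memory and report -ENOMEM on failure,
 * so the caller can unwind instead of dereferencing a NULL pointer later.
 */
static int example_register_band(void **out)
{
	void *channels;

	channels = kmemdup(example_channels, sizeof(example_channels),
			   GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	*out = channels;
	return 0;
}

The caller checks the return value and bails out early, mirroring how rsi_mac80211_attach() handles the 2 GHz and 5 GHz band registrations after this change.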
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index 90dc979f260b60..c1608f0bf6d017 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, mutex_init(&priv->wsm_cmd_mux); mutex_init(&priv->conf_mutex); priv->workqueue = create_singlethread_workqueue("cw1200_wq"); + if (!priv->workqueue) { + ieee80211_free_hw(hw); + return NULL; + } + sema_init(&priv->scan.lock, 1); INIT_WORK(&priv->scan.work, cw1200_scan_work); INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9148015ed8036f..a3132a9eb91c21 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -612,7 +612,7 @@ static struct attribute *nd_device_attributes[] = { NULL, }; -/** +/* * nd_device_attribute_group - generic attributes for all devices on an nd bus */ struct attribute_group nd_device_attribute_group = { @@ -641,7 +641,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a, return a->mode; } -/** +/* * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus */ struct attribute_group nd_numa_attribute_group = { diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 1eeb7be6aa3442..9f1b7e3153f991 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid; static guid_t nvdimm_pfn_guid; static guid_t nvdimm_dax_guid; +static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; + static u32 best_seq(u32 a, u32 b) { a &= NSINDEX_SEQ_MASK; @@ -623,6 +625,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class, return &guid_null; } +static void reap_victim(struct nd_mapping *nd_mapping, + struct nd_label_ent *victim) +{ + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + u32 slot = to_slot(ndd, victim->label); + + dev_dbg(ndd->dev, "free: %d\n", slot); + nd_label_free_slot(ndd, slot); + victim->label = NULL; +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, int pos, unsigned long flags) @@ -630,9 +643,9 @@ static int __pmem_label_update(struct nd_region *nd_region, struct nd_namespace_common *ndns = &nspm->nsio.common; struct nd_interleave_set *nd_set = nd_region->nd_set; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_label_ent *label_ent, *victim = NULL; struct nd_namespace_label *nd_label; struct nd_namespace_index *nsindex; + struct nd_label_ent *label_ent; struct nd_label_id label_id; struct resource *res; unsigned long *free; @@ -701,18 +714,10 @@ static int __pmem_label_update(struct nd_region *nd_region, list_for_each_entry(label_ent, &nd_mapping->labels, list) { if (!label_ent->label) continue; - if (memcmp(nspm->uuid, label_ent->label->uuid, - NSLABEL_UUID_LEN) != 0) - continue; - victim = label_ent; - list_move_tail(&victim->list, &nd_mapping->labels); - break; - } - if (victim) { - dev_dbg(ndd->dev, "free: %d\n", slot); - slot = to_slot(ndd, victim->label); - nd_label_free_slot(ndd, slot); - victim->label = NULL; + if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) + || memcmp(nspm->uuid, label_ent->label->uuid, + NSLABEL_UUID_LEN) == 0) + reap_victim(nd_mapping, label_ent); } /* update index */ diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 18bbe183b3a9bd..52f9fcada00a1b 100644 --- a/drivers/nvdimm/label.h +++ 
b/drivers/nvdimm/label.h @@ -38,8 +38,6 @@ enum { ND_NSINDEX_INIT = 0x1, }; -static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; - /** * struct nd_namespace_index - label set superblock * @sig: NAMESPACE_INDEX\0 diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 73a444c41cde94..5dc3b407d7bd00 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1248,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region, for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nd_label_ent *label_ent; struct resource *res; for_each_dpa_resource(ndd, res) if (strcmp(res->name, old_label_id.id) == 0) sprintf((void *) res->name, "%s", new_label_id.id); + + mutex_lock(&nd_mapping->lock); + list_for_each_entry(label_ent, &nd_mapping->labels, list) { + struct nd_namespace_label *nd_label = label_ent->label; + struct nd_label_id label_id; + + if (!nd_label) + continue; + nd_label_gen_id(&label_id, nd_label->uuid, + __le32_to_cpu(nd_label->flags)); + if (strcmp(old_label_id.id, label_id.id) == 0) + set_bit(ND_LABEL_REAP, &label_ent->flags); + } + mutex_unlock(&nd_mapping->lock); } kfree(*old_uuid); out: diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 98317e7ce5b54d..01e194a5824e43 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -113,8 +113,12 @@ struct nd_percpu_lane { spinlock_t lock; }; +enum nd_label_flags { + ND_LABEL_REAP, +}; struct nd_label_ent { struct list_head list; + unsigned long flags; struct nd_namespace_label *label; }; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index cff027fc267618..a7ce2f1761a2c5 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -281,16 +281,22 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); } +/* + * Use the 'no check' versions of copy_from_iter_flushcache() and + * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. 
Bounds + * checking, both file offset and device offset, is handled by + * dax_iomap_actor() + */ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { - return copy_from_iter_flushcache(addr, bytes, i); + return _copy_from_iter_flushcache(addr, bytes, i); } static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { - return copy_to_iter_mcsafe(addr, bytes, i); + return _copy_to_iter_mcsafe(addr, bytes, i); } static const struct dax_operations pmem_dax_ops = { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2cdb3032ca0fc9..d8869d978c3414 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1277,9 +1277,14 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, { #ifdef CONFIG_NVME_MULTIPATH if (disk->fops == &nvme_ns_head_ops) { + struct nvme_ns *ns; + *head = disk->private_data; *srcu_idx = srcu_read_lock(&(*head)->srcu); - return nvme_find_path(*head); + ns = nvme_find_path(*head); + if (!ns) + srcu_read_unlock(&(*head)->srcu, *srcu_idx); + return ns; } #endif *head = NULL; @@ -1293,42 +1298,56 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) srcu_read_unlock(&head->srcu, idx); } -static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg) +static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { + struct nvme_ns_head *head = NULL; + void __user *argp = (void __user *)arg; + struct nvme_ns *ns; + int srcu_idx, ret; + + ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); + if (unlikely(!ns)) + return -EWOULDBLOCK; + + /* + * Handle ioctls that apply to the controller instead of the namespace + * seperately and drop the ns SRCU reference early. This avoids a + * deadlock when deleting namespaces using the passthrough interface. 
+ */ + if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { + struct nvme_ctrl *ctrl = ns->ctrl; + + nvme_get_ctrl(ns->ctrl); + nvme_put_ns_from_disk(head, srcu_idx); + + if (cmd == NVME_IOCTL_ADMIN_CMD) + ret = nvme_user_cmd(ctrl, NULL, argp); + else + ret = sed_ioctl(ctrl->opal_dev, cmd, argp); + + nvme_put_ctrl(ctrl); + return ret; + } + switch (cmd) { case NVME_IOCTL_ID: force_successful_syscall_return(); - return ns->head->ns_id; - case NVME_IOCTL_ADMIN_CMD: - return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg); + ret = ns->head->ns_id; + break; case NVME_IOCTL_IO_CMD: - return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg); + ret = nvme_user_cmd(ns->ctrl, ns, argp); + break; case NVME_IOCTL_SUBMIT_IO: - return nvme_submit_io(ns, (void __user *)arg); + ret = nvme_submit_io(ns, argp); + break; default: -#ifdef CONFIG_NVM if (ns->ndev) - return nvme_nvm_ioctl(ns, cmd, arg); -#endif - if (is_sed_ioctl(cmd)) - return sed_ioctl(ns->ctrl->opal_dev, cmd, - (void __user *) arg); - return -ENOTTY; + ret = nvme_nvm_ioctl(ns, cmd, arg); + else + ret = -ENOTTY; } -} -static int nvme_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - int srcu_idx, ret; - - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - ret = -EWOULDBLOCK; - else - ret = nvme_ns_ioctl(ns, cmd, arg); nvme_put_ns_from_disk(head, srcu_idx); return ret; } @@ -1480,6 +1499,10 @@ static void nvme_update_disk_info(struct gendisk *disk, sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); unsigned short bs = 1 << ns->lba_shift; + if (ns->lba_shift > PAGE_SHIFT) { + /* unsupported block size, set capacity to 0 later */ + bs = (1 << 9); + } blk_mq_freeze_queue(disk->queue); blk_integrity_unregister(disk); @@ -1490,7 +1513,8 @@ static void nvme_update_disk_info(struct gendisk *disk, if (ns->ms && !ns->ext && (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) nvme_init_integrity(disk, ns->ms, ns->pi_type); - if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) + if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) || + ns->lba_shift > PAGE_SHIFT) capacity = 0; set_capacity(disk, capacity); @@ -3204,7 +3228,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) { struct nvme_ns *ns; __le32 *ns_list; - unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); + unsigned i, j, nsid, prev = 0; + unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024); int ret = 0; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); @@ -3501,6 +3526,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); } EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7b9ef8e734e775..c8eeecc5811542 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1132,6 +1132,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd; + bool shutdown = false; u32 csts = readl(dev->bar + NVME_REG_CSTS); /* If PCI error recovery process is happening, we cannot reset or @@ -1168,12 +1169,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) * shutdown, so we return BLK_EH_DONE. 
*/ switch (dev->ctrl.state) { + case NVME_CTRL_DELETING: + shutdown = true; case NVME_CTRL_CONNECTING: case NVME_CTRL_RESETTING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, false); + nvme_dev_disable(dev, shutdown); nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; default: @@ -2187,8 +2190,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * must flush all entered requests to their failed completion to avoid * deadlocking blk-mq hot-cpu notifier. */ - if (shutdown) + if (shutdown) { nvme_start_queues(&dev->ctrl); + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) + blk_mq_unquiesce_queue(dev->ctrl.admin_q); + } mutex_unlock(&dev->shutdown_lock); } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0939a4e178fb97..e4f167e35353f2 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -880,8 +880,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, { blk_mq_quiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); - blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, - &ctrl->ctrl); + if (ctrl->ctrl.admin_tagset) + blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, + nvme_cancel_request, &ctrl->ctrl); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); } @@ -892,8 +893,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); - blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, - &ctrl->ctrl); + if (ctrl->ctrl.tagset) + blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, + nvme_cancel_request, &ctrl->ctrl); if (remove) nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 7bc9f624043296..1096dd01ca2288 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -239,6 +239,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_bdev_execute_write_zeroes; + req->data_len = 0; return 0; default: pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 7c530c88b3fbb9..99de51e87f7f84 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1028,7 +1028,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put); static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) { u8 *p, *b; - int i, bit_offset = cell->bit_offset; + int i, extra, bit_offset = cell->bit_offset; p = b = buf; if (bit_offset) { @@ -1043,11 +1043,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) p = b; *b++ >>= bit_offset; } - - /* result fits in less bytes */ - if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) - *p-- = 0; + } else { + /* point to the msb */ + p += cell->bytes - 1; } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + /* clear msb bits if any leftover in the last byte */ *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); } diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index d020f89248fd76..69f8e972e29c69 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -235,8 +235,10 @@ static const struct 
sunxi_sid_cfg sun50i_a64_cfg = { static const struct of_device_id sunxi_sid_of_match[] = { { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 45c0b1f4cb69f7..a09c1c3cf831e7 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -205,15 +205,24 @@ static void __of_attach_node(struct device_node *np) const __be32 *phandle; int sz; - np->name = __of_get_property(np, "name", NULL) ? : ""; - np->type = __of_get_property(np, "device_type", NULL) ? : ""; - - phandle = __of_get_property(np, "phandle", &sz); - if (!phandle) - phandle = __of_get_property(np, "linux,phandle", &sz); - if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) - phandle = __of_get_property(np, "ibm,phandle", &sz); - np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; + if (!of_node_check_flag(np, OF_OVERLAY)) { + np->name = __of_get_property(np, "name", NULL); + np->type = __of_get_property(np, "device_type", NULL); + if (!np->name) + np->name = ""; + if (!np->type) + np->type = ""; + + phandle = __of_get_property(np, "phandle", &sz); + if (!phandle) + phandle = __of_get_property(np, "linux,phandle", &sz); + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) + phandle = __of_get_property(np, "ibm,phandle", &sz); + if (phandle && (sz >= 4)) + np->phandle = be32_to_cpup(phandle); + else + np->phandle = 0; + } np->child = NULL; np->sibling = np->parent->child; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 9808aae4621ad4..2edb59039b5f57 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -287,7 +287,12 @@ static struct property *dup_and_fixup_symbol_prop( * @target may be either in the live devicetree or in a new subtree that * is contained in the changeset. * - * Some special properties are not updated (no error returned). + * Some special properties are not added or updated (no error returned): + * "name", "phandle", "linux,phandle". + * + * Properties "#address-cells" and "#size-cells" are not updated if they + * are already in the live tree, but if present in the live tree, the values + * in the overlay must match the values in the live tree. * * Update of property in symbols node is not allowed. 
* @@ -300,11 +305,13 @@ static int add_changeset_property(struct overlay_changeset *ovcs, { struct property *new_prop = NULL, *prop; int ret = 0; + bool check_for_non_overlay_node = false; - if (!of_prop_cmp(overlay_prop->name, "name") || - !of_prop_cmp(overlay_prop->name, "phandle") || - !of_prop_cmp(overlay_prop->name, "linux,phandle")) - return 0; + if (target->in_livetree) + if (!of_prop_cmp(overlay_prop->name, "name") || + !of_prop_cmp(overlay_prop->name, "phandle") || + !of_prop_cmp(overlay_prop->name, "linux,phandle")) + return 0; if (target->in_livetree) prop = of_find_property(target->np, overlay_prop->name, NULL); @@ -322,12 +329,36 @@ static int add_changeset_property(struct overlay_changeset *ovcs, if (!new_prop) return -ENOMEM; - if (!prop) + if (!prop) { + check_for_non_overlay_node = true; + if (!target->in_livetree) { + new_prop->next = target->np->deadprops; + target->np->deadprops = new_prop; + } ret = of_changeset_add_property(&ovcs->cset, target->np, new_prop); - else + } else if (!of_prop_cmp(prop->name, "#address-cells")) { + if (!of_prop_val_eq(prop, new_prop)) { + pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + } else if (!of_prop_cmp(prop->name, "#size-cells")) { + if (!of_prop_val_eq(prop, new_prop)) { + pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + } else { + check_for_non_overlay_node = true; ret = of_changeset_update_property(&ovcs->cset, target->np, new_prop); + } + + if (check_for_non_overlay_node && + !of_node_check_flag(target->np, OF_OVERLAY)) + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", + target->np, new_prop->name); if (ret) { kfree(new_prop->name); @@ -382,9 +413,10 @@ static int add_changeset_node(struct overlay_changeset *ovcs, struct target *target, struct device_node *node) { const char *node_kbasename; + const __be32 *phandle; struct device_node *tchild; struct target target_child; - int ret = 0; + int ret = 0, size; node_kbasename = kbasename(node->full_name); @@ -398,6 +430,19 @@ static int add_changeset_node(struct overlay_changeset *ovcs, return -ENOMEM; tchild->parent = target->np; + tchild->name = __of_get_property(node, "name", NULL); + tchild->type = __of_get_property(node, "device_type", NULL); + + if (!tchild->name) + tchild->name = ""; + if (!tchild->type) + tchild->type = ""; + + /* ignore obsolete "linux,phandle" */ + phandle = __of_get_property(node, "phandle", &size); + if (phandle && (size == 4)) + tchild->phandle = be32_to_cpup(phandle); + of_node_set_flag(tchild, OF_OVERLAY); ret = of_changeset_attach_node(&ovcs->cset, tchild); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 614823617b8b9d..b7b2e811d54719 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, /* We currently only support kernel addresses */ BUG_ON(sid != KERNEL_SPACE); - mtsp(sid,1); - /* ** WORD 1 - low order word ** "hints" parm includes the VALID bit! 
@@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, ** Grab virtual index [0:11] ** Deposit virt_idx bits into I/O PDIR word */ - asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); + asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba)); asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci)); asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci)); diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 0c6e8b44b4ede7..c60b465f6fe45e 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d break; case DISPLAY_MODEL_LASI: + /* Skip to register LED in QEMU */ + if (running_on_qemu) + return 1; LED_DATA_REG = data_reg; led_func_ptr = led_LASI_driver; printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 11de0eccf96841..6dd1780a5885dd 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, pa = virt_to_phys(vba); pa &= IOVP_MASK; - mtsp(sid,1); - asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); + asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba)); pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5dc53d420ca8ca..7b4ee33c193544 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->devmodel = true; ret = device_register(&par_dev->dev); if (ret) { + kfree(par_dev->state); put_device(&par_dev->dev); goto err_put_port; } @@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); + kfree(par_dev->state); device_unregister(&par_dev->dev); goto err_put_port; } diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index e88bd221fffeab..5e199e7d2d4fd1 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -237,6 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) ks_dw_pcie_enable_error_irq(ks_pcie); } +#ifdef CONFIG_ARM /* * When a PCI device does not exist during config cycles, keystone host gets a * bus error instead of returning 0xffffffff. 
This handler always returns 0 @@ -256,6 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } +#endif static int __init ks_pcie_host_init(struct pcie_port *pp) { @@ -279,12 +281,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) val |= BIT(12); writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); +#ifdef CONFIG_ARM /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); +#endif return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index de8635af4cde29..739d97080d3bdb 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -385,6 +385,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; + unsigned int aligned_offset; u16 msg_ctrl, msg_data; u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; @@ -410,13 +411,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + msg_addr = ((u64)msg_addr_upper) << 32 | + (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index b56e22262a7741..acd50920c2ffde 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -303,20 +303,24 @@ void dw_pcie_free_msi(struct pcie_port *pp) irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); + + if (pp->msi_page) + __free_page(pp->msi_page); } void dw_pcie_msi_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct page *page; u64 msi_target; - page = alloc_page(GFP_KERNEL); - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + pp->msi_page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (dma_mapping_error(dev, pp->msi_data)) { dev_err(dev, "Failed to map MSI data\n"); - __free_page(page); + __free_page(pp->msi_page); + pp->msi_page = NULL; return; } msi_target = (u64)pp->msi_data; @@ -439,7 +443,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pci->num_viewport = 2; - if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { + if (pci_msi_enabled()) { /* * If a specific SoC driver needs to change the * default number of vectors, it needs to implement @@ -477,7 +481,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) { ret = pp->ops->host_init(pp); if (ret) - goto error; + goto err_free_msi; } pp->root_bus_nr = pp->busn->start; @@ -491,7 +495,7 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = pci_scan_root_bus_bridge(bridge); if (ret) - goto error; + goto err_free_msi; bus = bridge->bus; @@ -507,6 +511,9 @@ int dw_pcie_host_init(struct pcie_port *pp) 
pci_bus_add_devices(bus); return 0; +err_free_msi: + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); error: pci_free_host_bridge(bridge); return ret; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 9f1a5e399b7033..14dcf66466996c 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -164,6 +164,7 @@ struct pcie_port { struct irq_domain *irq_domain; struct irq_domain *msi_domain; dma_addr_t msi_data; + struct page *msi_page; u32 num_vectors; u32 irq_status[MAX_MSI_CTRLS]; raw_spinlock_t lock; diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index c8febb009454cd..9b9c677ad3a0b6 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -46,6 +46,7 @@ /* Transfer control */ #define PCIETCTLR 0x02000 +#define DL_DOWN BIT(3) #define CFINIT 1 #define PCIETSTR 0x02004 #define DATA_LINK_ACTIVE 1 @@ -94,6 +95,7 @@ #define MACCTLR 0x011058 #define SPEED_CHANGE BIT(24) #define SCRAMBLE_DISABLE BIT(27) +#define PMSR 0x01105c #define MACS2R 0x011078 #define MACCGSPSETR 0x011084 #define SPCNGRSN BIT(31) @@ -890,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; struct rcar_msi *msi = &pcie->msi; - unsigned long base; + phys_addr_t base; int err, i; mutex_init(&msi->lock); @@ -929,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) /* setup MSI data target */ msi->pages = __get_free_pages(GFP_KERNEL, 0); + if (!msi->pages) { + err = -ENOMEM; + goto err; + } base = virt_to_phys((void *)msi->pages); - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); + rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); /* enable all MSI interrupts */ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); @@ -1130,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; + platform_set_drvdata(pdev, pcie); err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); if (err) @@ -1221,10 +1228,28 @@ static int rcar_pcie_probe(struct platform_device *pdev) return err; } +static int rcar_pcie_resume_noirq(struct device *dev) +{ + struct rcar_pcie *pcie = dev_get_drvdata(dev); + + if (rcar_pci_read_reg(pcie, PMSR) && + !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) + return 0; + + /* Re-establish the PCIe link */ + rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); + return rcar_pcie_wait_for_dl(pcie); +} + +static const struct dev_pm_ops rcar_pcie_pm_ops = { + .resume_noirq = rcar_pcie_resume_noirq, +}; + static struct platform_driver rcar_pcie_driver = { .driver = { .name = "rcar-pcie", .of_match_table = rcar_pcie_of_match, + .pm = &rcar_pcie_pm_ops, .suppress_bind_attrs = true, }, .probe = rcar_pcie_probe, diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 7b1389d8e2a571..ea48cba5480b80 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = { * xilinx_pcie_enable_msi - Enable MSI support * @port: PCIe port information */ -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) { phys_addr_t msg_addr; port->msi_pages = 
__get_free_pages(GFP_KERNEL, 0); + if (!port->msi_pages) + return -ENOMEM; + msg_addr = virt_to_phys((void *)port->msi_pages); pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + + return 0; } /* INTx Functions */ @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; + int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - xilinx_pcie_enable_msi(port); + ret = xilinx_pcie_enable_msi(port); + if (ret) + return ret; } return 0; diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e2356a9c7088a4..182f9e3443eef8 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) if (rc == 0) break; } + of_node_put(parent); return dn; } @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, return np; } +/* Returns a device_node with its reference count incremented */ static struct device_node *find_dlpar_node(char *drc_name, int *node_type) { struct device_node *dn; @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name) rc = dlpar_add_phb(drc_name, dn); break; } + of_node_put(dn); printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name) rc = dlpar_remove_pci_slot(drc_name, dn); break; } + of_node_put(dn); vm_unmap_aliases(); printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index f8436d1c4d45fe..f7218c1673ceba 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -625,7 +625,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) if (!adev || !acpi_device_power_manageable(adev)) return false; - if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) + if (adev->wakeup.flags.valid && + device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) return true; if (acpi_target_system_state() == ACPI_STATE_S0) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 30649addc62525..61f2ef28ea1c73 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6135,8 +6135,7 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { - disable_acs_redir_param = - kstrdup(str + 18, GFP_KERNEL); + disable_acs_redir_param = str + 18; } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); @@ -6147,3 +6146,19 @@ static int __init pci_setup(char *str) return 0; } early_param("pci", pci_setup); + +/* + * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point + * to data in the __initdata section which will be freed after the init + * sequence is complete. We can't allocate memory in pci_setup() because some + * architectures do not have any memory allocation service available during + * an early_param() call. So we allocate memory and copy the variable here + * before the init section is freed. 
+ */
+static int __init pci_realloc_setup_params(void)
+{
+	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+	return 0;
+}
+pure_initcall(pci_realloc_setup_params);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6e0d1528d471c7..ab25752f00d96a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -530,7 +530,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
 void pci_aer_clear_device_status(struct pci_dev *dev);
 #else
 static inline void pci_no_aer(void) { }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_init(struct pci_dev *d) { }
 static inline void pci_aer_exit(struct pci_dev *d) { }
 static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
 static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f78860ce884bc5..1117b25fbe0bb9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -198,6 +198,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
 	link->clkpm_capable = (blacklist) ? 0 : capable;
 }
 
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+	struct pci_dev *parent = link->pdev;
+	unsigned long start_jiffies;
+	u16 reg16;
+
+	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+	reg16 |= PCI_EXP_LNKCTL_RL;
+	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	if (parent->clear_retrain_link) {
+		/*
+		 * Due to an erratum in some devices the Retrain Link bit
+		 * needs to be cleared again manually to allow the link
+		 * training to succeed.
+		 */
+		reg16 &= ~PCI_EXP_LNKCTL_RL;
+		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	}
+
+	/* Wait for link training end. Break out after waiting for timeout */
+	start_jiffies = jiffies;
+	for (;;) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+			break;
+		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+			break;
+		msleep(1);
+	}
+	return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
 /*
  * pcie_aspm_configure_common_clock: check if the 2 ends of a link
  * could use common clock. If they are, configure them to use the
@@ -207,7 +239,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 {
 	int same_clock = 1;
 	u16 reg16, parent_reg, child_reg[8];
-	unsigned long start_jiffies;
 	struct pci_dev *child, *parent = link->pdev;
 	struct pci_bus *linkbus = parent->subordinate;
 	/*
@@ -265,21 +296,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 		reg16 &= ~PCI_EXP_LNKCTL_CCC;
 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
 
-	/* Retrain link */
-	reg16 |= PCI_EXP_LNKCTL_RL;
-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
-	/* Wait for link training end. Break out after waiting for timeout */
-	start_jiffies = jiffies;
-	for (;;) {
-		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
-		if (!(reg16 & PCI_EXP_LNKSTA_LT))
-			break;
-		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
-			break;
-		msleep(1);
-	}
-	if (!(reg16 & PCI_EXP_LNKSTA_LT))
+	if (pcie_retrain_link(link))
 		return;
 
 	/* Training failed.
Restore common clock configurations */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4a4c16bfc0d326..fa4c386c8cd84d 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -535,16 +535,9 @@ static void pci_release_host_bridge_dev(struct device *dev) kfree(to_pci_host_bridge(dev)); } -struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +static void pci_init_host_bridge(struct pci_host_bridge *bridge) { - struct pci_host_bridge *bridge; - - bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); - if (!bridge) - return NULL; - INIT_LIST_HEAD(&bridge->windows); - bridge->dev.release = pci_release_host_bridge_dev; /* * We assume we can manage these PCIe features. Some systems may @@ -557,6 +550,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) bridge->native_shpc_hotplug = 1; bridge->native_pme = 1; bridge->native_ltr = 1; +} + +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +{ + struct pci_host_bridge *bridge; + + bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); + if (!bridge) + return NULL; + + pci_init_host_bridge(bridge); + bridge->dev.release = pci_release_host_bridge_dev; return bridge; } @@ -571,7 +576,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, if (!bridge) return NULL; - INIT_LIST_HEAD(&bridge->windows); + pci_init_host_bridge(bridge); bridge->dev.release = devm_pci_release_host_bridge_dev; return bridge; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 37d897bc4cf1bb..28c64f84bfe72b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2220,6 +2220,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +/* + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain + * Link bit cleared after starting the link retrain process to allow this + * process to finish. + * + * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the + * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf. 
+ */ +static void quirk_enable_clear_retrain_link(struct pci_dev *dev) +{ + dev->clear_retrain_link = 1; + pci_info(dev, "Enable PCIe Retrain Link quirk\n"); +} +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link); + static void fixup_rev1_53c810(struct pci_dev *dev) { u32 class = dev->class; @@ -3383,6 +3400,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); /* * Root port on some Cavium CN8xxx chips do not successfully complete a bus @@ -4878,6 +4896,7 @@ static void quirk_no_ats(struct pci_dev *pdev) /* AMD Stoney platform GPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 37d0c15c9eeb0b..72db2e0ebced56 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1116,7 +1116,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) return 0; - if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE || + eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP) return 0; dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 1bfeb160c5b16b..14a541c453e588 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1692,21 +1692,24 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cci_pmu->cpu = get_cpu(); - - ret = cci_pmu_init(cci_pmu, pdev); - if (ret) { - put_cpu(); - return ret; - } + cci_pmu->cpu = raw_smp_processor_id(); + g_cci_pmu = cci_pmu; cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, "perf/arm/cci:online", NULL, cci_pmu_offline_cpu); - put_cpu(); - g_cci_pmu = cci_pmu; + + ret = cci_pmu_init(cci_pmu, pdev); + if (ret) + goto error_pmu_init; + pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; + +error_pmu_init: + cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); + g_cci_pmu = NULL; + return ret; } static int cci_pmu_remove(struct platform_device *pdev) diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 15c8fc2abf01fc..1f8809bab002cd 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -550,6 +550,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; + struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; @@ -606,6 +607,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) mutex_unlock(&phy0->mutex); } + /* 
Enable PHY0 passby for host mode only. */ + sun4i_usb_phy_passby(phy, !id_det); + /* Re-route PHY0 if necessary */ if (data->cfg->phy0_dual_route) sun4i_usb_phy0_reroute(data, id_det); diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig index 82651524ffb9ce..718f8729701df1 100644 --- a/drivers/phy/motorola/Kconfig +++ b/drivers/phy/motorola/Kconfig @@ -13,7 +13,7 @@ config PHY_CPCAP_USB config PHY_MAPPHONE_MDM6600 tristate "Motorola Mapphone MDM6600 modem USB PHY driver" - depends on OF && USB_SUPPORT + depends on OF && USB_SUPPORT && GPIOLIB select GENERIC_PHY help Enable this for MDM6600 USB modem to work on Motorola phones diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 68ce4a082b9b22..693acc167351cf 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy) val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY); val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV); - val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT); + val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT); ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val); val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES); diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 302190d1558d98..0d7d379e9bb80e 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1368,6 +1368,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl) if (!of_find_property(child, "gpio-controller", NULL)) { dev_err(pctl->dev, "No gpio-controller property for bank %u\n", i); + of_node_put(child); ret = -ENODEV; goto err; } @@ -1375,6 +1376,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl) irq = irq_of_parse_and_map(child, 0); if (irq < 0) { dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq); + of_node_put(child); ret = irq; goto err; } diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 44c6b753f692ad..85ddf49a518859 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata, } clk_base = of_iomap(np, 0); + of_node_put(np); if (!clk_base) { pr_err("%s: failed to map clock registers\n", __func__); return ERR_PTR(-EINVAL); diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c index caa44dd2880a82..3cb69309912bad 100644 --- a/drivers/pinctrl/zte/pinctrl-zx.c +++ b/drivers/pinctrl/zte/pinctrl-zx.c @@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev, } zpctl->aux_base = of_iomap(np, 0); + of_node_put(np); if (!zpctl->aux_base) return -ENOMEM; diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index e5d5b1adb5a95b..ac784ac66ac341 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev, else xfer_fxn = ec_dev->cmd_xfer; + if (!xfer_fxn) { + /* + * This error can happen if a communication error happened and + * the EC is trying to use protocol v2, on an underlying + * communication mechanism that does not support v2. 
+ */ + dev_err_once(ec_dev->dev, + "missing EC transfer API, cannot send command\n"); + return -EIO; + } + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index e7edc8c6393674..4ad9d127f2f58b 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -776,13 +776,17 @@ static int ipc_create_pmc_devices(void) if (ret) { dev_err(ipcdev.dev, "Failed to add punit platform device\n"); platform_device_unregister(ipcdev.tco_dev); + return ret; } if (!ipcdev.telem_res_inval) { ret = ipc_create_telemetry_device(); - if (ret) + if (ret) { dev_warn(ipcdev.dev, "Failed to add telemetry platform device\n"); + platform_device_unregister(ipcdev.punit_dev); + platform_device_unregister(ipcdev.tco_dev); + } } return ret; diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index c7039f52ad5180..b1d8043762371c 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -398,12 +398,45 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) */ static const struct dmi_system_id critclk_systems[] = { { + /* pmc_plt_clk0 is used for an external HSIC USB HUB */ .ident = "MPL CEC1x", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), }, }, + { + /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */ + .ident = "Lex 3I380D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"), + DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB3163", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB3163"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6263", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6263"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6363", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6363"), + }, + }, { /*sentinel*/ } }; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 735658ee1c60f3..c60659fb21deca 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev) /* Register charger interrupts */ for (i = 0; i < CHRG_INTR_END; i++) { pirq = platform_get_irq(info->pdev, i); + if (pirq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq); + return pirq; + } info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq); if (info->irq[i] < 0) { dev_warn(&info->pdev->dev, diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 084c8ba9749d7c..ab0b6e78ca02a6 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -695,6 +695,26 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info) * detection reports one despite it not being there. 
*/ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { + { + /* ACEPC T8 Cherry Trail Z8350 mini PC */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"), + /* also match on somewhat unique bios-version */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), + }, + }, + { + /* ACEPC T11 Cherry Trail Z8350 mini PC */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"), + /* also match on somewhat unique bios-version */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), + }, + }, { /* Intel Cherry Trail Compute Stick, Windows version */ .matches = { diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 98ba07869c3b00..3bae02380bb229 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata, int avg_current; u32 cc_lsb; + if (!divider) + return 0; + sample &= 0xffffff; /* 24-bits, unsigned */ offset &= 0x7ff; /* 10-bits, signed */ diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c index b91b1d2999dc6d..d19307f791c689 100644 --- a/drivers/power/supply/max14656_charger_detector.c +++ b/drivers/power/supply/max14656_charger_detector.c @@ -280,6 +280,13 @@ static int max14656_probe(struct i2c_client *client, INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker); + chip->detect_psy = devm_power_supply_register(dev, + &chip->psy_desc, &psy_cfg); + if (IS_ERR(chip->detect_psy)) { + dev_err(dev, "power_supply_register failed\n"); + return -EINVAL; + } + ret = devm_request_irq(dev, chip->irq, max14656_irq, IRQF_TRIGGER_FALLING, MAX14656_NAME, chip); @@ -289,13 +296,6 @@ static int max14656_probe(struct i2c_client *client, } enable_irq_wake(chip->irq); - chip->detect_psy = devm_power_supply_register(dev, - &chip->psy_desc, &psy_cfg); - if (IS_ERR(chip->detect_psy)) { - dev_err(dev, "power_supply_register failed\n"); - return -EINVAL; - } - schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000)); return 0; diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 6170ed8b6854b9..5a2757a7f40885 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -382,15 +382,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) char *prop_buf; char *attrname; - dev_dbg(dev, "uevent\n"); - if (!psy || !psy->desc) { dev_dbg(dev, "No power supply yet\n"); return ret; } - dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name); - ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name); if (ret) return ret; @@ -426,8 +422,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) goto out; } - dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); kfree(attrname); if (ret) diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 1581f6ab1b1f42..c45e5719ba172c 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, if (IS_ENABLED(CONFIG_OF)) of_pwmchip_add(chip); - pwmchip_sysfs_export(chip); - out: mutex_unlock(&pwm_lock); + + if (!ret) + 
pwmchip_sysfs_export(chip); + return ret; } EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); @@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip) unsigned int i; int ret = 0; - pwmchip_sysfs_unexport_children(chip); + pwmchip_sysfs_unexport(chip); mutex_lock(&pwm_lock); @@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip) free_pwms(chip); - pwmchip_sysfs_unexport(chip); - out: mutex_unlock(&pwm_lock); return ret; diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index c1ed641b3e2662..f6e738ad7bd929 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -111,6 +111,10 @@ struct meson_pwm { const struct meson_pwm_data *data; void __iomem *base; u8 inverter_mask; + /* + * Protects register (write) access to the REG_MISC_AB register + * that is shared between the two PWMs. + */ spinlock_t lock; }; @@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, { u32 value, clk_shift, clk_enable, enable; unsigned int offset; + unsigned long flags; switch (id) { case 0: @@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson, return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~(MISC_CLK_DIV_MASK << clk_shift); value |= channel->pre_div << clk_shift; @@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson, value = readl(meson->base + REG_MISC_AB); value |= enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) { u32 value, enable; + unsigned long flags; switch (id) { case 0: @@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, @@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); struct meson_pwm *meson = to_meson_pwm(chip); - unsigned long flags; int err = 0; if (!state) return -EINVAL; - spin_lock_irqsave(&meson->lock, flags); - if (!state->enabled) { meson_pwm_disable(meson, pwm->hwpwm); channel->state.enabled = false; - goto unlock; + return 0; } if (state->period != channel->state.period || @@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, err = meson_pwm_calc(meson, channel, pwm->hwpwm, state->duty_cycle, state->period); if (err < 0) - goto unlock; + return err; channel->state.polarity = state->polarity; channel->state.period = state->period; @@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, channel->state.enabled = true; } -unlock: - spin_unlock_irqrestore(&meson->lock, flags); - return err; + return 0; } static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index f7b8a86fa5c5e9..ad4a40c0f27cf4 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) } /* Update shadow register first before modifying active register */ + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, + AQSFRC_RLDCSF_ZRO); ehrpwm_modify(pc->mmio_base, 
AQCSFRC, aqcsfrc_mask, aqcsfrc_val); /* * Changes to immediate action on Action Qualifier. This puts diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 7c71cdb8a9d8f9..1c64fd8e9234a9 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip) } void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ - struct device *parent; - - parent = class_find_device(&pwm_class, NULL, chip, - pwmchip_sysfs_match); - if (parent) { - /* for class_find_device() */ - put_device(parent); - device_unregister(parent); - } -} - -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) { struct device *parent; unsigned int i; @@ -429,6 +416,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) } put_device(parent); + device_unregister(parent); } static int __init pwm_sysfs_init(void) diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index bad0e0ea4f3059..ef989a15aefc41 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev, mutex_init(&cm->rx_lock); riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); + if (!cm->rx_wq) { + riocm_error("failed to allocate IBMBOX_%d on %s", + cmbox, mport->name); + rio_release_outb_mbox(mport, cmbox); + kfree(cm); + return -ENOMEM; + } + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); cm->tx_slot = 0; diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 2d9ec378a8bc34..f85d6b7a198485 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -2,6 +2,7 @@ #include #include #include +#include #include @@ -123,16 +124,12 @@ static u64 dfs_pfn; /* Amount of errors after which we offline */ static unsigned int count_threshold = COUNT_MASK; -/* - * The timer "decays" element count each timer_interval which is 24hrs by - * default. - */ - -#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ -#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */ -#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ -static struct timer_list cec_timer; -static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL; +/* Each element "decays" each decay_interval which is 24hrs by default. */ +#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ +#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */ +#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ +static struct delayed_work cec_work; +static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL; /* * Decrement decay value. 
We're using DECAY_BITS bits to denote decay of an @@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca) /* * @interval in seconds */ -static void cec_mod_timer(struct timer_list *t, unsigned long interval) +static void cec_mod_work(unsigned long interval) { unsigned long iv; - iv = interval * HZ + jiffies; - - mod_timer(t, round_jiffies(iv)); + iv = interval * HZ; + mod_delayed_work(system_wq, &cec_work, round_jiffies(iv)); } -static void cec_timer_fn(struct timer_list *unused) +static void cec_work_fn(struct work_struct *work) { + mutex_lock(&ce_mutex); do_spring_cleaning(&ce_arr); + mutex_unlock(&ce_mutex); - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); } /* @@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused) */ static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) { + int min = 0, max = ca->n - 1; u64 this_pfn; - int min = 0, max = ca->n; - while (min < max) { - int tmp = (max + min) >> 1; + while (min <= max) { + int i = (min + max) >> 1; - this_pfn = PFN(ca->array[tmp]); + this_pfn = PFN(ca->array[i]); if (this_pfn < pfn) - min = tmp + 1; + min = i + 1; else if (this_pfn > pfn) - max = tmp; - else { - min = tmp; - break; + max = i - 1; + else if (this_pfn == pfn) { + if (to) + *to = i; + + return i; } } + /* + * When the loop terminates without finding @pfn, min has the index of + * the element slot where the new @pfn should be inserted. The loop + * terminates when min > max, which means the min index points to the + * bigger element while the max index to the smaller element, in-between + * which the new @pfn belongs to. + * + * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3. + */ if (to) *to = min; - this_pfn = PFN(ca->array[min]); - - if (this_pfn == pfn) - return min; - return -ENOKEY; } @@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val) { *(u64 *)data = val; - if (val < CEC_TIMER_MIN_INTERVAL) + if (val < CEC_DECAY_MIN_INTERVAL) return -EINVAL; - if (val > CEC_TIMER_MAX_INTERVAL) + if (val > CEC_DECAY_MAX_INTERVAL) return -EINVAL; - timer_interval = val; + decay_interval = val; - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n"); @@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v) seq_printf(m, "Flags: 0x%x\n", ca->flags); - seq_printf(m, "Timer interval: %lld seconds\n", timer_interval); + seq_printf(m, "Decay interval: %lld seconds\n", decay_interval); seq_printf(m, "Decays: %lld\n", ca->decays_done); seq_printf(m, "Action threshold: %d\n", count_threshold); @@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void) } decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d, - &timer_interval, &decay_interval_ops); + &decay_interval, &decay_interval_ops); if (!decay) { pr_warn("Error creating decay_interval debugfs node!\n"); goto err; @@ -508,8 +512,8 @@ void __init cec_init(void) if (create_debugfs_nodes()) return; - timer_setup(&cec_timer, cec_timer_fn, 0); - cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL); + INIT_DELAYED_WORK(&cec_work, cec_work_fn); + schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); pr_info("Correctable Errors collector initialized.\n"); } diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 01ffc0ef8033f8..fbcf13bbbd8d11 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -414,7 +414,7 @@ static int 
pm860x_rtc_remove(struct platform_device *pdev) struct pm860x_rtc_info *info = platform_get_drvdata(pdev); #ifdef VRTC_CALIBRATION - flush_scheduled_work(); + cancel_delayed_work_sync(&info->calib_work); /* disable measurement */ pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); #endif /* VRTC_CALIBRATION */ diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 453615f8ac9a01..3fcd2cbafc8457 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -85,6 +85,18 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value) return 0; } +static int pcf8523_voltage_low(struct i2c_client *client) +{ + u8 value; + int err; + + err = pcf8523_read(client, REG_CONTROL3, &value); + if (err < 0) + return err; + + return !!(value & REG_CONTROL3_BLF); +} + static int pcf8523_select_capacitance(struct i2c_client *client, bool high) { u8 value; @@ -167,6 +179,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) struct i2c_msg msgs[2]; int err; + err = pcf8523_voltage_low(client); + if (err < 0) { + return err; + } else if (err > 0) { + dev_err(dev, "low voltage detected, time is unreliable\n"); + return -EINVAL; + } + msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 1; @@ -251,17 +271,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct i2c_client *client = to_i2c_client(dev); - u8 value; - int ret = 0, err; + int ret; switch (cmd) { case RTC_VL_READ: - err = pcf8523_read(client, REG_CONTROL3, &value); - if (err < 0) - return err; - - if (value & REG_CONTROL3_BLF) - ret = 1; + ret = pcf8523_voltage_low(client); + if (ret < 0) + return ret; if (copy_to_user((void __user *)arg, &ret, sizeof(int))) return -EFAULT; diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c index c5908cfea2340f..8e6c9b3bcc29a4 100644 --- a/drivers/rtc/rtc-stm32.c +++ b/drivers/rtc/rtc-stm32.c @@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev) ret = device_init_wakeup(&pdev->dev, true); if (rtc->data->has_wakeirq) { rtc->wakeirq_alarm = platform_get_irq(pdev, 1); - if (rtc->wakeirq_alarm <= 0) - ret = rtc->wakeirq_alarm; - else + if (rtc->wakeirq_alarm > 0) { ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, rtc->wakeirq_alarm); + } else { + ret = rtc->wakeirq_alarm; + if (rtc->wakeirq_alarm == -EPROBE_DEFER) + goto err; + } } if (ret) dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret); diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c index 153820876a8200..2f741f455c30a9 100644 --- a/drivers/rtc/rtc-xgene.c +++ b/drivers/rtc/rtc-xgene.c @@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev) if (IS_ERR(pdata->csr_base)) return PTR_ERR(pdata->csr_base); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); @@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev) return ret; } - pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &xgene_rtc_ops, THIS_MODULE); - if (IS_ERR(pdata->rtc)) { - clk_disable_unprepare(pdata->clk); - return PTR_ERR(pdata->rtc); - } - /* HW does not support update faster than 1 seconds */ pdata->rtc->uie_unsupported = 1; + pdata->rtc->ops = &xgene_rtc_ops; + + ret = rtc_register_device(pdata->rtc); + if (ret) { + clk_disable_unprepare(pdata->clk); + return ret; + } return 0; } diff --git 
a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 9811fd8a0c7310..92eabbb5f18d44 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -115,7 +115,7 @@ struct subchannel { struct schib_config config; } __attribute__ ((aligned(8))); -DECLARE_PER_CPU(struct irb, cio_irb); +DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index fabd9798e4c470..7a06cdff6572d8 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) if (ret != -EBUSY) goto out_unlock; + iretry = 255; do { - iretry = 255; ret = cio_cancel_halt_clear(sch, &iretry); - while (ret == -EBUSY) { - /* - * Flush all I/O and wait for - * cancel/halt/clear completion. - */ - private->completion = &completion; - spin_unlock_irq(sch->lock); - wait_for_completion_timeout(&completion, 3*HZ); + if (ret == -EIO) { + pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n", + sch->schid.ssid, sch->schid.sch_no); + break; + } + + /* + * Flush all I/O and wait for + * cancel/halt/clear completion. + */ + private->completion = &completion; + spin_unlock_irq(sch->lock); - spin_lock_irq(sch->lock); - private->completion = NULL; - flush_workqueue(vfio_ccw_work_q); - ret = cio_cancel_halt_clear(sch, &iretry); - }; + if (ret == -EBUSY) + wait_for_completion_timeout(&completion, 3*HZ); + private->completion = NULL; + flush_workqueue(vfio_ccw_work_q); + spin_lock_irq(sch->lock); ret = cio_disable_subchannel(sch); } while (ret == -EBUSY); out_unlock: diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index f673e106c04153..dc5ff47de3feec 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { - if (!vfio_ccw_mdev_reset(mdev)) + if (!vfio_ccw_sch_quiesce(private->sch)) private->state = VFIO_CCW_STATE_STANDBY; /* The state will be NOT_OPER on error. */ } + cp_free(&private->cp); private->mdev = NULL; atomic_inc(&private->avail); @@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev) struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && + (private->state != VFIO_CCW_STATE_STANDBY)) { + if (!vfio_ccw_mdev_reset(mdev)) + private->state = VFIO_CCW_STATE_STANDBY; + /* The state will be NOT_OPER on error. 
*/ + } + + cp_free(&private->cp); vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &private->nb); } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index e6854127b4343d..b2737bfeb8bb68 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -224,6 +224,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); if (mex->outputdatalength < mex->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -298,6 +299,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) trace_s390_zcrypt_req(crt, TP_ICARSACRT); if (crt->outputdatalength < crt->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -483,6 +485,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); if (!targets) { + func_code = 0; rc = -ENOMEM; goto out; } @@ -490,6 +493,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) uptr = (struct ep11_target_dev __force __user *) xcrb->targets; if (copy_from_user(targets, uptr, target_num * sizeof(*targets))) { + func_code = 0; rc = -EFAULT; goto out_free; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2d1f6a583641b6..b2657582cfcfd1 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -201,6 +201,12 @@ struct qeth_vnicc_info { bool rx_bcast_enabled; }; +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_setadp_cmd func) +{ + return (ipa->supported_funcs & func); +} + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -214,9 +220,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, } #define qeth_adp_supported(c, f) \ - qeth_is_ipa_supported(&c->options.adp, f) -#define qeth_adp_enabled(c, f) \ - qeth_is_ipa_enabled(&c->options.adp, f) + qeth_is_adp_supported(&c->options.adp, f) #define qeth_is_supported(c, f) \ qeth_is_ipa_supported(&c->options.ipa4, f) #define qeth_is_enabled(c, f) \ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 56aacf32f71b07..461afc276db72b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1370,7 +1370,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card) card->qdio.no_out_queues = 4; } -static void qeth_update_from_chp_desc(struct qeth_card *card) +static int qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channel_path_desc_fmt0 *chp_dsc; @@ -1380,7 +1380,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) ccwdev = card->data.ccwdev; chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) - goto out; + return -ENOMEM; card->info.func_level = 0x4100 + chp_dsc->desc; if (card->info.type == QETH_CARD_TYPE_IQD) @@ -1395,6 +1395,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); + return 0; } static void qeth_init_qdio_info(struct qeth_card *card) @@ -5090,7 +5091,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; retry: if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", @@ -5768,7 
+5771,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) gdev->cdev[2]->handler = qeth_irq; qeth_setup_card(card); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; card->dev = qeth_alloc_netdev(card); if (!card->dev) { @@ -5806,6 +5811,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) qeth_core_free_discipline(card); err_load: free_netdev(card->dev); +err_chp_desc: err_card: qeth_core_free_card(card); err_dev: diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b7513c5848cf9a..c1c35eccd5b653 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1901,7 +1901,7 @@ static void qeth_bridgeport_an_set_cb(void *priv, l2entry = (struct qdio_brinfo_entry_l2 *)entry; code = IPA_ADDR_CHANGE_CODE_MACADDR; - if (l2entry->addr_lnid.lnid) + if (l2entry->addr_lnid.lnid < VLAN_N_VID) code |= IPA_ADDR_CHANGE_CODE_VLANID; qeth_bridge_emit_host_event(card, anev_reg_unreg, code, (struct net_if_token *)&l2entry->nit, diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 1b4d6a3afb8f8a..3d971ffbd4bc2d 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -164,6 +164,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[]; extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; +bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port); /* zfcp_unit.c */ extern int zfcp_unit_add(struct zfcp_port *, u64); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index a4bbfa4ef653c0..588bf5ac6fb96f 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -125,6 +125,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) zfcp_sdev->erp_action.port = port; + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (zfcp_sysfs_port_is_removing(port)) { + /* port is already gone */ + mutex_unlock(&zfcp_sysfs_port_units_mutex); + put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */ + return -ENXIO; + } + mutex_unlock(&zfcp_sysfs_port_units_mutex); + unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); if (unit) put_device(&unit->dev); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index b277be6f7611cc..af197e2b3e69d1 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -235,6 +235,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); +static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port) +{ + lockdep_assert_held(&zfcp_sysfs_port_units_mutex); + atomic_set(&port->units, -1); +} + +bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port) +{ + lockdep_assert_held(&zfcp_sysfs_port_units_mutex); + return atomic_read(&port->units) == -1; +} + +static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port) +{ + struct zfcp_adapter *const adapter = port->adapter; + unsigned long flags; + struct scsi_device *sdev; + bool in_use = true; + + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) > 0) + goto unlock_port_units_mutex; /* zfcp_unit(s) under port */ + + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { + const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); + + if (sdev->sdev_state == SDEV_DEL || + 
sdev->sdev_state == SDEV_CANCEL) + continue; + if (zsdev->port != port) + continue; + /* alive scsi_device under port of interest */ + goto unlock_host_lock; + } + + /* port is about to be removed, so no more unit_add or slave_alloc */ + zfcp_sysfs_port_set_removing(port); + in_use = false; + +unlock_host_lock: + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); +unlock_port_units_mutex: + mutex_unlock(&zfcp_sysfs_port_units_mutex); + return in_use; +} + static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -257,15 +304,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, else retval = 0; - mutex_lock(&zfcp_sysfs_port_units_mutex); - if (atomic_read(&port->units) > 0) { + if (zfcp_sysfs_port_in_use(port)) { retval = -EBUSY; - mutex_unlock(&zfcp_sysfs_port_units_mutex); + put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */ goto out; } - /* port is about to be removed, so no more unit_add */ - atomic_set(&port->units, -1); - mutex_unlock(&zfcp_sysfs_port_units_mutex); write_lock_irq(&adapter->port_list_lock); list_del(&port->list); diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 1bf0a0984a098f..e67bf7388cae82 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -124,7 +124,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) int retval = 0; mutex_lock(&zfcp_sysfs_port_units_mutex); - if (atomic_read(&port->units) == -1) { + if (zfcp_sysfs_port_is_removing(port)) { /* port is already gone */ retval = -ENODEV; goto out; @@ -168,8 +168,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); + /* + * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex + * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc() + */ + mutex_unlock(&zfcp_sysfs_port_units_mutex); zfcp_unit_scsi_scan(unit); + return retval; out: mutex_unlock(&zfcp_sysfs_port_units_mutex); diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e8ae4d671d233b..097305949a955c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -830,7 +830,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) ((u64)err_entry->data.err_warn_bitmap_hi << 32) | (u64)err_entry->data.err_warn_bitmap_lo; for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { - if (err_warn_bit_map & (u64) (1 << i)) { + if (err_warn_bit_map & ((u64)1 << i)) { err_warn = i; break; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index f2c561ca731a32..cd2c247d6d0ce1 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -641,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) if (ndev->flags & IFF_LOOPBACK) { ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); + if (!ndev) { + err = -ENETUNREACH; + goto rel_neigh; + } mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", n->dev->name, ndev->name, mtu); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 12dc7100bb4c24..d1154baa9436a0 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1173,10 +1173,8 @@ static int __init alua_init(void) int r; kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); - if (!kaluad_wq) { - /* Temporary 
failure, bypass */ - return SCSI_DH_DEV_TEMP_BUSY; - } + if (!kaluad_wq) + return -ENOMEM; r = scsi_register_device_handler(&alua_dh); if (r != 0) { diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index e9ecc667e3fb4d..b141d1061f38ea 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander( list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); + sas_port_delete(phy->port); + phy->port = NULL; return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); @@ -2040,6 +2042,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); + /* + * Even though the PHY is empty, for convenience we discover + * the PHY to update the PHY info, like negotiated linkrate. + */ + sas_ex_phy_discover(dev, phy_id); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index cb19b12e72116d..55cd96e2469c65 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -341,7 +341,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.scsi_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -357,7 +357,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; @@ -384,39 +384,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* An NVME rport can have multiple roles. 
*/ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -424,12 +424,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } rcu_read_unlock(); @@ -491,7 +491,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); -buffer_done: + /* RCU is already unlocked. */ + goto buffer_done; + + rcu_unlock_buf_done: + rcu_read_unlock(); + + buffer_done: len = strnlen(buf, PAGE_SIZE); if (unlikely(len >= (PAGE_SIZE - 1))) { diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 1a964e71582f4a..d909d90035bb25 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1762,6 +1762,9 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 256); + /* This string MUST be consistent with other FC platforms + * supported by Broadcom. + */ strncpy(ae->un.AttrString, "Emulex Corporation", sizeof(ae->un.AttrString)); @@ -2117,10 +2120,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 32); - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ - ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */ - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ + if (vport->nvmei_support || vport->phba->nvmet_support) + ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */ + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ size = FOURBYTES + 32; ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES); @@ -2425,9 +2429,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 32); - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ + if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */ + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ size = FOURBYTES + 32; ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 0d214e6b8e9aeb..f3c6801c0b3128 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7094,7 +7094,10 @@ int lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = 
lpfc_findnode_did(rrq->vport, - rrq->nlp_DID); + rrq->nlp_DID); + if (!ndlp) + return 1; + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) return lpfc_issue_els_rrq(rrq->vport, ndlp, rrq->nlp_DID, rrq); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index eb71877f12f8b7..ccdd82b1123f7e 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -921,7 +921,11 @@ lpfc_linkdown(struct lpfc_hba *phba) } } lpfc_destroy_vport_work_array(phba, vports); - /* Clean up any firmware default rpi's */ + + /* Clean up any SLI3 firmware default rpi's */ + if (phba->sli_rev > LPFC_SLI_REV3) + goto skip_unreg_did; + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); @@ -933,6 +937,7 @@ lpfc_linkdown(struct lpfc_hba *phba) } } + skip_unreg_did: /* Setup myDID for link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -4855,6 +4860,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport) LPFC_MBOXQ_t *mbox; int rc; + /* Unreg DID is an SLI3 operation. */ + if (phba->sli_rev > LPFC_SLI_REV3) + return; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index ca62117a2d1319..645ffb5332b4ab 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2477,6 +2477,9 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; + cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) * phba->cfg_nvme_io_channel), GFP_KERNEL); if (!cstat) @@ -2485,12 +2488,9 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. 
*/ -#if (IS_ENABLED(CONFIG_NVME_FC)) + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, &vport->phba->pcidev->dev, &localport); -#else - ret = -ENOMEM; -#endif if (!ret) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, "6005 Successfully registered local " diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 6bbc38b1b4654d..a17c13846d1eb3 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; } /* Obtain free SQE */ diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 8fd28b056f73f3..3383314a3882bc 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } @@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", - dev_name(&qedi->pdev->dev), nfunc, line, + dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else - pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); diff 
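The lpfc_nvme.c change above replaces an #if (IS_ENABLED(CONFIG_NVME_FC)) / #else block with a single early return guarded by IS_ENABLED(). A hedged sketch of the idiom with a made-up config symbol and driver structure, not the lpfc code:

static int example_register_transport(struct example_hba *hba)
{
	/*
	 * IS_ENABLED() is a compile-time constant, so the dead branch is
	 * discarded, but the code below stays visible to the compiler in
	 * every configuration, unlike an #ifdef block.
	 */
	if (!IS_ENABLED(CONFIG_EXAMPLE_TRANSPORT))
		return 0;

	return example_transport_register(&hba->tmpl, &hba->dev);
}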
--git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index d4821b9dea45d8..1b7049dce1699b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -810,8 +810,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, struct qedi_endpoint *qedi_ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; - struct qed_dev *cdev = NULL; - struct qedi_uio_dev *udev = NULL; struct iscsi_path path_req; u32 msg_type = ISCSI_KEVENT_IF_DOWN; u32 iscsi_cid = QEDI_CID_RESERVED; @@ -831,8 +829,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi = iscsi_host_priv(shost); - cdev = qedi->cdev; - udev = qedi->udev; if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { @@ -1001,6 +997,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + goto ep_exit_recover; + flush_work(&qedi_ep->offload_work); if (qedi_ep->conn) { diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index de3f2a097451d0..1f1a05a90d3d7f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3261,6 +3261,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (res == QLA_FUNCTION_TIMEOUT) return; @@ -4604,6 +4606,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 36cbb29c84f639..88d8acf86a2a44 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3449,7 +3449,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ql_log(ql_log_fatal, vha, 0x00c8, "Failed to allocate memory for ha->msix_entries.\n"); ret = -ENOMEM; - goto msix_out; + goto free_irqs; } ha->flags.msix_enabled = 1; @@ -3532,6 +3532,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) msix_out: return ret; + +free_irqs: + pci_free_irq_vectors(ha->pdev); + goto msix_out; } int diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index a8c67cd176256a..9d7feb005acfdc 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -684,7 +684,6 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { fc_port_t *t; - unsigned long flags; switch (e->u.nack.type) { case SRB_NACK_PRLI: @@ -694,10 +693,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) if (t) { ql_log(ql_log_info, vha, 0xd034, "%s create sess success %p", __func__, t); - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create sess has an extra kref */ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } break; } @@ -709,9 +706,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, del_work); struct qla_hw_data *ha = fcport->vha->hw; - unsigned long flags; - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (fcport->se_sess) { ha->tgt.tgt_ops->shutdown_sess(fcport); @@ -719,7 +713,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work) } else { 
qlt_unreg_sess(fcport); } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* @@ -788,8 +781,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->port_name, sess->loop_id); sess->local = 0; } - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); } /* @@ -4135,9 +4129,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) /* * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: @@ -4154,9 +4146,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) target_free_tag(sess->se_sess, &cmd->se_cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_do_work(struct work_struct *work) @@ -4365,9 +4355,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -EBUSY; } @@ -6105,17 +6093,19 @@ static void qlt_abort_work(struct qla_tgt *tgt, } rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + ha->tgt.tgt_ops->put_sess(sess); + if (rc != 0) goto out_term; return; out_term2: + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + if (sess) ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: spin_lock_irqsave(&ha->hardware_lock, flags); @@ -6175,9 +6165,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt, scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + if (rc != 0) goto out_term; return; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 64e2d859f63324..b8c1a739dfbd1f 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -350,7 +350,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess) if (!sess) return; - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); } @@ -365,8 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - tcm_qla2xxx_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + tcm_qla2xxx_put_sess(sess); } static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) @@ -390,6 +390,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); return 0; } cmd->trc_flags |= TRC_XFR_RDY; @@ -829,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); } diff --git 
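The qla2xxx/tcm_qla2xxx hunks above consistently move put_sess() calls outside the sess_lock spinlock, because the final kref_put() can run a release path that must not execute under that lock. A compact sketch of the general shape, using invented names rather than the qla2xxx API:

static void example_sess_release(struct kref *kref)
{
	struct example_sess *sess = container_of(kref, struct example_sess, kref);

	kfree(sess);	/* real release paths may also sleep or flush work */
}

static void example_handle_event(struct example_ha *ha, struct example_sess *sess)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->sess_lock, flags);
	example_update_sess_state(sess);	/* work that needs the lock */
	spin_unlock_irqrestore(&ha->sess_lock, flags);

	/*
	 * Drop the reference only after the lock is released; the release
	 * callback may take ha->sess_lock itself or sleep.
	 */
	kref_put(&sess->kref, example_sess_release);
}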
a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 5dd3e4e01b1096..25c8ce54a976dc 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -5935,7 +5935,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) val = rd_nvram_byte(ha, sec_addr); if (val & BIT_7) ddb_index[1] = (val & 0x7f); - + goto exit_boot_info; } else if (is_qla80XX(ha)) { buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e925eda931916d..77cb45ef55fc57 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2605,7 +2605,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) int res; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; - int disk_ro = get_disk_ro(sdkp->disk); int old_wp = sdkp->write_prot; set_disk_ro(sdkp->disk, 0); @@ -2646,7 +2645,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); - set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); + set_disk_ro(sdkp->disk, sdkp->write_prot); if (sdkp->first_scan || old_wp != sdkp->write_prot) { sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", sdkp->write_prot ? "on" : "off"); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 3781e8109dd740..98f2d076f938f3 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -3697,8 +3697,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, return -ETIMEDOUT; msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies); - if (msecs_blocked >= timeout_msecs) - return -ETIMEDOUT; + if (msecs_blocked >= timeout_msecs) { + rc = -ETIMEDOUT; + goto out; + } timeout_msecs -= msecs_blocked; } } @@ -6378,7 +6380,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) else mask = DMA_BIT_MASK(32); - rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); + rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); goto disable_device; diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 452e19f8fb4702..c2cee73a8560d3 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -544,6 +544,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba) ufshcd_set_variant(hba, host); host->rst = devm_reset_control_get(dev, "rst"); + if (IS_ERR(host->rst)) { + dev_err(dev, "%s: failed to get reset control\n", __func__); + return PTR_ERR(host->rst); + } ufs_hisi_set_pm_lvl(hba); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 895a9b5ac98993..30c22e16b1e34b 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -340,24 +340,21 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ufshcd_init_lanes_per_dir(hba); err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Initialization failed\n"); - goto out_disable_rpm; + goto dealloc_host; } platform_set_drvdata(pdev, hba); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; -out_disable_rpm: - pm_runtime_disable(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); dealloc_host: ufshcd_dealloc_host(hba); out: diff --git 
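The smartpqi hunk above swaps dma_set_mask() for dma_set_mask_and_coherent() so the streaming and coherent DMA masks cannot diverge. A minimal probe-time sketch of the usual 64-bit-then-32-bit fallback, assuming a generic PCI driver rather than smartpqi itself:

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int rc;

	/* Prefer 64-bit DMA; fall back to 32-bit if the platform rejects it. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}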
a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6e80dfe4fa979d..b8b59cfeacd1f9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1914,7 +1914,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); /* Get the descriptor */ - if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + if (hba->dev_cmd.query.descriptor && + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + GENERAL_UPIU_REQUEST_SIZE; u16 resp_len; @@ -6130,19 +6131,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, goto out; } - if (hba->vreg_info.vcc) + if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vcc->max_uA, POWER_DESC_MAX_ACTV_ICC_LVLS - 1, &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); - if (hba->vreg_info.vccq) + if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vccq->max_uA, icc_level, &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); - if (hba->vreg_info.vccq2) + if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vccq2->max_uA, icc_level, @@ -6767,6 +6768,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, if (!vreg) return 0; + /* + * "set_load" operation shall be required on those regulators + * which specifically configured current limitation. Otherwise + * zero max_uA may cause unexpected behavior when regulator is + * enabled or set as high power mode. + */ + if (!vreg->max_uA) + return 0; + ret = regulator_set_load(vreg->reg, ua); if (ret < 0) { dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", @@ -6813,12 +6823,15 @@ static int ufshcd_config_vreg(struct device *dev, name = vreg->name; if (regulator_count_voltages(reg) > 0) { - min_uV = on ? vreg->min_uV : 0; - ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); - if (ret) { - dev_err(dev, "%s: %s set voltage failed, err=%d\n", + if (vreg->min_uV && vreg->max_uV) { + min_uV = on ? vreg->min_uV : 0; + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); + if (ret) { + dev_err(dev, + "%s: %s set voltage failed, err=%d\n", __func__, name, ret); - goto out; + goto out; + } } uA_load = on ? 
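The ufshcd regulator hunks above only call regulator_set_load() and regulator_set_voltage() when the platform actually supplied current and voltage limits; a zero max_uA would otherwise switch the regulator into an unintended mode. A hedged sketch of that guard, with a hypothetical vreg structure in place of struct ufs_vreg:

struct example_vreg {
	struct regulator *reg;
	int min_uV;
	int max_uV;
	int max_uA;
};

static int example_vreg_config(struct example_vreg *v, bool on)
{
	int ret;

	/* Program only the limits the platform explicitly specified. */
	if (v->min_uV && v->max_uV) {
		ret = regulator_set_voltage(v->reg, on ? v->min_uV : 0, v->max_uV);
		if (ret)
			return ret;
	}
	if (v->max_uA) {
		ret = regulator_set_load(v->reg, on ? v->max_uA : 0);
		if (ret < 0)
			return ret;
	}
	return 0;
}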
vreg->max_uA : 0; diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 14a9d18306cbf7..f63d1b8a09335f 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1331,6 +1331,10 @@ static int of_qcom_slim_ngd_register(struct device *parent, return -ENOMEM; ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id); + if (!ngd->pdev) { + kfree(ngd); + return -ENOMEM; + } ngd->id = id; ngd->pdev->dev.parent = parent; ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 4e931fdf4d0918..011a40b5fb4907 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -1104,7 +1104,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; - u32 rdata; + u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index d44d0e687ab8ad..2a43d6e99962f8 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -285,6 +285,9 @@ static int __init renesas_soc_init(void) /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ if ((product & 0x7fff) == 0x5210) product ^= 0x11; + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ + if ((product & 0x7fff) == 0x5211) + product ^= 0x12; if (soc->id && ((product >> 8) & 0xff) != soc->id) { pr_warn("SoC mismatch (product = 0x%x)\n", product); return -ENODEV; diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 96882ffde67ea6..3b81e1d75a97e7 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = { }; #define RK3288_GRF_SOC_CON0 0x244 +#define RK3288_GRF_SOC_CON2 0x24c static const struct rockchip_grf_value rk3288_defaults[] __initconst = { { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) }, + { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) }, }; static const struct rockchip_grf_info rk3288_grf __initconst = { diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 08dd3a31a3e5f7..5b6f3655c366a6 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1427,7 +1427,7 @@ static int spi_imx_transfer(struct spi_device *spi, /* flush rxfifo before transfer */ while (spi_imx->devtype_data->rx_available(spi_imx)) - spi_imx->rx(spi_imx); + readl(spi_imx->base + MXC_CSPIRXDATA); if (spi_imx->slave_mode) return spi_imx_pio_transfer_slave(spi, transfer); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 1b923a2e29d2f9..fb0a6707670a09 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -876,10 +876,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) rate = min_t(int, ssp_clk, rate); + /* + * Calculate the divisor for the SCR (Serial Clock Rate), avoiding + * that the SSP transmission rate can be greater than the device rate + */ if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) - return (ssp_clk / (2 * rate) - 1) & 0xff; + return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff; else - return (ssp_clk / rate - 1) & 0xfff; + return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff; } static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, @@ -1412,12 +1416,7 @@ static const struct pci_device_id 
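The spi-pxa2xx hunk above computes the SCR divisor with DIV_ROUND_UP so the programmed SSP clock can never exceed the requested transfer rate; truncating division rounds the divisor down and therefore the actual rate up. A small, self-contained illustration of the arithmetic, mirroring the non-PXA25x branch where the hardware divides the source clock by SCR + 1 (numbers are made up):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ssp_clk = 200000000;	/* 200 MHz source clock */
	unsigned int rate = 13000000;		/* 13 MHz requested */

	/* Truncating: SCR 14 -> 200 MHz / 15 = 13.33 MHz, faster than asked. */
	unsigned int scr_trunc = ssp_clk / rate - 1;
	/* Rounding up: SCR 15 -> 200 MHz / 16 = 12.5 MHz, never above the request. */
	unsigned int scr_safe = DIV_ROUND_UP(ssp_clk, rate) - 1;

	printf("truncated SCR %u -> %u Hz\n", scr_trunc, ssp_clk / (scr_trunc + 1));
	printf("rounded-up SCR %u -> %u Hz\n", scr_safe, ssp_clk / (scr_safe + 1));
	return 0;
}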
pxa2xx_spi_pci_compound_match[] = { static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) { - struct device *dev = param; - - if (dev != chan->device->dev->parent) - return false; - - return true; + return param == chan->device->dev; } static struct pxa2xx_spi_master * diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index b37de1d991d6ab..d61120822f0263 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -279,7 +279,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) /* Sets parity, interrupt mask */ rspi_write8(rspi, 0x00, RSPI_SPCR2); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); @@ -323,7 +324,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, 0x00, RSPI_SSLND); rspi_write8(rspi, 0x00, RSPI_SPND); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); @@ -374,7 +376,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) /* Sets buffer to allow normal operation */ rspi_write8(rspi, 0x00, QSPI_SPBFCR); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); /* Sets RSPI mode */ diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index a76acedd7e2f40..a1888dc6a938af 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev) spi_irq = platform_get_irq(pdev, 0); tspi->irq = spi_irq; - ret = request_threaded_irq(tspi->irq, tegra_spi_isr, - tegra_spi_isr_thread, IRQF_ONESHOT, - dev_name(&pdev->dev), tspi); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", - tspi->irq); - goto exit_free_master; - } tspi->clk = devm_clk_get(&pdev->dev, "spi"); if (IS_ERR(tspi->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tspi->clk); - goto exit_free_irq; + goto exit_free_master; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); if (IS_ERR(tspi->rst)) { dev_err(&pdev->dev, "can not get reset\n"); ret = PTR_ERR(tspi->rst); - goto exit_free_irq; + goto exit_free_master; } tspi->max_buf_size = SPI_FIFO_DEPTH << 2; @@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev) ret = tegra_spi_init_dma_param(tspi, true); if (ret < 0) - goto exit_free_irq; + goto exit_free_master; ret = tegra_spi_init_dma_param(tspi, false); if (ret < 0) goto exit_rx_dma_free; @@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev) dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); goto exit_pm_disable; } + + reset_control_assert(tspi->rst); + udelay(2); + reset_control_deassert(tspi->rst); tspi->def_command1_reg = SPI_M_S; tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); pm_runtime_put(&pdev->dev); + ret = request_threaded_irq(tspi->irq, tegra_spi_isr, + tegra_spi_isr_thread, IRQF_ONESHOT, + dev_name(&pdev->dev), tspi); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", + tspi->irq); + goto exit_pm_disable; + } master->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_master(&pdev->dev, master); if (ret < 0) { dev_err(&pdev->dev, "can not register to master err %d\n", ret); - goto exit_pm_disable; + goto 
exit_free_irq; } return ret; +exit_free_irq: + free_irq(spi_irq, tspi); exit_pm_disable: pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) @@ -1136,8 +1142,6 @@ static int tegra_spi_probe(struct platform_device *pdev) tegra_spi_deinit_dma_param(tspi, false); exit_rx_dma_free: tegra_spi_deinit_dma_param(tspi, true); -exit_free_irq: - free_irq(spi_irq, tspi); exit_free_master: spi_master_put(master); return ret; diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 97d137591b18d5..4389ab80c23e6c 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1294,18 +1294,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat, dma->rx_buf_virt, dma->rx_buf_dma); } -static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, +static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, struct pch_spi_data *data) { struct pch_spi_dma_ctrl *dma; + int ret; dma = &data->dma; + ret = 0; /* Get Consistent memory for Tx DMA */ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL); + if (!dma->tx_buf_virt) + ret = -ENOMEM; + /* Get Consistent memory for Rx DMA */ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL); + if (!dma->rx_buf_virt) + ret = -ENOMEM; + + return ret; } static int pch_spi_pd_probe(struct platform_device *plat_dev) @@ -1382,7 +1391,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) if (use_dma) { dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); - pch_alloc_dma_buf(board_dat, data); + ret = pch_alloc_dma_buf(board_dat, data); + if (ret) + goto err_spi_register_master; } ret = spi_register_master(master); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 9da0bc5a036cff..88a8a8edd44be0 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -982,6 +982,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) if (max_tx || max_rx) { list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!xfer->len) + continue; if (!xfer->tx_buf) xfer->tx_buf = ctlr->dummy_tx; if (!xfer->rx_buf) diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c index f51f150307dfbc..ffa379efff83c7 100644 --- a/drivers/ssb/bridge_pcmcia_80211.c +++ b/drivers/ssb/bridge_pcmcia_80211.c @@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = { .resume = ssb_host_pcmcia_resume, }; +static int pcmcia_init_failed; + /* * These are not module init/exit functions! * The module_pcmcia_driver() helper cannot be used here. */ int ssb_host_pcmcia_init(void) { - return pcmcia_register_driver(&ssb_host_pcmcia_driver); + pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver); + + return pcmcia_init_failed; } void ssb_host_pcmcia_exit(void) { - pcmcia_unregister_driver(&ssb_host_pcmcia_driver); + if (!pcmcia_init_failed) + pcmcia_unregister_driver(&ssb_host_pcmcia_driver); } diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h index 2f8e2bf70941dc..7677da889f1255 100644 --- a/drivers/staging/erofs/erofs_fs.h +++ b/drivers/staging/erofs/erofs_fs.h @@ -17,10 +17,16 @@ #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EROFS_SUPER_OFFSET 1024 +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. 
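The spi-topcliff-pch hunk above turns pch_alloc_dma_buf() into a function that returns -ENOMEM when either dma_alloc_coherent() call fails, and the probe path now checks the result. One way to structure the same idea, sketched with hypothetical names and a full unwind on the second failure:

static int example_alloc_dma_bufs(struct device *dev, struct example_dma *dma)
{
	dma->tx_buf = dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE,
					 &dma->tx_dma, GFP_KERNEL);
	if (!dma->tx_buf)
		return -ENOMEM;

	dma->rx_buf = dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE,
					 &dma->rx_dma, GFP_KERNEL);
	if (!dma->rx_buf) {
		dma_free_coherent(dev, EXAMPLE_BUF_SIZE, dma->tx_buf, dma->tx_dma);
		return -ENOMEM;
	}
	return 0;
}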
+ */ +#define EROFS_ALL_REQUIREMENTS 0 + struct erofs_super_block { /* 0 */__le32 magic; /* in the little endian */ /* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; +/* 8 */__le32 features; /* (aka. feature_compat) */ /* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ /* 13 */__u8 reserved; @@ -34,9 +40,10 @@ struct erofs_super_block { /* 44 */__le32 xattr_blkaddr; /* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ /* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. feature_incompat) */ -/* 80 */__u8 reserved2[48]; /* 128 bytes */ -} __packed; +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ #define __EROFS_BIT(_prefix, _cur, _pre) enum { \ _prefix ## _cur ## _BIT = _prefix ## _pre ## _BIT + \ diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h index 58d8cbc3f921f1..8ce37091db2051 100644 --- a/drivers/staging/erofs/internal.h +++ b/drivers/staging/erofs/internal.h @@ -111,6 +111,8 @@ struct erofs_sb_info { u8 uuid[16]; /* 128-bit uuid for volume */ u8 volume_name[16]; /* volume name */ + u32 requirements; + char *dev_name; unsigned int mount_opt; diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index b0583cdb079ae4..b49ebdf6ebdaf0 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c @@ -75,6 +75,22 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + static int superblock_read(struct super_block *sb) { struct erofs_sb_info *sbi; @@ -108,6 +124,9 @@ static int superblock_read(struct super_block *sb) goto out; } + if (!check_layout_compatibility(sb, layout)) + goto out; + sbi->blocks = le32_to_cpu(layout->blocks); sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); #ifdef CONFIG_EROFS_FS_XATTR diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig index aea449a8dbf8a0..76818cc48ddcb5 100644 --- a/drivers/staging/media/davinci_vpfe/Kconfig +++ b/drivers/staging/media/davinci_vpfe/Kconfig @@ -1,7 +1,7 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" depends on VIDEO_V4L2 - depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST + depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1) depends on VIDEO_V4L2_SUBDEV_API depends on VIDEO_DAVINCI_VPBE_DISPLAY select VIDEOBUF2_DMA_CONTIG diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index e22f1239a3181d..d17ce1fb4ef51f 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep, /* * Parses the fwnode endpoint from the source pad of the entity * connected to this CSI. This will either be the entity directly - * upstream from the CSI-2 receiver, or directly upstream from the - * video mux. 
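The erofs changes above add a "requirements" (feature_incompat) word to the on-disk superblock and refuse to mount when any bit outside EROFS_ALL_REQUIREMENTS is set. The general pattern, sketched with made-up constants rather than erofs symbols:

#define MYFS_FEATURE_INCOMPAT_SUPPORTED	0x00000003	/* bits this driver understands */

static bool myfs_check_incompat(unsigned int incompat)
{
	/*
	 * Compat features may be ignored by older code; incompat features
	 * must fail the mount if any unknown bit is set.
	 */
	if (incompat & ~MYFS_FEATURE_INCOMPAT_SUPPORTED) {
		pr_err("myfs: unsupported incompat features %#x\n",
		       incompat & ~MYFS_FEATURE_INCOMPAT_SUPPORTED);
		return false;
	}
	return true;
}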
The endpoint is needed to determine the bus type and - * bus config coming into the CSI. + * upstream from the CSI-2 receiver, directly upstream from the + * video mux, or directly upstream from the CSI itself. The endpoint + * is needed to determine the bus type and bus config coming into + * the CSI. */ static int csi_get_upstream_endpoint(struct csi_priv *priv, struct v4l2_fwnode_endpoint *ep) @@ -168,7 +169,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv, if (!priv->src_sd) return -EPIPE; - src = &priv->src_sd->entity; + sd = priv->src_sd; + src = &sd->entity; if (src->function == MEDIA_ENT_F_VID_MUX) { /* @@ -182,6 +184,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv, src = &sd->entity; } + /* + * If the source is neither the video mux nor the CSI-2 receiver, + * get the source pad directly upstream from CSI itself. + */ + if (src->function != MEDIA_ENT_F_VID_MUX && + sd->grp_id != IMX_MEDIA_GRP_ID_CSI2) + src = &priv->sd.entity; + /* get source pad of entity directly upstream from src */ pad = imx_media_find_upstream_pad(priv->md, src, 0); if (IS_ERR(pad)) diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c index acde372c6795b3..1647da216bf9d6 100644 --- a/drivers/staging/media/imx/imx-media-of.c +++ b/drivers/staging/media/imx/imx-media-of.c @@ -233,15 +233,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd, struct v4l2_subdev *csi) { struct device_node *csi_np = csi->dev->of_node; - struct fwnode_handle *fwnode, *csi_ep; - struct v4l2_fwnode_link link; struct device_node *ep; - int ret; - - link.local_node = of_fwnode_handle(csi_np); - link.local_port = CSI_SINK_PAD; for_each_child_of_node(csi_np, ep) { + struct fwnode_handle *fwnode, *csi_ep; + struct v4l2_fwnode_link link; + int ret; + + memset(&link, 0, sizeof(link)); + + link.local_node = of_fwnode_handle(csi_np); + link.local_port = CSI_SINK_PAD; + csi_ep = of_fwnode_handle(ep); fwnode = fwnode_graph_get_remote_endpoint(csi_ep); diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c index cff7b1e07153b9..b688ebc0174059 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/controls.c +++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c @@ -576,7 +576,7 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev, dev->colourfx.enable ? "true" : "false", dev->colourfx.u, dev->colourfx.v, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 0 : -EINVAL); } static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, @@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n", __func__, mmal_ctrl, ctrl->id, ctrl->val, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 
0 : -EINVAL); } static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev, diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index e767209030642d..3bece6b86831eb 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -208,6 +208,9 @@ vchiq_platform_init_state(VCHIQ_STATE_T *state) struct vchiq_2835_state *platform_state; state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL); + if (!state->platform_state) + return VCHIQ_ERROR; + platform_state = (struct vchiq_2835_state *)state->platform_state; platform_state->inited = 1; @@ -407,9 +410,18 @@ create_pagelist(char __user *buf, size_t count, unsigned short type) int dma_buffers; dma_addr_t dma_addr; + if (count >= INT_MAX - PAGE_SIZE) + return NULL; + offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1)); num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE); + if (num_pages > (SIZE_MAX - sizeof(PAGELIST_T) - + sizeof(struct vchiq_pagelist_info)) / + (sizeof(u32) + sizeof(pages[0]) + + sizeof(struct scatterlist))) + return NULL; + pagelist_size = sizeof(PAGELIST_T) + (num_pages * sizeof(u32)) + (num_pages * sizeof(pages[0]) + diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 7642ced3143644..63ce567eb6b75a 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -2537,6 +2537,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, local->debug[DEBUG_ENTRIES] = DEBUG_MAX; status = vchiq_platform_init_state(state); + if (status != VCHIQ_SUCCESS) + return VCHIQ_ERROR; /* bring up slot handler thread diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 16f7dd266e3b18..767ec8184adfee 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3119,7 +3119,9 @@ static void hfa384x_usbin_callback(struct urb *urb) break; } + /* Save values from the RX URB before reposting overwrites it. */ urb_status = urb->status; + usbin = (union hfa384x_usbin *)urb->transfer_buffer; if (action != ABORT) { /* Repost the RX URB */ @@ -3136,7 +3138,6 @@ static void hfa384x_usbin_callback(struct urb *urb) /* Note: the check of the sw_support field, the type field doesn't * have bit 12 set like the docs suggest. 
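The create_pagelist() hunk above rejects user-controlled sizes before any multiplication happens, comparing num_pages against the largest value for which the per-page bookkeeping still fits in size_t. A simplified, self-contained version of that style of pre-check, with stand-in constants instead of the vchiq structures:

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define PER_ENTRY_BYTES	(sizeof(uint32_t) + sizeof(void *))
#define HEADER_BYTES	64	/* stand-in for the fixed header size */

static bool pagelist_size_ok(size_t num_pages, size_t *total)
{
	/* Refuse anything where header + num_pages * PER_ENTRY_BYTES would overflow. */
	if (num_pages > (SIZE_MAX - HEADER_BYTES) / PER_ENTRY_BYTES)
		return false;
	*total = HEADER_BYTES + num_pages * PER_ENTRY_BYTES;
	return true;
}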
*/ - usbin = (union hfa384x_usbin *)urb->transfer_buffer; type = le16_to_cpu(usbin->type); if (HFA384x_USB_ISRXFRM(type)) { if (action == HANDLE) { diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index a2c9bfae3d867e..b139713289a42d 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -171,7 +171,8 @@ static int tsens_probe(struct platform_device *pdev) if (tmdev->ops->calibrate) { ret = tmdev->ops->calibrate(tmdev); if (ret < 0) { - dev_err(dev, "tsens calibration failed\n"); + if (ret != -EPROBE_DEFER) + dev_err(dev, "tsens calibration failed\n"); return ret; } } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 7aed5337bdd35b..704c8ad045bb55 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -328,6 +328,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); static int rcar_gen3_thermal_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev); + + rcar_thermal_irq_set(priv, false); pm_runtime_put(dev); pm_runtime_disable(dev); diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 28fc4ce75edb49..8490a1b6b61564 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -476,6 +476,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route, goto out; sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); + if (!sw->uuid) { + tb_sw_warn(sw, "cannot allocate memory for switch\n"); + tb_switch_put(sw); + goto out; + } sw->connection_id = connection_id; sw->connection_key = connection_key; sw->link = link; diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c index 8fe913a95b4ad0..be3f8b592b05bb 100644 --- a/drivers/thunderbolt/property.c +++ b/drivers/thunderbolt/property.c @@ -551,6 +551,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key, property->length = size / 4; property->value.data = kzalloc(size, GFP_KERNEL); + if (!property->value.data) { + kfree(property); + return -ENOMEM; + } + memcpy(property->value.data, buf, buflen); list_add_tail(&property->list, &parent->properties); @@ -581,7 +586,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key, return -ENOMEM; property->length = size / 4; - property->value.data = kzalloc(size, GFP_KERNEL); + property->value.text = kzalloc(size, GFP_KERNEL); + if (!property->value.text) { + kfree(property); + return -ENOMEM; + } + strcpy(property->value.text, text); list_add_tail(&property->list, &parent->properties); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index dd9ae6f5d19ce6..bc7efa6e515d08 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -9,15 +9,13 @@ #include #include #include +#include #include #include #include #include "tb.h" -/* Switch authorization from userspace is serialized by this lock */ -static DEFINE_MUTEX(switch_lock); - /* Switch NVM support */ #define NVM_DEVID 0x05 @@ -253,8 +251,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, struct tb_switch *sw = priv; int ret = 0; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); /* * Since writing the NVM image might require some special steps, @@ -274,7 +272,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, memcpy(sw->nvm->buf + offset, val, bytes); unlock: - 
mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -363,10 +361,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) } nvm->non_active = nvm_dev; - mutex_lock(&switch_lock); sw->nvm = nvm; - mutex_unlock(&switch_lock); - return 0; err_nvm_active: @@ -383,10 +378,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw) { struct tb_switch_nvm *nvm; - mutex_lock(&switch_lock); nvm = sw->nvm; sw->nvm = NULL; - mutex_unlock(&switch_lock); if (!nvm) return; @@ -717,8 +710,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) { int ret = -EINVAL; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->authorized) goto unlock; @@ -761,7 +754,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) } unlock: - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -818,15 +811,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr, struct tb_switch *sw = tb_to_switch(dev); ssize_t ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->key) ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); else ret = sprintf(buf, "\n"); - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -843,8 +836,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, else if (hex2bin(key, buf, sizeof(key))) return -EINVAL; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->authorized) { ret = -EBUSY; @@ -859,7 +852,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, } } - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } static DEVICE_ATTR(key, 0600, key_show, key_store); @@ -905,8 +898,8 @@ static ssize_t nvm_authenticate_store(struct device *dev, bool val; int ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); /* If NVMem devices are not yet added */ if (!sw->nvm) { @@ -954,7 +947,7 @@ static ssize_t nvm_authenticate_store(struct device *dev, } exit_unlock: - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); if (ret) return ret; @@ -968,8 +961,8 @@ static ssize_t nvm_version_show(struct device *dev, struct tb_switch *sw = tb_to_switch(dev); int ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->safe_mode) ret = -ENODATA; @@ -978,7 +971,7 @@ static ssize_t nvm_version_show(struct device *dev, else ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -1296,13 +1289,14 @@ int tb_switch_configure(struct tb_switch *sw) return tb_plug_events_active(sw, true); } -static void tb_switch_set_uuid(struct tb_switch *sw) +static int tb_switch_set_uuid(struct tb_switch *sw) { u32 uuid[4]; - int cap; + int cap, ret; + ret = 0; if (sw->uuid) - return; + return ret; /* * The newer controllers include fused UUID as part of link @@ -1310,7 +1304,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw) */ cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); if (cap > 0) { - tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4); + ret = tb_sw_read(sw, uuid, 
TB_CFG_SWITCH, cap + 3, 4); + if (ret) + return ret; } else { /* * ICM generates UUID based on UID and fills the upper @@ -1325,6 +1321,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw) } sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); + if (!sw->uuid) + ret = -ENOMEM; + return ret; } static int tb_switch_add_dma_port(struct tb_switch *sw) @@ -1374,7 +1373,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (status) { tb_sw_info(sw, "switch flash authentication failed\n"); - tb_switch_set_uuid(sw); + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; nvm_set_auth_status(sw, status); } @@ -1424,7 +1425,9 @@ int tb_switch_add(struct tb_switch *sw) } tb_sw_info(sw, "uid: %#llx\n", sw->uid); - tb_switch_set_uuid(sw); + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; for (i = 0; i <= sw->config.max_port_number; i++) { if (sw->ports[i].disabled) { diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 5067d69d05018b..7a0ee9836a8a76 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -79,8 +79,7 @@ struct tb_switch_nvm { * @depth: Depth in the chain this switch is connected (ICM only) * * When the switch is being added or removed to the domain (other - * switches) you need to have domain lock held. For switch authorization - * internal switch_lock is enough. + * switches) you need to have domain lock held. */ struct tb_switch { struct device dev; diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index db8bece6332706..befe754906979d 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -743,6 +743,7 @@ static void enumerate_services(struct tb_xdomain *xd) struct tb_service *svc; struct tb_property *p; struct device *dev; + int id; /* * First remove all services that are not available anymore in @@ -771,7 +772,12 @@ static void enumerate_services(struct tb_xdomain *xd) break; } - svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + if (id < 0) { + kfree(svc); + break; + } + svc->id = id; svc->dev.bus = &tb_bus_type; svc->dev.type = &tb_service_type; svc->dev.parent = &xd->dev; diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c index 75155bde2b8810..31f53fa77e4af5 100644 --- a/drivers/tty/hvc/hvc_riscv_sbi.c +++ b/drivers/tty/hvc/hvc_riscv_sbi.c @@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init); static int __init hvc_sbi_console_init(void) { hvc_instantiate(0, 0, &hvc_sbi_ops); - add_preferred_console("hvc", 0, NULL); return 0; } diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c index 3475e841ef5c1f..4c18bbfe1a92ee 100644 --- a/drivers/tty/ipwireless/main.c +++ b/drivers/tty/ipwireless/main.c @@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ipw->common_memory = ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); + if (!ipw->common_memory) { + ret = -ENOMEM; + goto exit1; + } if (!request_mem_region(p_dev->resource[2]->start, resource_size(p_dev->resource[2]), IPWIRELESS_PCCARD_NAME)) { @@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ipw->attr_memory = ioremap(p_dev->resource[3]->start, resource_size(p_dev->resource[3])); + if (!ipw->attr_memory) { + ret = -ENOMEM; + goto exit3; + } if (!request_mem_region(p_dev->resource[3]->start, resource_size(p_dev->resource[3]), IPWIRELESS_PCCARD_NAME)) { diff --git a/drivers/tty/serial/8250/8250_dw.c 
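The thunderbolt switch.c changes above drop the private switch_lock in favour of the domain-wide tb->lock, taken from sysfs handlers with mutex_trylock(); when the lock is contended the handler returns restart_syscall() instead of sleeping, which avoids deadlocking against paths that hold tb->lock while tearing down those same attributes. A rough sketch of the idiom with a hypothetical attribute and device type:

static ssize_t example_attr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct example_dev *ed = to_example_dev(dev);
	ssize_t ret;

	if (!mutex_trylock(&ed->domain->lock))
		return restart_syscall();	/* the syscall is simply restarted */

	ret = sprintf(buf, "%d\n", ed->value);
	mutex_unlock(&ed->domain->lock);
	return ret;
}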
b/drivers/tty/serial/8250/8250_dw.c index d31b975dd3fd7b..284e8d052fc3ce 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) static bool dw8250_idma_filter(struct dma_chan *chan, void *param) { - return param == chan->device->dev->parent; + return param == chan->device->dev; } /* @@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) data->uart_16550_compatible = true; } - /* Platforms with iDMA */ + /* Platforms with iDMA 64-bit */ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) { data->dma.rx_param = p->dev->parent; diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 4c4070a202fb66..38c48a02b9206e 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -576,7 +576,7 @@ static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s, } /* Configure clock source */ - clksrc = xtal ? MAX310X_CLKSRC_CRYST_BIT : MAX310X_CLKSRC_EXTCLK_BIT; + clksrc = MAX310X_CLKSRC_EXTCLK_BIT | (xtal ? MAX310X_CLKSRC_CRYST_BIT : 0); /* Configure PLL */ if (pllcfg) { diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 736b74fd662376..0f41b936da0320 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -860,6 +860,7 @@ static void msm_handle_tx(struct uart_port *port) struct circ_buf *xmit = &msm_port->uart.state->xmit; struct msm_dma *dma = &msm_port->tx_dma; unsigned int pio_count, dma_count, dma_min; + char buf[4] = { 0 }; void __iomem *tf; int err = 0; @@ -869,10 +870,12 @@ static void msm_handle_tx(struct uart_port *port) else tf = port->membase + UART_TF; + buf[0] = port->x_char; + if (msm_port->is_uartdm) msm_reset_dm_count(port, 1); - iowrite8_rep(tf, &port->x_char, 1); + iowrite32_rep(tf, buf, 1); port->icount.tx++; port->x_char = 0; return; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index dbc140304624a9..13ba2a9db4d21e 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -130,9 +130,6 @@ static void uart_start(struct tty_struct *tty) struct uart_port *port; unsigned long flags; - if (!state) - return; - port = uart_port_lock(state, flags); __uart_start(tty); uart_port_unlock(port, flags); @@ -730,9 +727,6 @@ static void uart_unthrottle(struct tty_struct *tty) upstat_t mask = UPSTAT_SYNC_FIFO; struct uart_port *port; - if (!state) - return; - port = uart_port_ref(state); if (!port) return; @@ -1768,6 +1762,16 @@ static void uart_dtr_rts(struct tty_port *port, int raise) uart_port_deref(uport); } +static int uart_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + tty->index; + + tty->driver_data = state; + + return tty_standard_install(driver, tty); +} + /* * Calls to uart_open are serialised by the tty_lock in * drivers/tty/tty_io.c:tty_open() @@ -1780,11 +1784,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise) */ static int uart_open(struct tty_struct *tty, struct file *filp) { - struct uart_driver *drv = tty->driver->driver_state; - int retval, line = tty->index; - struct uart_state *state = drv->state + line; - - tty->driver_data = state; + struct uart_state *state = tty->driver_data; + int retval; retval = tty_port_open(&state->port, tty, filp); if (retval > 0) @@ -2469,6 +2470,7 @@ 
static void uart_poll_put_char(struct tty_driver *driver, int line, char ch) #endif static const struct tty_operations uart_ops = { + .install = uart_install, .open = uart_open, .close = uart_close, .write = uart_write, diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 03fe3fb4bff65f..040832635a6490 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1542,6 +1542,13 @@ static void sci_request_dma(struct uart_port *port) dev_dbg(port->dev, "%s: port %d\n", __func__, port->line); + /* + * DMA on console may interfere with Kernel log messages which use + * plain putchar(). So, simply don't use it with a console. + */ + if (uart_console(port)) + return; + if (!port->dev->of_node) return; diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 63e34d868de8f3..f8503f8fc44e20 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = { static struct uart_driver sunhv_reg = { .owner = THIS_MODULE, .driver_name = "sunhv", - .dev_name = "ttyS", + .dev_name = "ttyHV", .major = TTY_MAJOR, }; diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 88312c6c92cc6d..0617e87ab34392 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals); static struct input_handler kbd_handler; static DEFINE_SPINLOCK(kbd_event_lock); static DEFINE_SPINLOCK(led_lock); +static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ static bool dead_key_next; @@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) char *p; u_char *q; u_char __user *up; - int sz; + int sz, fnw_sz; int delta; char *first_free, *fj, *fnw; int i, j, k; int ret; + unsigned long flags; if (!capable(CAP_SYS_TTY_CONFIG)) perm = 0; @@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) goto reterr; } + fnw = NULL; + fnw_sz = 0; + /* race aginst other writers */ + again: + spin_lock_irqsave(&func_buf_lock, flags); q = func_table[i]; + + /* fj pointer to next entry after 'q' */ first_free = funcbufptr + (funcbufsize - funcbufleft); for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) ; @@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) fj = func_table[j]; else fj = first_free; - + /* buffer usage increase by new entry */ delta = (q ? 
-strlen(q) : 1) + strlen(kbs->kb_string); + if (delta <= funcbufleft) { /* it fits in current buf */ if (j < MAX_NR_FUNC) { + /* make enough space for new entry at 'fj' */ memmove(fj + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) if (func_table[k]) @@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) sz = 256; while (sz < funcbufsize - funcbufleft + delta) sz <<= 1; - fnw = kmalloc(sz, GFP_KERNEL); - if(!fnw) { - ret = -ENOMEM; - goto reterr; + if (fnw_sz != sz) { + spin_unlock_irqrestore(&func_buf_lock, flags); + kfree(fnw); + fnw = kmalloc(sz, GFP_KERNEL); + fnw_sz = sz; + if (!fnw) { + ret = -ENOMEM; + goto reterr; + } + goto again; } if (!q) func_table[i] = fj; + /* copy data before insertion point to new location */ if (fj > funcbufptr) memmove(fnw, funcbufptr, fj - funcbufptr); for (k = 0; k < j; k++) if (func_table[k]) func_table[k] = fnw + (func_table[k] - funcbufptr); + /* copy data after insertion point to new location */ if (first_free > fj) { memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) @@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) funcbufleft = funcbufleft - delta + sz - funcbufsize; funcbufsize = sz; } + /* finally insert item itself */ strcpy(func_table[i], kbs->kb_string); + spin_unlock_irqrestore(&func_buf_lock, flags); break; } ret = 0; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 3e5ec1cee05930..d673e3592662c9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1059,6 +1059,13 @@ static void visual_init(struct vc_data *vc, int num, int init) vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row; } + +static void visual_deinit(struct vc_data *vc) +{ + vc->vc_sw->con_deinit(vc); + module_put(vc->vc_sw->owner); +} + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; @@ -1106,6 +1113,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ return 0; err_free: + visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; return -ENOMEM; @@ -1334,9 +1342,8 @@ struct vc_data *vc_deallocate(unsigned int currcons) param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m); vcs_remove_sysfs(currcons); - vc->vc_sw->con_deinit(vc); + visual_deinit(vc); put_pid(vc->vt_pid); - module_put(vc->vc_sw->owner); vc_uniscr_set(vc, NULL); kfree(vc->vc_screenbuf); vc_cons[currcons].d = NULL; @@ -4155,8 +4162,6 @@ void do_blank_screen(int entering_gfx) return; } - if (blank_state != blank_normal_wait) - return; blank_state = blank_off; /* don't blank graphics */ diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 9852ec5e6e017e..cc7c856126df5a 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1621,6 +1621,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) static int ci_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver); static int ci_udc_stop(struct usb_gadget *gadget); + +/* Match ISOC IN from the highest endpoint */ +static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget, + struct usb_endpoint_descriptor *desc, + struct usb_ss_ep_comp_descriptor *comp_desc) +{ + struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); + struct usb_ep *ep; + + if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) { + list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) { + if (ep->caps.dir_in 
&& !ep->claimed) + return ep; + } + } + + return NULL; +} + /** * Device operations part of the API to the USB controller hardware, * which don't involve endpoints (or i/o) @@ -1634,6 +1653,7 @@ static const struct usb_gadget_ops usb_gadget_ops = { .vbus_draw = ci_udc_vbus_draw, .udc_start = ci_udc_start, .udc_stop = ci_udc_stop, + .match_ep = ci_udc_match_ep, }; static int init_eps(struct ci_hdrc *ci) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 7b5cb28ffb3578..e723ddd79bcf96 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -936,8 +936,8 @@ int usb_get_bos_descriptor(struct usb_device *dev) /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); - if (ret < USB_DT_BOS_SIZE) { - dev_err(ddev, "unable to get BOS descriptor\n"); + if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) { + dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1c21955fe7c007..b82a7d787add8c 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3017,6 +3017,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); + /* No need for pm_runtime_put(), we're shutting down */ + pm_runtime_get_sync(&dev->dev); + if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bbcfa63d0233bf..eb24ec0e160d40 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5823,7 +5823,10 @@ int usb_reset_device(struct usb_device *udev) cintf->needs_binding = 1; } } - usb_unbind_and_rebind_marked_interfaces(udev); + + /* If the reset failed, hub_wq will unbind drivers later */ + if (ret == 0) + usb_unbind_and_rebind_marked_interfaces(udev); } usb_autosuspend_device(udev); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8bc35d53408b5d..6b641307358433 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -209,9 +209,15 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Surface Dock Ethernet (RTL8153 GigE) */ + { USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM }, + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Webcam C270 */ + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 220c0f9b89b0ba..03614ef64ca479 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -675,13 +675,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) unsigned int maxsize; if (is_isoc) - maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : - DEV_DMA_ISOC_RX_NBYTES_LIMIT; + maxsize = (hs_ep->dir_in ? 
DEV_DMA_ISOC_TX_NBYTES_LIMIT : + DEV_DMA_ISOC_RX_NBYTES_LIMIT) * + MAX_DMA_DESC_NUM_HS_ISOC; else - maxsize = DEV_DMA_NBYTES_LIMIT; - - /* Above size of one descriptor was chosen, multiple it */ - maxsize *= MAX_DMA_DESC_NUM_GENERIC; + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; return maxsize; } @@ -864,7 +862,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, /* Update index of last configured entry in the chain */ hs_ep->next_desc++; - if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC) + if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC) hs_ep->next_desc = 0; return 0; @@ -896,7 +894,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) } /* Initialize descriptor chain by Host Busy status */ - for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) { + for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) { desc = &hs_ep->desc_list[i]; desc->status = 0; desc->status |= (DEV_DMA_BUFF_STS_HBUSY @@ -2083,7 +2081,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); hs_ep->compl_desc++; - if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1)) + if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1)) hs_ep->compl_desc = 0; desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; } @@ -3779,6 +3777,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, unsigned int i, val, size; int ret = 0; unsigned char ep_type; + int desc_num; dev_dbg(hsotg->dev, "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", @@ -3825,11 +3824,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", __func__, epctrl, epctrl_reg); + if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC) + desc_num = MAX_DMA_DESC_NUM_HS_ISOC; + else + desc_num = MAX_DMA_DESC_NUM_GENERIC; + /* Allocate DMA descriptor chain for non-ctrl endpoints */ if (using_desc_dma(hsotg) && !hs_ep->desc_list) { hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, - MAX_DMA_DESC_NUM_GENERIC * - sizeof(struct dwc2_dma_desc), + desc_num * sizeof(struct dwc2_dma_desc), &hs_ep->desc_list_dma, GFP_ATOMIC); if (!hs_ep->desc_list) { ret = -ENOMEM; @@ -3971,7 +3974,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, error2: if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { - dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + dmam_free_coherent(hsotg->dev, desc_num * sizeof(struct dwc2_dma_desc), hs_ep->desc_list, hs_ep->desc_list_dma); hs_ep->desc_list = NULL; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 260010abf9d81c..aad7edc29bddd4 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2673,8 +2673,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb) return; /* Restore urb->transfer_buffer from the end of the allocated area */ - memcpy(&stored_xfer_buffer, urb->transfer_buffer + - urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); + memcpy(&stored_xfer_buffer, + PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length, + dma_get_cache_alignment()), + sizeof(urb->transfer_buffer)); if (usb_urb_dir_in(urb)) { if (usb_pipeisoc(urb->pipe)) @@ -2706,6 +2708,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) * DMA */ kmalloc_size = urb->transfer_buffer_length + + (dma_get_cache_alignment() - 1) + sizeof(urb->transfer_buffer); kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); @@ -2716,7 +2719,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t 
mem_flags) * Position value of original urb->transfer_buffer pointer to the end * of allocation for later referencing */ - memcpy(kmalloc_ptr + urb->transfer_buffer_length, + memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length, + dma_get_cache_alignment()), &urb->transfer_buffer, sizeof(urb->transfer_buffer)); if (usb_urb_dir_out(urb)) @@ -2801,7 +2805,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info); chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info); chan->speed = qh->dev_speed; - chan->max_packet = dwc2_max_packet(qh->maxp); + chan->max_packet = qh->maxp; chan->xfer_started = 0; chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS; @@ -2879,7 +2883,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * This value may be modified when the transfer is started * to reflect the actual transfer length */ - chan->multi_count = dwc2_hb_mult(qh->maxp); + chan->multi_count = qh->maxp_mult; if (hsotg->params.dma_desc_enable) { chan->desc_list_addr = qh->desc_list_dma; @@ -3991,19 +3995,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg, static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, u8 dev_addr, - u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps) + u8 ep_num, u8 ep_type, u8 ep_dir, + u16 maxp, u16 maxp_mult) { if (dbg_perio() || ep_type == USB_ENDPOINT_XFER_BULK || ep_type == USB_ENDPOINT_XFER_CONTROL) dev_vdbg(hsotg->dev, - "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n", - dev_addr, ep_num, ep_dir, ep_type, mps); + "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n", + dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult); urb->pipe_info.dev_addr = dev_addr; urb->pipe_info.ep_num = ep_num; urb->pipe_info.pipe_type = ep_type; urb->pipe_info.pipe_dir = ep_dir; - urb->pipe_info.mps = mps; + urb->pipe_info.maxp = maxp; + urb->pipe_info.maxp_mult = maxp_mult; } /* @@ -4094,8 +4100,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 
"IN" : "OUT"); dev_dbg(hsotg->dev, - " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + " Max packet size: %d (%d mult)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_dbg(hsotg->dev, " transfer_buffer: %p\n", urb->buf); @@ -4653,8 +4660,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, } dev_vdbg(hsotg->dev, " Speed: %s\n", speed); - dev_vdbg(hsotg->dev, " Max packet size: %d\n", - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); + dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n", + usb_endpoint_maxp(&urb->ep->desc), + usb_endpoint_maxp_mult(&urb->ep->desc)); + dev_vdbg(hsotg->dev, " Data buffer length: %d\n", urb->transfer_buffer_length); dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", @@ -4737,8 +4746,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ep_type, usb_pipein(urb->pipe), - usb_maxpacket(urb->dev, urb->pipe, - !(usb_pipein(urb->pipe)))); + usb_endpoint_maxp(&ep->desc), + usb_endpoint_maxp_mult(&ep->desc)); buf = urb->transfer_buffer; diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index c089ffa1f0a8fa..ce6445a065889b 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info { u8 ep_num; u8 pipe_type; u8 pipe_dir; - u16 mps; + u16 maxp; + u16 maxp_mult; }; struct dwc2_hcd_iso_packet_desc { @@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time { * - USB_ENDPOINT_XFER_ISOC * @ep_is_in: Endpoint direction * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor + * @maxp_mult: Multiplier for maxp * @dev_speed: Device speed. One of the following values: * - USB_SPEED_LOW * - USB_SPEED_FULL @@ -340,6 +342,7 @@ struct dwc2_qh { u8 ep_type; u8 ep_is_in; u16 maxp; + u16 maxp_mult; u8 dev_speed; u8 data_toggle; u8 ping_state; @@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe) return pipe->pipe_type; } -static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe) +static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe) +{ + return pipe->maxp; +} + +static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe) { - return pipe->mps; + return pipe->maxp_mult; } static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe) @@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb) static inline bool dbg_perio(void) { return false; } #endif -/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */ -#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03)) - -/* Packet size for any kind of endpoint descriptor */ -#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff) - /* * Returns true if frame1 index is greater than frame2 index. The comparison * is done modulo FRLISTEN_64_SIZE. 
This accounts for the rollover of the diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 88b5dcf3aefc57..a052d39b4375e4 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, " Speed: %s\n", speed); - dev_err(hsotg->dev, " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length); dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", urb->buf, (unsigned long)urb->dma); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index ea3aa640c15c18..68bbac64b7536a 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { - int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); + int bytecount = qh->maxp_mult * qh->maxp; int ls_search_slice; int err = 0; int host_interval_in_sched; @@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, u32 max_channel_xfer_size; int status = 0; - max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); + max_xfer_size = qh->maxp * qh->maxp_mult; max_channel_xfer_size = hsotg->params.max_transfer_size; if (max_xfer_size > max_channel_xfer_size) { @@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED && dev_speed != USB_SPEED_HIGH); - int maxp = dwc2_hcd_get_mps(&urb->pipe_info); - int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp); + int maxp = dwc2_hcd_get_maxp(&urb->pipe_info); + int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info); + int bytecount = maxp_mult * maxp; char *speed, *type; /* Initialize QH */ @@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, qh->data_toggle = DWC2_HC_PID_DATA0; qh->maxp = maxp; + qh->maxp_mult = maxp_mult; INIT_LIST_HEAD(&qh->qtd_list); INIT_LIST_HEAD(&qh->qh_list_entry); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 4d5c7dda8f54d4..05b9ccff7447af 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1591,6 +1591,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; case DWC3_GCTL_PRTCAP_HOST: @@ -1623,6 +1624,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); } dwc3_otg_exit(dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 524104eed8a71d..65ba1038b11110 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3277,8 +3277,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) dwc3_disconnect_gadget(dwc); __dwc3_gadget_stop(dwc); - synchronize_irq(dwc->irq_gadget); - return 0; } diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index cadc01336bf80e..7ba6afc7ef230e 100644 
--- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, struct xhci_ep_priv *epriv; struct xhci_slot_priv *spriv = dev->debugfs_private; + if (!spriv) + return; + if (spriv->eps[ep_index]) return; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f054464347c9e9..f896a00662efe3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -656,6 +656,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct device *dev = xhci_to_hcd(xhci)->self.controller; struct xhci_segment *seg = td->bounce_seg; struct urb *urb = td->urb; + size_t len; if (!ring || !seg || !urb) return; @@ -666,11 +667,14 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, return; } - /* for in tranfers we need to copy the data from bounce to sg */ - sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf, - seg->bounce_len, seg->bounce_offs); dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); seg->bounce_len = 0; seg->bounce_offs = 0; } @@ -1600,8 +1604,13 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } - if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) + if (hcd->speed >= HCD_USB3 && + (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); + if (slot_id && xhci->devs[slot_id]) + xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); + } if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port resume event for port %d\n", port_id); @@ -1789,6 +1798,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; struct xhci_command *command; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. 
+ * Device will be reset soon to recover the link so don't do anything + */ + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) + return; + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) return; @@ -3104,6 +3121,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, unsigned int unalign; unsigned int max_pkt; u32 new_buff_len; + size_t len; max_pkt = usb_endpoint_maxp(&urb->ep->desc); unalign = (enqd_len + *trb_buff_len) % max_pkt; @@ -3134,8 +3152,12 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, /* create a max max_pkt sized bounce buffer pointed to by last trb */ if (usb_urb_dir_out(urb)) { - sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs, + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, new_buff_len, enqd_len); + if (len != seg->bounce_len) + xhci_warn(xhci, + "WARN Wrong bounce buffer write length: %zu != %d\n", + len, seg->bounce_len); seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index dae3be1b9c8f01..4ffadca2c71a62 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -52,7 +53,6 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) return false; } -/* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * xhci_handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read @@ -69,18 +69,16 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; + int ret; - do { - result = readl(ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX, + 1, usec); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; } /* @@ -1443,6 +1441,10 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag xhci_dbg(xhci, "urb submitted during PCI suspend\n"); return -ESHUTDOWN; } + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); + return -ENODEV; + } if (usb_endpoint_xfer_isoc(&urb->ep->desc)) num_tds = urb->number_of_packets; @@ -3726,6 +3728,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } /* If necessary, update the number of active TTs on this root port */ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + virt_dev->flags = 0; ret = 0; command_cleanup: @@ -4289,7 +4292,6 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, pm_addr = ports[port_num]->addr + PORTPMSC; pm_val = readl(pm_addr); hlpm_addr = ports[port_num]->addr + PORTHLPMC; - field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? "enable" : "disable", port_num + 1); @@ -4301,6 +4303,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, * default one which works with mixed HIRD and BESL * systems. 
See XHCI_DEFAULT_BESL definition in xhci.h */ + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); if ((field & USB_BESL_SUPPORT) && (field & USB_BESL_BASELINE_VALID)) hird = USB_GET_BESL_BASELINE(field); @@ -5032,16 +5035,26 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) } else { /* * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol - * minor revision instead of sbrn + * minor revision instead of sbrn. Minor revision is a two digit + * BCD containing minor and sub-minor numbers, only show minor. */ - minor_rev = xhci->usb3_rhub.min_rev; - if (minor_rev) { + minor_rev = xhci->usb3_rhub.min_rev / 0x10; + + switch (minor_rev) { + case 2: + hcd->speed = HCD_USB32; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->rx_lanes = 2; + hcd->self.root_hub->tx_lanes = 2; + break; + case 1: hcd->speed = HCD_USB31; hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + break; } - xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", minor_rev, - minor_rev ? "Enhanced" : ""); + minor_rev ? "Enhanced " : ""); xhci->usb3_rhub.hcd = hcd; /* xHCI private pointer was set in xhci_pci_probe for the second diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index dc00f59c8e6923..761b341d27b076 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1010,6 +1010,15 @@ struct xhci_virt_device { u8 real_port; struct xhci_interval_bw_table *bw_table; struct xhci_tt_bw_info *tt_info; + /* + * flags for state tracking based on events and issued commands. + * Software can not rely on states from output contexts because of + * latency between events and xHC updating output context values. + * See xhci 1.1 section 4.8.3 for more details + */ + unsigned long flags; +#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */ + /* The current max exit latency for the enabled USB3 link states. */ u16 current_mel; /* Used for the debugfs interfaces. 
*/ diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index 7b9adeb3e7aa14..a32d61a79ab8af 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -86,9 +86,22 @@ static int close_rio(struct inode *inode, struct file *file) { struct rio_usb_data *rio = &rio_instance; - rio->isopen = 0; + /* against disconnect() */ + mutex_lock(&rio500_mutex); + mutex_lock(&(rio->lock)); - dev_info(&rio->rio_dev->dev, "Rio closed.\n"); + rio->isopen = 0; + if (!rio->present) { + /* cleanup has been delayed */ + kfree(rio->ibuf); + kfree(rio->obuf); + rio->ibuf = NULL; + rio->obuf = NULL; + } else { + dev_info(&rio->rio_dev->dev, "Rio closed.\n"); + } + mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return 0; } @@ -447,15 +460,23 @@ static int probe_rio(struct usb_interface *intf, { struct usb_device *dev = interface_to_usbdev(intf); struct rio_usb_data *rio = &rio_instance; - int retval; + int retval = 0; - dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); + mutex_lock(&rio500_mutex); + if (rio->present) { + dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum); + retval = -EBUSY; + goto bail_out; + } else { + dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); + } retval = usb_register_dev(intf, &usb_rio_class); if (retval) { dev_err(&dev->dev, "Not able to get a minor for this device.\n"); - return -ENOMEM; + retval = -ENOMEM; + goto bail_out; } rio->rio_dev = dev; @@ -464,7 +485,8 @@ static int probe_rio(struct usb_interface *intf, dev_err(&dev->dev, "probe_rio: Not enough memory for the output buffer\n"); usb_deregister_dev(intf, &usb_rio_class); - return -ENOMEM; + retval = -ENOMEM; + goto bail_out; } dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf); @@ -473,7 +495,8 @@ static int probe_rio(struct usb_interface *intf, "probe_rio: Not enough memory for the input buffer\n"); usb_deregister_dev(intf, &usb_rio_class); kfree(rio->obuf); - return -ENOMEM; + retval = -ENOMEM; + goto bail_out; } dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); @@ -481,8 +504,10 @@ static int probe_rio(struct usb_interface *intf, usb_set_intfdata (intf, rio); rio->present = 1; +bail_out: + mutex_unlock(&rio500_mutex); - return 0; + return retval; } static void disconnect_rio(struct usb_interface *intf) diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 3198d0477cf85d..c4f6ac5f035eba 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3029,6 +3029,13 @@ static int sisusb_probe(struct usb_interface *intf, mutex_init(&(sisusb->lock)); + sisusb->sisusb_dev = dev; + sisusb->vrambase = SISUSB_PCI_MEMBASE; + sisusb->mmiobase = SISUSB_PCI_MMIOBASE; + sisusb->mmiosize = SISUSB_PCI_MMIOSIZE; + sisusb->ioportbase = SISUSB_PCI_IOPORTBASE; + /* Everything else is zero */ + /* Register device */ retval = usb_register_dev(intf, &usb_sisusb_class); if (retval) { @@ -3039,13 +3046,7 @@ static int sisusb_probe(struct usb_interface *intf, goto error_1; } - sisusb->sisusb_dev = dev; - sisusb->minor = intf->minor; - sisusb->vrambase = SISUSB_PCI_MEMBASE; - sisusb->mmiobase = SISUSB_PCI_MMIOBASE; - sisusb->mmiosize = SISUSB_PCI_MMIOSIZE; - sisusb->ioportbase = SISUSB_PCI_IOPORTBASE; - /* Everything else is zero */ + sisusb->minor = intf->minor; /* Allocate buffers */ sisusb->ibufsize = SISUSB_IBUF_SIZE; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d8c474b386a811..ea891195bbdf06 100644 --- a/drivers/usb/serial/option.c +++ 
b/drivers/usb/serial/option.c @@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */ .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ @@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ + .driver_info = RSVD(7) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 5a6df6e9ad5776..5d7b21ea623834 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 559941ca884daf..b0175f17d1a2b6 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -155,3 +155,6 @@ #define SMART_VENDOR_ID 0x0b8c #define SMART_PRODUCT_ID 0x2303 +/* Allied Telesis VT-Kit3 */ +#define AT_VENDOR_ID 0x0caa +#define AT_VTKIT3_PRODUCT_ID 0x3001 diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h index 6b2140f966ef87..7e14c2d7cf734f 100644 --- a/drivers/usb/storage/unusual_realtek.h +++ b/drivers/usb/storage/unusual_realtek.h @@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999, "USB Card Reader", USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), +UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999, "Realtek", "USB Card Reader", diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 82bed9810be6c4..62a0060d39d8dc 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c @@ -641,6 +641,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip, return ret; chip->intr_togdone = false; } else { + /* Datasheet says vconn MUST be off when toggling */ + WARN(chip->vconn_on, "Vconn is on during toggle start"); /* unmask TOGDONE interrupt */ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA, FUSB_REG_MASKA_TOGDONE); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index c0d6ff1baa7217..7931e6cecc7044 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -301,9 +301,17 @@ static int stub_probe(struct usb_device *udev) const char *udev_busid = dev_name(&udev->dev); struct bus_id_priv *busid_priv; int rc = 0; 
+ char save_status; dev_dbg(&udev->dev, "Enter probe\n"); + /* Not sure if this is our device. Allocate here to avoid + * calling alloc while holding busid_table lock. + */ + sdev = stub_device_alloc(udev); + if (!sdev) + return -ENOMEM; + /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) || @@ -318,6 +326,9 @@ static int stub_probe(struct usb_device *udev) * See driver_probe_device() in driver/base/dd.c */ rc = -ENODEV; + if (!busid_priv) + goto sdev_free; + goto call_put_busid_priv; } @@ -337,12 +348,6 @@ static int stub_probe(struct usb_device *udev) goto call_put_busid_priv; } - /* ok, this is my device */ - sdev = stub_device_alloc(udev); - if (!sdev) { - rc = -ENOMEM; - goto call_put_busid_priv; - } dev_info(&udev->dev, "usbip-host: register new device (bus %u dev %u)\n", @@ -352,9 +357,16 @@ static int stub_probe(struct usb_device *udev) /* set private data to usb_device */ dev_set_drvdata(&udev->dev, sdev); + busid_priv->sdev = sdev; busid_priv->udev = udev; + save_status = busid_priv->status; + busid_priv->status = STUB_BUSID_ALLOC; + + /* release the busid_lock */ + put_busid_priv(busid_priv); + /* * Claim this hub port. * It doesn't matter what value we pass as owner @@ -372,10 +384,8 @@ static int stub_probe(struct usb_device *udev) dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid); goto err_files; } - busid_priv->status = STUB_BUSID_ALLOC; - rc = 0; - goto call_put_busid_priv; + return 0; err_files: usb_hub_release_port(udev->parent, udev->portnum, @@ -384,23 +394,30 @@ static int stub_probe(struct usb_device *udev) dev_set_drvdata(&udev->dev, NULL); usb_put_dev(udev); + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); busid_priv->sdev = NULL; - stub_device_free(sdev); + busid_priv->status = save_status; + spin_unlock(&busid_priv->busid_lock); + /* lock is released - go to free */ + goto sdev_free; call_put_busid_priv: + /* release the busid_lock */ put_busid_priv(busid_priv); + +sdev_free: + stub_device_free(sdev); + return rc; } static void shutdown_busid(struct bus_id_priv *busid_priv) { - if (busid_priv->sdev && !busid_priv->shutdown_busid) { - busid_priv->shutdown_busid = 1; - usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED); + usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED); - /* wait for the stop of the event handler */ - usbip_stop_eh(&busid_priv->sdev->ud); - } + /* wait for the stop of the event handler */ + usbip_stop_eh(&busid_priv->sdev->ud); } /* @@ -427,11 +444,16 @@ static void stub_disconnect(struct usb_device *udev) /* get stub_device */ if (!sdev) { dev_err(&udev->dev, "could not get device"); - goto call_put_busid_priv; + /* release busid_lock */ + put_busid_priv(busid_priv); + return; } dev_set_drvdata(&udev->dev, NULL); + /* release busid_lock before call to remove device files */ + put_busid_priv(busid_priv); + /* * NOTE: rx/tx threads are invoked for each usb_device. 
*/ @@ -442,27 +464,36 @@ static void stub_disconnect(struct usb_device *udev) (struct usb_dev_state *) udev); if (rc) { dev_dbg(&udev->dev, "unable to release port\n"); - goto call_put_busid_priv; + return; } /* If usb reset is called from event handler */ if (usbip_in_eh(current)) - goto call_put_busid_priv; + return; + + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); + if (!busid_priv->shutdown_busid) + busid_priv->shutdown_busid = 1; + /* release busid_lock */ + spin_unlock(&busid_priv->busid_lock); /* shutdown the current connection */ shutdown_busid(busid_priv); usb_put_dev(sdev->udev); + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); /* free sdev */ busid_priv->sdev = NULL; stub_device_free(sdev); if (busid_priv->status == STUB_BUSID_ALLOC) busid_priv->status = STUB_BUSID_ADDED; - -call_put_busid_priv: - put_busid_priv(busid_priv); + /* release busid_lock */ + spin_unlock(&busid_priv->busid_lock); + return; } #ifdef CONFIG_PM diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 64833879f75d3c..7a386fb30bf1fc 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -34,6 +34,7 @@ #include #include #include +#include #define DRIVER_VERSION "0.3" #define DRIVER_AUTHOR "Alex Williamson " @@ -904,30 +905,17 @@ void *vfio_device_data(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_device_data); -/* Given a referenced group, check if it contains the device */ -static bool vfio_dev_present(struct vfio_group *group, struct device *dev) -{ - struct vfio_device *device; - - device = vfio_group_get_device(group, dev); - if (!device) - return false; - - vfio_device_put(device); - return true; -} - /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... */ void *vfio_del_group_dev(struct device *dev) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct vfio_device *device = dev_get_drvdata(dev); struct vfio_group *group = device->group; void *device_data = device->device_data; struct vfio_unbound_dev *unbound; unsigned int i = 0; - long ret; bool interrupted = false; /* @@ -964,6 +952,8 @@ void *vfio_del_group_dev(struct device *dev) * interval with counter to allow the driver to take escalating * measures to release the device if it has the ability to do so. 
*/ + add_wait_queue(&vfio.release_q, &wait); + do { device = vfio_group_get_device(group, dev); if (!device) @@ -975,12 +965,10 @@ void *vfio_del_group_dev(struct device *dev) vfio_device_put(device); if (interrupted) { - ret = wait_event_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); + wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); } else { - ret = wait_event_interruptible_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); - if (ret == -ERESTARTSYS) { + wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); + if (signal_pending(current)) { interrupted = true; dev_warn(dev, "Device is currently in use, task" @@ -989,8 +977,10 @@ void *vfio_del_group_dev(struct device *dev) current->comm, task_pid_nr(current)); } } - } while (ret <= 0); + } while (1); + + remove_wait_queue(&vfio.release_q, &wait); /* * In order to support multiple devices per group, devices can be * plucked from the group while other devices in the group are still diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index 68a113594808f2..2811c4afde01c0 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c @@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) int size = len * sizeof(u16); int ret = -ENOMEM; + flags |= __GFP_NOWARN; + if (cmap->len != len) { fb_dealloc_cmap(cmap); if (!len) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 5d961e3ac66ead..b96d4e779333bf 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1237,7 +1237,7 @@ static void fbcon_deinit(struct vc_data *vc) if (free_font) vc->vc_font.data = NULL; - if (vc->vc_hi_font_mask) + if (vc->vc_hi_font_mask && vc->vc_screenbuf) set_vc_hi_font(vc, false); if (!con_is_bound(&fb_con)) diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 283d9307df21c2..ac049871704ddd 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode, if (var->vmode & FB_VMODE_DOUBLE) vtotal *= 2; + if (!htotal || !vtotal) + return; + hfreq = pixclock/htotal; mode->refresh = hfreq/vtotal; } diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index ba906876cc454f..9f39f0c360e0c0 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { + if (efi_enabled(EFI_BOOT) && + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n", @@ -476,8 +477,12 @@ static int efifb_probe(struct platform_device *dev) * If the UEFI memory map covers the efifb region, we may only * remap it using the attributes the memory map prescribes. 
*/ - mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB; - mem_flags &= md.attribute; + md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC | + EFI_MEMORY_WT | EFI_MEMORY_WB; + if (md.attribute) { + mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB; + mem_flags &= md.attribute; + } } if (mem_flags & EFI_MEMORY_WC) info->screen_base = ioremap_wc(efifb_fix.smem_start, diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index 46302854317367..59e1cae5794814 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -285,6 +285,8 @@ static int hga_card_detect(void) hga_vram_len = 0x08000; hga_vram = ioremap(0xb0000, hga_vram_len); + if (!hga_vram) + goto error; if (request_region(0x3b0, 12, "hgafb")) release_io_ports = 1; diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index ecdcf358ad5eac..ffcf553719a314 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 0x400000 : 0x800000); + if (!info->screen_base) { + release_mem_region(addr, size); + framebuffer_release(info); + return -ENOMEM; + } info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); par->cmap_regs_phys = addr + 0x840000; diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h index aad1cc4be34a90..c7ebf03b8d5378 100644 --- a/drivers/video/fbdev/sm712.h +++ b/drivers/video/fbdev/sm712.h @@ -15,14 +15,10 @@ #define FB_ACCEL_SMI_LYNX 88 -#define SCREEN_X_RES 1024 -#define SCREEN_Y_RES 600 -#define SCREEN_BPP 16 - -/*Assume SM712 graphics chip has 4MB VRAM */ -#define SM712_VIDEOMEMORYSIZE 0x00400000 -/*Assume SM722 graphics chip has 8MB VRAM */ -#define SM722_VIDEOMEMORYSIZE 0x00800000 +#define SCREEN_X_RES 1024 +#define SCREEN_Y_RES_PC 768 +#define SCREEN_Y_RES_NETBOOK 600 +#define SCREEN_BPP 16 #define dac_reg (0x3c8) #define dac_val (0x3c9) diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 502d0de2feec52..f1dcc6766d1ef4 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = { 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03, }, }, + { /* 1024 x 768 16Bpp 60Hz */ + 1024, 768, 16, 60, + /* Init_MISC */ + 0xEB, + { /* Init_SR0_SR4 */ + 0x03, 0x01, 0x0F, 0x03, 0x0E, + }, + { /* Init_SR10_SR24 */ + 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C, + 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xC4, 0x30, 0x02, 0x01, 0x01, + }, + { /* Init_SR30_SR75 */ + 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A, + 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF, + 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC, + 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A, + 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03, + 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A, + 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00, + 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02, + 0x04, 0x45, 0x30, 0x30, 0x40, 0x20, + }, + { /* Init_SR80_SR93 */ + 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A, + 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A, + 0x00, 0x00, 0x00, 0x00, + }, + { /* Init_SRA0_SRAF */ + 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED, + 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF, + }, + { /* Init_GR00_GR08 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F, + 0xFF, + }, + { /* Init_AR00_AR14 */ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x41, 0x00, 0x0F, 0x00, 0x00, + }, + { /* Init_CR00_CR18 */ + 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5, + 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3, + 0xFF, + }, + { /* Init_CR30_CR4D */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20, + 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF, + 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00, + 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF, + }, + { /* Init_CR90_CRA7 */ + 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26, + 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03, + }, + }, { /* mode#5: 1024 x 768 24Bpp 60Hz */ 1024, 768, 24, 60, /* Init_MISC */ @@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan, static int smtc_blank(int blank_mode, struct fb_info *info) { + struct smtcfb_info *sfb = info->par; + /* clear DPMS setting */ switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen On: HSync: On, VSync : On */ + + switch (sfb->chip_id) { + case 0x710: + case 0x712: + smtc_seqw(0x6a, 0x16); + smtc_seqw(0x6b, 0x02); + break; + case 0x720: + smtc_seqw(0x6a, 0x0d); + smtc_seqw(0x6b, 0x02); + break; + } + + smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); - smtc_seqw(0x6a, 0x16); - smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77)); smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); - smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); - smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03)); + smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); break; case FB_BLANK_NORMAL: /* Screen Off: HSync: On, VSync : On Soft blank */ + smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); + smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); + smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); - smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); - smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); - smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); break; case FB_BLANK_VSYNC_SUSPEND: /* Screen On: HSync: On, VSync : Off */ + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20)); smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); - smtc_seqw(0x6a, 0x0c); - smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20)); - smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20)); - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); + smtc_seqw(0x6a, 0x0c); + smtc_seqw(0x6b, 0x02); break; case FB_BLANK_HSYNC_SUSPEND: /* Screen On: HSync: Off, VSync : On */ + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); - smtc_seqw(0x6a, 0x0c); - smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10)); - 
smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); + smtc_seqw(0x6a, 0x0c); + smtc_seqw(0x6b, 0x02); break; case FB_BLANK_POWERDOWN: /* Screen On: HSync: Off, VSync : Off */ + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); - smtc_seqw(0x6a, 0x0c); - smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30)); - smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); + smtc_seqw(0x6a, 0x0c); + smtc_seqw(0x6b, 0x02); break; default: return -EINVAL; @@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb) /* init SEQ register SR30 - SR75 */ for (i = 0; i < SIZE_SR30_SR75; i++) - if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a && - (i + 0x30) != 0x6b) + if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 && + (i + 0x30) != 0x6a && (i + 0x30) != 0x6b && + (i + 0x30) != 0x70 && (i + 0x30) != 0x71 && + (i + 0x30) != 0x74 && (i + 0x30) != 0x75) smtc_seqw(i + 0x30, vgamode[j].init_sr30_sr75[i]); @@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb) smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]); /* init CRTC register CR30 - CR4D */ - for (i = 0; i < SIZE_CR30_CR4D; i++) + for (i = 0; i < SIZE_CR30_CR4D; i++) { + if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F) + /* side-effect, don't write to CR3B-CR3F */ + continue; smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]); + } /* init CRTC register CR90 - CRA7 */ for (i = 0; i < SIZE_CR90_CRA7; i++) @@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb, { sfb->fb->fix.smem_start = pci_resource_start(pdev, 0); + if (sfb->chip_id == 0x720) + /* on SM720, the framebuffer starts at the 1 MB offset */ + sfb->fb->fix.smem_start += 0x00200000; + + /* XXX: is it safe for SM720 on Big-Endian? */ if (sfb->fb->var.bits_per_pixel == 32) sfb->fb->fix.smem_start += big_addr; @@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void) outb_p(0x11, 0x3c5); } +static u_long sm7xx_vram_probe(struct smtcfb_info *sfb) +{ + u8 vram; + + switch (sfb->chip_id) { + case 0x710: + case 0x712: + /* + * Assume SM712 graphics chip has 4MB VRAM. + * + * FIXME: SM712 can have 2MB VRAM, which is used on earlier + * laptops, such as IBM Thinkpad 240X. This driver would + * probably crash on those machines. If anyone gets one of + * those and is willing to help, run "git blame" and send me + * an E-mail. 
+ */ + return 0x00400000; + case 0x720: + outb_p(0x76, 0x3c4); + vram = inb_p(0x3c5) >> 6; + + if (vram == 0x00) + return 0x00800000; /* 8 MB */ + else if (vram == 0x01) + return 0x01000000; /* 16 MB */ + else if (vram == 0x02) + return 0x00400000; /* illegal, fallback to 4 MB */ + else if (vram == 0x03) + return 0x00400000; /* 4 MB */ + } + return 0; /* unknown hardware */ +} + +static void sm7xx_resolution_probe(struct smtcfb_info *sfb) +{ + /* get mode parameter from smtc_scr_info */ + if (smtc_scr_info.lfb_width != 0) { + sfb->fb->var.xres = smtc_scr_info.lfb_width; + sfb->fb->var.yres = smtc_scr_info.lfb_height; + sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth; + goto final; + } + + /* + * No parameter, default resolution is 1024x768-16. + * + * FIXME: earlier laptops, such as IBM Thinkpad 240X, has a 800x600 + * panel, also see the comments about Thinkpad 240X above. + */ + sfb->fb->var.xres = SCREEN_X_RES; + sfb->fb->var.yres = SCREEN_Y_RES_PC; + sfb->fb->var.bits_per_pixel = SCREEN_BPP; + +#ifdef CONFIG_MIPS + /* + * Loongson MIPS netbooks use 1024x600 LCD panels, which is the original + * target platform of this driver, but nearly all old x86 laptops have + * 1024x768. Lighting 768 panels using 600's timings would partially + * garble the display, so we don't want that. But it's not possible to + * distinguish them reliably. + * + * So we change the default to 768, but keep 600 as-is on MIPS. + */ + sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK; +#endif + +final: + big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth); +} + static int smtcfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct smtcfb_info *sfb; struct fb_info *info; - u_long smem_size = 0x00800000; /* default 8MB */ + u_long smem_size; int err; unsigned long mmio_base; @@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, sm7xx_init_hw(); - /* get mode parameter from smtc_scr_info */ - if (smtc_scr_info.lfb_width != 0) { - sfb->fb->var.xres = smtc_scr_info.lfb_width; - sfb->fb->var.yres = smtc_scr_info.lfb_height; - sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth; - } else { - /* default resolution 1024x600 16bit mode */ - sfb->fb->var.xres = SCREEN_X_RES; - sfb->fb->var.yres = SCREEN_Y_RES; - sfb->fb->var.bits_per_pixel = SCREEN_BPP; - } - - big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth); /* Map address and memory detection */ mmio_base = pci_resource_start(pdev, 0); pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id); + smem_size = sm7xx_vram_probe(sfb); + dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n", + smem_size / 1048576); + switch (sfb->chip_id) { case 0x710: case 0x712: sfb->fb->fix.mmio_start = mmio_base + 0x00400000; sfb->fb->fix.mmio_len = 0x00400000; - smem_size = SM712_VIDEOMEMORYSIZE; sfb->lfb = ioremap(mmio_base, mmio_addr); if (!sfb->lfb) { dev_err(&pdev->dev, @@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, case 0x720: sfb->fb->fix.mmio_start = mmio_base; sfb->fb->fix.mmio_len = 0x00200000; - smem_size = SM722_VIDEOMEMORYSIZE; - sfb->dp_regs = ioremap(mmio_base, 0x00a00000); + sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size); sfb->lfb = sfb->dp_regs + 0x00200000; sfb->mmio = (smtc_regbaseaddress = sfb->dp_regs + 0x000c0000); @@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, goto failed_fb; } + /* probe and decide resolution */ + sm7xx_resolution_probe(sfb); + /* can support 32 bpp */ if (sfb->fb->var.bits_per_pixel == 15) 
sfb->fb->var.bits_per_pixel = 16; @@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, if (err) goto failed; - smtcfb_setmode(sfb); + /* + * The screen would be temporarily garbled when sm712fb takes over + * vesafb or VGA text mode. Zero the framebuffer. + */ + memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len); err = register_framebuffer(info); if (err < 0) diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 070026a7e55a50..5a0d6fb02bbc58 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr, return 0; } -static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, - int width, int height, char *data) +static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height) { int i, ret; char *cmd; @@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, start_cycles = get_cycles(); + mutex_lock(&dlfb->render_mutex); + aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); x = aligned_x; if ((width <= 0) || (x + width > dlfb->info->var.xres) || - (y + height > dlfb->info->var.yres)) - return -EINVAL; + (y + height > dlfb->info->var.yres)) { + ret = -EINVAL; + goto unlock_ret; + } - if (!atomic_read(&dlfb->usb_active)) - return 0; + if (!atomic_read(&dlfb->usb_active)) { + ret = 0; + goto unlock_ret; + } urb = dlfb_get_urb(dlfb); - if (!urb) - return 0; + if (!urb) { + ret = 0; + goto unlock_ret; + } cmd = urb->transfer_buffer; for (i = y; i < y + height ; i++) { @@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, *cmd++ = 0xAF; /* Send partial buffer remaining before exiting */ len = cmd - (char *) urb->transfer_buffer; - ret = dlfb_submit_urb(dlfb, urb, len); + dlfb_submit_urb(dlfb, urb, len); bytes_sent += len; } else dlfb_urb_completion(urb); @@ -655,7 +662,55 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, >> 10)), /* Kcycles */ &dlfb->cpu_kcycles_used); - return 0; + ret = 0; + +unlock_ret: + mutex_unlock(&dlfb->render_mutex); + return ret; +} + +static void dlfb_init_damage(struct dlfb_data *dlfb) +{ + dlfb->damage_x = INT_MAX; + dlfb->damage_x2 = 0; + dlfb->damage_y = INT_MAX; + dlfb->damage_y2 = 0; +} + +static void dlfb_damage_work(struct work_struct *w) +{ + struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work); + int x, x2, y, y2; + + spin_lock_irq(&dlfb->damage_lock); + x = dlfb->damage_x; + x2 = dlfb->damage_x2; + y = dlfb->damage_y; + y2 = dlfb->damage_y2; + dlfb_init_damage(dlfb); + spin_unlock_irq(&dlfb->damage_lock); + + if (x < x2 && y < y2) + dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y); +} + +static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height) +{ + unsigned long flags; + int x2 = x + width; + int y2 = y + height; + + if (x >= x2 || y >= y2) + return; + + spin_lock_irqsave(&dlfb->damage_lock, flags); + dlfb->damage_x = min(x, dlfb->damage_x); + dlfb->damage_x2 = max(x2, dlfb->damage_x2); + dlfb->damage_y = min(y, dlfb->damage_y); + dlfb->damage_y2 = max(y2, dlfb->damage_y2); + spin_unlock_irqrestore(&dlfb->damage_lock, flags); + + schedule_work(&dlfb->damage_work); } /* @@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf, (u32)info->var.yres); dlfb_handle_damage(dlfb, 0, start, info->var.xres, - lines, info->screen_base); + lines); } 
return result; @@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info, sys_copyarea(info, area); - dlfb_handle_damage(dlfb, area->dx, area->dy, - area->width, area->height, info->screen_base); + dlfb_offload_damage(dlfb, area->dx, area->dy, + area->width, area->height); } static void dlfb_ops_imageblit(struct fb_info *info, @@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info, sys_imageblit(info, image); - dlfb_handle_damage(dlfb, image->dx, image->dy, - image->width, image->height, info->screen_base); + dlfb_offload_damage(dlfb, image->dx, image->dy, + image->width, image->height); } static void dlfb_ops_fillrect(struct fb_info *info, @@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info, sys_fillrect(info, rect); - dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width, - rect->height, info->screen_base); + dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width, + rect->height); } /* @@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info, int bytes_identical = 0; int bytes_rendered = 0; + mutex_lock(&dlfb->render_mutex); + if (!fb_defio) - return; + goto unlock_ret; if (!atomic_read(&dlfb->usb_active)) - return; + goto unlock_ret; start_cycles = get_cycles(); urb = dlfb_get_urb(dlfb); if (!urb) - return; + goto unlock_ret; cmd = urb->transfer_buffer; @@ -782,6 +839,8 @@ static void dlfb_dpy_deferred_io(struct fb_info *info, atomic_add(((unsigned int) ((end_cycles - start_cycles) >> 10)), /* Kcycles */ &dlfb->cpu_kcycles_used); +unlock_ret: + mutex_unlock(&dlfb->render_mutex); } static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len) @@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd, if (area.y > info->var.yres) area.y = info->var.yres; - dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h, - info->screen_base); + dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h); } return 0; @@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info) { struct dlfb_data *dlfb = info->par; + cancel_work_sync(&dlfb->damage_work); + + mutex_destroy(&dlfb->render_mutex); + if (info->cmap.len != 0) fb_dealloc_cmap(&info->cmap); if (info->monspecs.modedb) @@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info) pix_framebuffer[i] = 0x37e6; } - dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres, - info->screen_base); + dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres); return 0; } @@ -1598,7 +1659,7 @@ static int dlfb_usb_probe(struct usb_interface *intf, dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL); if (!dlfb) { dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__); - goto error; + return -ENOMEM; } INIT_LIST_HEAD(&dlfb->deferred_free); @@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf, dlfb->ops = dlfb_ops; info->fbops = &dlfb->ops; + mutex_init(&dlfb->render_mutex); + dlfb_init_damage(dlfb); + spin_lock_init(&dlfb->damage_lock); + INIT_WORK(&dlfb->damage_work, dlfb_damage_work); + INIT_LIST_HEAD(&info->modelist); if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) { @@ -1703,7 +1769,7 @@ static int dlfb_usb_probe(struct usb_interface *intf, error: if (dlfb->info) { dlfb_ops_destroy(dlfb->info); - } else if (dlfb) { + } else { usb_put_dev(dlfb->udev); kfree(dlfb); } @@ -1730,12 +1796,10 @@ static void dlfb_usb_disconnect(struct usb_interface *intf) /* this function will wait for all in-flight urbs to complete */ dlfb_free_urb_list(dlfb); - if (info) { - /* remove udlfb's sysfs 
interfaces */ - for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) - device_remove_file(info->dev, &fb_device_attrs[i]); - device_remove_bin_file(info->dev, &edid_attr); - } + /* remove udlfb's sysfs interfaces */ + for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) + device_remove_file(info->dev, &fb_device_attrs[i]); + device_remove_bin_file(info->dev, &edid_attr); unregister_framebuffer(info); } diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 0364d3329c526b..3516ce6718d94e 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev) if (w1_reset_bus(dev)) return -1; - /* This will make only the last matched slave perform a skip ROM. */ - w1_write_8(dev, W1_RESUME_CMD); + w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM); return 0; } EXPORT_SYMBOL_GPL(w1_reset_resume_command); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 5ea8909a41f952..b165c46aca741b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1967,6 +1967,7 @@ comment "Watchdog Pretimeout Governors" config WATCHDOG_PRETIMEOUT_GOV bool "Enable watchdog pretimeout governors" + depends on WATCHDOG_CORE help The option allows to select watchdog pretimeout governors. diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 2b52514eaa86a8..7e7bdcbbc741b3 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, static int imx2_wdt_set_timeout(struct watchdog_device *wdog, unsigned int new_timeout) { - __imx2_wdt_set_timeout(wdog, new_timeout); + unsigned int actual; + actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000); + __imx2_wdt_set_timeout(wdog, actual); wdog->timeout = new_timeout; return 0; } diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 91da7e44d5d4f4..3a144eecb6a728 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -538,7 +538,6 @@ static int __write_ring(struct pvcalls_data_intf *intf, int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { - struct pvcalls_bedata *bedata; struct sock_mapping *map; int sent, tot_sent = 0; int count = 0, flags; @@ -550,7 +549,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, map = pvcalls_enter_sock(sock); if (IS_ERR(map)) return PTR_ERR(map); - bedata = dev_get_drvdata(&pvcalls_front_dev->dev); mutex_lock(&map->active.out_mutex); if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { @@ -633,7 +631,6 @@ static int __read_ring(struct pvcalls_data_intf *intf, int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - struct pvcalls_bedata *bedata; int ret; struct sock_mapping *map; @@ -643,7 +640,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, map = pvcalls_enter_sock(sock); if (IS_ERR(map)) return PTR_ERR(map); - bedata = dev_get_drvdata(&pvcalls_front_dev->dev); mutex_lock(&map->active.in_mutex); if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index ea4a08b83fa099..787966f445895b 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev) if (pci_is_enabled(dev)) pci_disable_device(dev); - pci_write_config_word(dev, PCI_COMMAND, 0); - 
dev->is_busmaster = 0; } else { pci_read_config_word(dev, PCI_COMMAND, &cmd); diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 092981171df17f..d75a2385b37c77 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -83,6 +83,7 @@ struct xb_req_data { int num_vecs; int err; enum xb_req_state state; + bool user_req; void (*cb)(struct xb_req_data *); void *par; }; @@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void); int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par); void xenbus_dev_queue_reply(struct xb_req_data *req); +extern unsigned int xb_dev_generation_id; + #endif diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0782ff3c227352..39c63152a35800 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -62,6 +62,8 @@ #include "xenbus.h" +unsigned int xb_dev_generation_id; + /* * An element of a list of outstanding transactions, for which we're * still waiting a reply. @@ -69,6 +71,7 @@ struct xenbus_transaction_holder { struct list_head list; struct xenbus_transaction handle; + unsigned int generation_id; }; /* @@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } + trans->generation_id = xb_dev_generation_id; list_add(&trans->list, &u->transactions); } else if (msg->hdr.tx_id != 0 && !xenbus_get_transaction(u, msg->hdr.tx_id)) @@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type, !(msg->hdr.len == 2 && (!strcmp(msg->body, "T") || !strcmp(msg->body, "F")))) return xenbus_command_reply(u, XS_ERROR, "EINVAL"); + else if (msg_type == XS_TRANSACTION_END) { + trans = xenbus_get_transaction(u, msg->hdr.tx_id); + if (trans && trans->generation_id != xb_dev_generation_id) { + list_del(&trans->list); + kfree(trans); + if (!strcmp(msg->body, "T")) + return xenbus_command_reply(u, XS_ERROR, + "EAGAIN"); + else + return xenbus_command_reply(u, + XS_TRANSACTION_END, + "OK"); + } + } rc = xenbus_dev_request_and_reply(&msg->hdr, u); if (rc && trans) { diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 49a3874ae6bb45..ddc18da61834e6 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -105,6 +105,7 @@ static void xs_suspend_enter(void) static void xs_suspend_exit(void) { + xb_dev_generation_id++; spin_lock(&xs_state_lock); xs_suspend_active--; spin_unlock(&xs_state_lock); @@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req) spin_lock(&xs_state_lock); } - if (req->type == XS_TRANSACTION_START) + if (req->type == XS_TRANSACTION_START && !req->user_req) xs_state_users++; xs_state_users++; rq_id = xs_request_id++; @@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req) spin_lock(&xs_state_lock); xs_state_users--; if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) || - (req->type == XS_TRANSACTION_END && + (req->type == XS_TRANSACTION_END && !req->user_req && !WARN_ON_ONCE(req->msg.type == XS_ERROR && !strcmp(req->body, "ENOENT")))) xs_state_users--; @@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) req->num_vecs = 1; req->cb = xenbus_dev_queue_reply; req->par = par; + req->user_req = true; xs_send(req, msg); @@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->vec = iovec; req->num_vecs = num_vecs; req->cb = xs_wake_up; + req->user_req = false; msg.req_id = 0; msg.tx_id = t.id; diff --git 
a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ae750b1574a259..ac6c383d63140b 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -710,7 +710,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, * read tree blocks and add keys where required. */ static int add_missing_keys(struct btrfs_fs_info *fs_info, - struct preftrees *preftrees) + struct preftrees *preftrees, bool lock) { struct prelim_ref *ref; struct extent_buffer *eb; @@ -735,12 +735,14 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info, free_extent_buffer(eb); return -EIO; } - btrfs_tree_read_lock(eb); + if (lock) + btrfs_tree_read_lock(eb); if (btrfs_header_level(eb) == 0) btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); else btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); - btrfs_tree_read_unlock(eb); + if (lock) + btrfs_tree_read_unlock(eb); free_extent_buffer(eb); prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); cond_resched(); @@ -1225,7 +1227,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, btrfs_release_path(path); - ret = add_missing_keys(fs_info, &preftrees); + ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0); if (ret) goto out; @@ -1286,11 +1288,14 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, ret = -EIO; goto out; } - btrfs_tree_read_lock(eb); - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); + if (!path->skip_locking) { + btrfs_tree_read_lock(eb); + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); + } ret = find_extent_in_eb(eb, bytenr, *extent_item_pos, &eie, ignore_offset); - btrfs_tree_read_unlock_blocking(eb); + if (!path->skip_locking) + btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); if (ret < 0) goto out; @@ -1452,8 +1457,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans, * callers (such as fiemap) which want to know whether the extent is * shared but do not need a ref count. * - * This attempts to allocate a transaction in order to account for - * delayed refs, but continues on even when the alloc fails. + * This attempts to attach to the running transaction in order to account for + * delayed refs, but continues on even when no running transaction exists. * * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. 
*/ @@ -1476,13 +1481,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr) tmp = ulist_alloc(GFP_NOFS); roots = ulist_alloc(GFP_NOFS); if (!tmp || !roots) { - ulist_free(tmp); - ulist_free(roots); - return -ENOMEM; + ret = -ENOMEM; + goto out; } - trans = btrfs_join_transaction(root); + trans = btrfs_attach_transaction(root); if (IS_ERR(trans)) { + if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { + ret = PTR_ERR(trans); + goto out; + } trans = NULL; down_read(&fs_info->commit_root_sem); } else { @@ -1515,6 +1523,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr) } else { up_read(&fs_info->commit_root_sem); } +out: ulist_free(tmp); ulist_free(roots); return ret; @@ -1904,13 +1913,19 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, extent_item_objectid); if (!search_commit_root) { - trans = btrfs_join_transaction(fs_info->extent_root); - if (IS_ERR(trans)) - return PTR_ERR(trans); + trans = btrfs_attach_transaction(fs_info->extent_root); + if (IS_ERR(trans)) { + if (PTR_ERR(trans) != -ENOENT && + PTR_ERR(trans) != -EROFS) + return PTR_ERR(trans); + trans = NULL; + } + } + + if (trans) btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); - } else { + else down_read(&fs_info->commit_root_sem); - } ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, tree_mod_seq_elem.seq, &refs, @@ -1943,7 +1958,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, free_leaf_list(refs); out: - if (!search_commit_root) { + if (trans) { btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); btrfs_end_transaction(trans); } else { diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 48ac8b7c43a55f..79ac1ebabaf784 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2436,6 +2436,16 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, if (tmp) { /* first we do an atomic uptodate check */ if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { + /* + * Do extra check for first_key, eb can be stale due to + * being cached, read from scrub, or have multiple + * parents (shared tree blocks). 
+ */ + if (btrfs_verify_level_key(fs_info, tmp, + parent_level - 1, &first_key, gen)) { + free_extent_buffer(tmp); + return -EUCLEAN; + } *eb_ret = tmp; return 0; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b4f61a3d560aa1..b2dc613ebed2c0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -408,9 +408,9 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, return ret; } -static int verify_level_key(struct btrfs_fs_info *fs_info, - struct extent_buffer *eb, int level, - struct btrfs_key *first_key, u64 parent_transid) +int btrfs_verify_level_key(struct btrfs_fs_info *fs_info, + struct extent_buffer *eb, int level, + struct btrfs_key *first_key, u64 parent_transid) { int found_level; struct btrfs_key found_key; @@ -487,8 +487,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info, if (verify_parent_transid(io_tree, eb, parent_transid, 0)) ret = -EIO; - else if (verify_level_key(fs_info, eb, level, - first_key, parent_transid)) + else if (btrfs_verify_level_key(fs_info, eb, level, + first_key, parent_transid)) ret = -EUCLEAN; else break; @@ -995,13 +995,18 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr) { struct extent_buffer *buf = NULL; struct inode *btree_inode = fs_info->btree_inode; + int ret; buf = btrfs_find_create_tree_block(fs_info, bytenr); if (IS_ERR(buf)) return; - read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, - buf, WAIT_NONE, 0); - free_extent_buffer(buf); + + ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, + WAIT_NONE, 0); + if (ret < 0) + free_extent_buffer_stale(buf); + else + free_extent_buffer(buf); } int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, @@ -1021,12 +1026,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK, mirror_num); if (ret) { - free_extent_buffer(buf); + free_extent_buffer_stale(buf); return ret; } if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { - free_extent_buffer(buf); + free_extent_buffer_stale(buf); return -EIO; } else if (extent_buffer_uptodate(buf)) { *eb = buf; @@ -1080,7 +1085,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid, level, first_key); if (ret) { - free_extent_buffer(buf); + free_extent_buffer_stale(buf); return ERR_PTR(ret); } return buf; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 4cccba22640fb6..7a4a60f26dbf98 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -39,6 +39,9 @@ static inline u64 btrfs_sb_offset(int mirror) struct btrfs_device; struct btrfs_fs_devices; +int btrfs_verify_level_key(struct btrfs_fs_info *fs_info, + struct extent_buffer *eb, int level, + struct btrfs_key *first_key, u64 parent_transid); struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, u64 parent_transid, int level, struct btrfs_key *first_key); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c0db7785cedeba..0cc800d22a0814 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3911,8 +3911,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) info->space_info_kobj, "%s", alloc_name(space_info->flags)); if (ret) { - percpu_counter_destroy(&space_info->total_bytes_pinned); - kfree(space_info); + kobject_put(&space_info->kobj); return ret; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ca4902c66dc43d..e24c0a69ff5d43 100644 
--- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2058,6 +2058,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) int ret = 0, err; u64 len; + /* + * If the inode needs a full sync, make sure we use a full range to + * avoid log tree corruption, due to hole detection racing with ordered + * extent completion for adjacent ranges, and assertion failures during + * hole detection. + */ + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags)) { + start = 0; + end = LLONG_MAX; + } + /* * The range length can be represented by u64, we have to do the typecasts * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() @@ -2565,10 +2577,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); - if (ret) { - inode_unlock(inode); + if (ret) goto out_only_mutex; - } path = btrfs_alloc_path(); if (!path) { @@ -3151,6 +3161,7 @@ static long btrfs_fallocate(struct file *file, int mode, ret = btrfs_qgroup_reserve_data(inode, &data_reserved, cur_offset, last_byte - cur_offset); if (ret < 0) { + cur_offset = last_byte; free_extent_map(em); break; } @@ -3200,7 +3211,7 @@ static long btrfs_fallocate(struct file *file, int mode, /* Let go of our reservation. */ if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE)) btrfs_free_reserved_data_space(inode, data_reserved, - alloc_start, alloc_end - cur_offset); + cur_offset, alloc_end - cur_offset); extent_changeset_free(data_reserved); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 59f361f7d0c13e..c1cd3fe2b29534 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6426,8 +6426,18 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + name_len * 2); inode_inc_iversion(&parent_inode->vfs_inode); - parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime = - current_time(&parent_inode->vfs_inode); + /* + * If we are replaying a log tree, we do not want to update the mtime + * and ctime of the parent directory with the current time, since the + * log replay procedure is responsible for setting them to their correct + * values (the ones it had when the fsync was done). 
+ */ + if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { + struct timespec64 now = current_time(&parent_inode->vfs_inode); + + parent_inode->vfs_inode.i_mtime = now; + parent_inode->vfs_inode.i_ctime = now; + } ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); if (ret) btrfs_abort_transaction(trans, ret); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index dec14b739b1016..859274e384175d 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -745,6 +745,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) u64 total = 0; int i; +again: do { enqueued = 0; mutex_lock(&fs_devices->device_list_mutex); @@ -756,6 +757,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) mutex_unlock(&fs_devices->device_list_mutex); total += enqueued; } while (enqueued && total < 10000); + if (fs_devices->seed) { + fs_devices = fs_devices->seed; + goto again; + } if (enqueued == 0) return; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0526b6c473c798..5d57ed62934555 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4289,27 +4289,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) mutex_lock(&fs_info->cleaner_mutex); ret = relocate_block_group(rc); mutex_unlock(&fs_info->cleaner_mutex); - if (ret < 0) { + if (ret < 0) err = ret; - goto out; - } - - if (rc->extents_found == 0) - break; - - btrfs_info(fs_info, "found %llu extents", rc->extents_found); + /* + * We may have gotten ENOSPC after we already dirtied some + * extents. If writeout happens while we're relocating a + * different block group we could end up hitting the + * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in + * btrfs_reloc_cow_block. Make sure we write everything out + * properly so we don't trip over this problem, and then break + * out of the loop if we hit an error. + */ if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { ret = btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1); - if (ret) { + if (ret) err = ret; - goto out; - } invalidate_mapping_pages(rc->data_inode->i_mapping, 0, -1); rc->stage = UPDATE_DATA_PTRS; } + + if (err < 0) + goto out; + + if (rc->extents_found == 0) + break; + + btrfs_info(fs_info, "found %llu extents", rc->extents_found); + } WARN_ON(rc->block_group->pinned > 0); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 65bda0682928ba..3228d3b3084a4f 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -132,16 +132,17 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root return -ENOMEM; ret = btrfs_search_slot(trans, root, key, path, 0, 1); - if (ret < 0) { - btrfs_abort_transaction(trans, ret); + if (ret < 0) goto out; - } - if (ret != 0) { - btrfs_print_leaf(path->nodes[0]); - btrfs_crit(fs_info, "unable to update root key %llu %u %llu", - key->objectid, key->type, key->offset); - BUG_ON(1); + if (ret > 0) { + btrfs_crit(fs_info, + "unable to find root key (%llu %u %llu) in tree %llu", + key->objectid, key->type, key->offset, + root->root_key.objectid); + ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); + goto out; } l = path->nodes[0]; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 84cb6e5ef36c29..258392b75048e5 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5021,6 +5021,12 @@ static int send_hole(struct send_ctx *sctx, u64 end) if (offset >= sctx->cur_inode_size) return 0; + /* + * Don't go beyond the inode's i_size due to prealloc extents that start + * after the i_size. 
+ */ + end = min_t(u64, end, sctx->cur_inode_size); + if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) return send_update_extent(sctx, offset, end - offset); @@ -6583,6 +6589,38 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx) return btrfs_commit_transaction(trans); } +/* + * Make sure any existing delalloc is flushed for any root used by a send + * operation so that we do not miss any data and we do not race with writeback + * finishing and changing a tree while send is using the tree. This could + * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and + * a send operation then uses the subvolume. + * After flushing delalloc ensure_commit_roots_uptodate() must be called. + */ +static int flush_delalloc_roots(struct send_ctx *sctx) +{ + struct btrfs_root *root = sctx->parent_root; + int ret; + int i; + + if (root) { + ret = btrfs_start_delalloc_snapshot(root); + if (ret) + return ret; + btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); + } + + for (i = 0; i < sctx->clone_roots_cnt; i++) { + root = sctx->clone_roots[i].root; + ret = btrfs_start_delalloc_snapshot(root); + if (ret) + return ret; + btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); + } + + return 0; +} + static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) { spin_lock(&root->root_item_lock); @@ -6807,6 +6845,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) NULL); sort_clone_roots = 1; + ret = flush_delalloc_roots(sctx); + if (ret) + goto out; + ret = ensure_commit_roots_uptodate(sctx); if (ret) goto out; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 3717c864ba2316..aefb0169d46d75 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -811,7 +811,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs, fs_devs->fsid_kobj.kset = btrfs_kset; error = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, parent, "%pU", fs_devs->fsid); - return error; + if (error) { + kobject_put(&fs_devs->fsid_kobj); + return error; + } + + return 0; } int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 2f4f0958e5f289..0d5840d20efcb4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3037,6 +3037,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, root->log_transid++; log->log_transid = root->log_transid; root->log_start_pid = 0; + /* + * Update or create log root item under the root's log_mutex to prevent + * races with concurrent log syncs that can lead to failure to update + * log root item because it was not created yet. + */ + ret = update_log_root(trans, log); /* * IO has been started, blocks of the log tree have WRITTEN flag set * in their headers.
new modifications of the log will be written to @@ -3056,8 +3062,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, mutex_unlock(&log_root_tree->log_mutex); - ret = update_log_root(trans, log); - mutex_lock(&log_root_tree->log_mutex); if (atomic_dec_and_test(&log_root_tree->log_writers)) { /* atomic_dec_and_test implies a barrier */ @@ -4121,6 +4125,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, *last_extent, 0, 0, len, 0, len, 0, 0, 0); + *last_extent += len; } } } @@ -5307,7 +5312,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, { int ret = 0; struct dentry *old_parent = NULL; - struct btrfs_inode *orig_inode = inode; /* * for regular files, if its inode is already on disk, we don't @@ -5327,16 +5331,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, } while (1) { - /* - * If we are logging a directory then we start with our inode, - * not our parent's inode, so we need to skip setting the - * logged_trans so that further down in the log code we don't - * think this inode has already been logged. - */ - if (inode != orig_inode) - inode->logged_trans = trans->transid; - smp_mb(); - if (btrfs_must_commit_transaction(trans, inode)) { ret = 1; break; @@ -6065,7 +6059,6 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, * if this directory was already logged any new * names for this file/dir will get recorded */ - smp_mb(); if (dir->logged_trans == trans->transid) return; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index eab1359d05532a..c5cf46e43f2e70 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -819,6 +819,12 @@ static void ceph_umount_begin(struct super_block *sb) return; } +static int ceph_remount(struct super_block *sb, int *flags, char *data) +{ + sync_filesystem(sb); + return 0; +} + static const struct super_operations ceph_super_ops = { .alloc_inode = ceph_alloc_inode, .destroy_inode = ceph_destroy_inode, @@ -826,6 +832,7 @@ static const struct super_operations ceph_super_ops = { .drop_inode = ceph_drop_inode, .sync_fs = ceph_sync_fs, .put_super = ceph_put_super, + .remount_fs = ceph_remount, .show_options = ceph_show_options, .statfs = ceph_statfs, .umount_begin = ceph_umount_begin, diff --git a/fs/char_dev.c b/fs/char_dev.c index a279c58fe36062..8a63cfa2900533 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -159,6 +159,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor, ret = -EBUSY; goto out; } + + if (new_min < old_min && new_max > old_max) { + ret = -EBUSY; + goto out; + } + } cd->next = *cp; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d6b45682833bea..23cee91ed442e6 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2988,7 +2988,9 @@ cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages) } if (rc) { - for (i = 0; i < nr_pages; i++) { + unsigned int nr_page_failed = i; + + for (i = 0; i < nr_page_failed; i++) { put_page(rdata->pages[i]); rdata->pages[i] = NULL; } diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 18814f1d67d9b1..3c0bad577859b3 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c @@ -457,7 +457,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"}, {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO, "STATUS_ALLOTTED_SPACE_EXCEEDED"}, - {STATUS_INSUFFICIENT_RESOURCES, -EREMOTEIO, + {STATUS_INSUFFICIENT_RESOURCES, -EAGAIN, "STATUS_INSUFFICIENT_RESOURCES"}, {STATUS_DFS_EXIT_PATH_FOUND, -EIO, 
"STATUS_DFS_EXIT_PATH_FOUND"}, {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"}, diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 2001184afe70b6..0ccf8f9b63a2e7 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -2348,26 +2348,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache) { char message[5] = {0}; + unsigned int new_oplock = 0; oplock &= 0xFF; if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) return; - cinode->oplock = 0; if (oplock & SMB2_LEASE_READ_CACHING_HE) { - cinode->oplock |= CIFS_CACHE_READ_FLG; + new_oplock |= CIFS_CACHE_READ_FLG; strcat(message, "R"); } if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) { - cinode->oplock |= CIFS_CACHE_HANDLE_FLG; + new_oplock |= CIFS_CACHE_HANDLE_FLG; strcat(message, "H"); } if (oplock & SMB2_LEASE_WRITE_CACHING_HE) { - cinode->oplock |= CIFS_CACHE_WRITE_FLG; + new_oplock |= CIFS_CACHE_WRITE_FLG; strcat(message, "W"); } - if (!cinode->oplock) - strcat(message, "None"); + if (!new_oplock) + strncpy(message, "None", sizeof(message)); + + cinode->oplock = new_oplock; cifs_dbg(FYI, "%s Lease granted on inode %p\n", message, &cinode->vfs_inode); } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 33afb637e6f891..c181f1621e1af0 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -887,7 +887,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) * not supported error. Client should accept it. */ cifs_dbg(VFS, "Server does not support validate negotiate\n"); - return 0; + rc = 0; + goto out_free_inbuf; } else if (rc != 0) { cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); rc = -EIO; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 39843fa7e11b07..809c1edffbaf16 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry, if (sd) { /* Coordinate with configfs_readdir */ spin_lock(&configfs_dirent_lock); - /* Coordinate with configfs_attach_attr where will increase - * sd->s_count and update sd->s_dentry to new allocated one. - * Only set sd->dentry to null when this dentry is the only - * sd owner. - * If not do so, configfs_d_iput may run just after - * configfs_attach_attr and set sd->s_dentry to null - * even it's still in use. + /* + * Set sd->s_dentry to null only when this dentry is the one + * that is going to be killed. Otherwise configfs_d_iput may + * run just after configfs_attach_attr and set sd->s_dentry to + * NULL even it's still in use. 
*/ - if (atomic_read(&sd->s_count) <= 2) + if (sd->s_dentry == dentry) sd->s_dentry = NULL; spin_unlock(&configfs_dirent_lock); @@ -1755,12 +1753,19 @@ int configfs_register_group(struct config_group *parent_group, inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); ret = create_default_group(parent_group, group); - if (!ret) { - spin_lock(&configfs_dirent_lock); - configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); - spin_unlock(&configfs_dirent_lock); - } + if (ret) + goto err_out; + + spin_lock(&configfs_dirent_lock); + configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); + spin_unlock(&configfs_dirent_lock); + inode_unlock(d_inode(parent)); + return 0; +err_out: inode_unlock(d_inode(parent)); + mutex_lock(&subsys->su_mutex); + unlink_group(group); + mutex_unlock(&subsys->su_mutex); return ret; } EXPORT_SYMBOL(configfs_register_group); diff --git a/fs/dax.c b/fs/dax.c index 09fa70683c41ac..75a289c31c7e5d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -908,7 +908,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, goto unlock_pmd; flush_cache_page(vma, address, pfn); - pmd = pmdp_huge_clear_flush(vma, address, pmdp); + pmd = pmdp_invalidate(vma, address, pmdp); pmd = pmd_wrprotect(pmd); pmd = pmd_mkclean(pmd); set_pmd_at(vma->vm_mm, address, pmdp, pmd); @@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, } trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); - result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, - write); + result = vmf_insert_pfn_pmd(vmf, pfn, write); break; case IOMAP_UNWRITTEN: case IOMAP_HOLE: @@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, break; #ifdef CONFIG_FS_DAX_PMD case PE_SIZE_PMD: - ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, - pfn, true); + ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); break; #endif default: diff --git a/fs/dcache.c b/fs/dcache.c index cb515f183482a2..6e0022326afe32 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry) } } /* if dentry was never visible to RCU, immediate free is OK */ - if (!(dentry->d_flags & DCACHE_RCUACCESS)) + if (dentry->d_flags & DCACHE_NORCU) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); @@ -1694,7 +1694,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) struct dentry *dentry = __d_alloc(parent->d_sb, name); if (!dentry) return NULL; - dentry->d_flags |= DCACHE_RCUACCESS; spin_lock(&parent->d_lock); /* * don't need child lock because it is not subject @@ -1719,7 +1718,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent) { struct dentry *dentry = d_alloc_anon(parent->d_sb); if (dentry) { - dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR; + dentry->d_flags |= DCACHE_DENTRY_CURSOR; dentry->d_parent = dget(parent); } return dentry; @@ -1732,10 +1731,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent) * * For a filesystem that just pins its dentries in memory and never * performs lookups at all, return an unhashed IS_ROOT dentry. + * This is used for pipes, sockets et.al. - the stuff that should + * never be anyone's children or parents. Unlike all other + * dentries, these will not have RCU delay between dropping the + * last reference and freeing them. 
*/ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) { - return __d_alloc(sb, name); + struct dentry *dentry = __d_alloc(sb, name); + if (likely(dentry)) + dentry->d_flags |= DCACHE_NORCU; + return dentry; } EXPORT_SYMBOL(d_alloc_pseudo); @@ -1899,12 +1905,10 @@ struct dentry *d_make_root(struct inode *root_inode) if (root_inode) { res = d_alloc_anon(root_inode->i_sb); - if (res) { - res->d_flags |= DCACHE_RCUACCESS; + if (res) d_instantiate(res, root_inode); - } else { + else iput(root_inode); - } } return res; } @@ -2769,9 +2773,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target, copy_name(dentry, target); target->d_hash.pprev = NULL; dentry->d_parent->d_lockref.count++; - if (dentry == old_parent) - dentry->d_flags |= DCACHE_RCUACCESS; - else + if (dentry != old_parent) /* wasn't IS_ROOT */ WARN_ON(!--old_parent->d_lockref.count); } else { target->d_parent = old_parent; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2ddf7833350dd1..1ee51d3a978ad6 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1670,6 +1670,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ #define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000 +extern void ext4_update_dynamic_rev(struct super_block *sb); + #define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline bool ext4_has_feature_##name(struct super_block *sb) \ { \ @@ -1678,6 +1680,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \ } \ static inline void ext4_set_feature_##name(struct super_block *sb) \ { \ + ext4_update_dynamic_rev(sb); \ EXT4_SB(sb)->s_es->s_feature_compat |= \ cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \ } \ @@ -1695,6 +1698,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \ } \ static inline void ext4_set_feature_##name(struct super_block *sb) \ { \ + ext4_update_dynamic_rev(sb); \ EXT4_SB(sb)->s_es->s_feature_ro_compat |= \ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \ } \ @@ -1712,6 +1716,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \ } \ static inline void ext4_set_feature_##name(struct super_block *sb) \ { \ + ext4_update_dynamic_rev(sb); \ EXT4_SB(sb)->s_es->s_feature_incompat |= \ cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \ } \ @@ -2679,7 +2684,6 @@ do { \ #endif -extern void ext4_update_dynamic_rev(struct super_block *sb); extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, __u32 compat); extern int ext4_update_rocompat_feature(handle_t *handle, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 72a361d5ef7477..45aea792d22a04 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, __le32 border; ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ int err = 0; + size_t ext_size = 0; /* make decision: where to split? 
*/ /* FIXME: now decision is simplest: at current extent */ @@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, le16_add_cpu(&neh->eh_entries, m); } + /* zero out unused area in the extent block */ + ext_size = sizeof(struct ext4_extent_header) + + sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries); + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); @@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, sizeof(struct ext4_extent_idx) * m); le16_add_cpu(&neh->eh_entries, m); } + /* zero out unused area in the extent block */ + ext_size = sizeof(struct ext4_extent_header) + + (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries)); + memset(bh->b_data + ext_size, 0, + inode->i_sb->s_blocksize - ext_size); ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); @@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, ext4_fsblk_t newblock, goal = 0; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; int err = 0; + size_t ext_size = 0; /* Try to prepend new index to old one */ if (ext_depth(inode)) @@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, goto out; } + ext_size = sizeof(EXT4_I(inode)->i_data); /* move top-level index/leaf into new block */ - memmove(bh->b_data, EXT4_I(inode)->i_data, - sizeof(EXT4_I(inode)->i_data)); + memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size); + /* zero out unused area in the extent block */ + memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); /* set size of new block */ neh = ext_block_hdr(bh); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 98ec11f69cd4d0..2c5baa5e829116 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) } ret = __generic_file_write_iter(iocb, from); + /* + * Unaligned direct AIO must be the only IO in flight. Otherwise + * overlapping aligned IO after unaligned might result in data + * corruption. + */ + if (ret == -EIOCBQUEUED && unaligned_aio) + ext4_unwritten_wait(inode); inode_unlock(inode); if (ret > 0) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2c43c5b92229d5..05dc5a4ba481b8 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5320,7 +5320,6 @@ static int ext4_do_update_inode(handle_t *handle, err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_brelse; - ext4_update_dynamic_rev(sb); ext4_set_feature_large_file(sb); ext4_handle_sync(handle); err = ext4_handle_dirty_super(handle, sb); @@ -5597,25 +5596,22 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) { - if (orphan) + if (orphan && inode->i_nlink) ext4_orphan_del(NULL, inode); goto err_out; } } - if (!shrink) + if (!shrink) { pagecache_isize_extended(inode, oldsize, inode->i_size); - - /* - * Blocks are going to be removed from the inode. Wait - * for dio in flight. Temporarily disable - * dioread_nolock to prevent livelock. - */ - if (orphan) { - if (!ext4_should_journal_data(inode)) { - inode_dio_wait(inode); - } else - ext4_wait_for_tail_page_commit(inode); + } else { + /* + * Blocks are going to be removed from the inode. Wait + * for dio in flight. 
+ */ + inode_dio_wait(inode); } + if (orphan && ext4_should_journal_data(inode)) + ext4_wait_for_tail_page_commit(inode); down_write(&EXT4_I(inode)->i_mmap_sem); rc = ext4_break_layouts(inode); @@ -5971,7 +5967,7 @@ int ext4_expand_extra_isize(struct inode *inode, ext4_write_lock_xattr(inode, &no_expand); - BUFFER_TRACE(iloc.bh, "get_write_access"); + BUFFER_TRACE(iloc->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, iloc->bh); if (error) { brelse(iloc->bh); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 5f24fdc140add9..53d57cdf3c4d88 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -977,7 +977,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (err == 0) err = err2; mnt_drop_write_file(filp); - if (!err && (o_group > EXT4_SB(sb)->s_groups_count) && + if (!err && (o_group < EXT4_SB(sb)->s_groups_count) && ext4_has_group_desc_csum(sb) && test_opt(sb, INIT_INODE_TABLE)) err = ext4_register_li_request(sb, o_group); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index e29fce2fbf25f2..cc229f3357f7e6 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1539,7 +1539,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block, ex->fe_len += 1 << order; } - if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) { + if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { /* Should never happen! (but apparently sometimes does?!?) */ WARN_ON(1); ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent " diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 4f8de2b9e87e33..4c5aa5df657314 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -871,12 +871,15 @@ static void dx_release(struct dx_frame *frames) { struct dx_root_info *info; int i; + unsigned int indirect_levels; if (frames[0].bh == NULL) return; info = &((struct dx_root *)frames[0].bh->b_data)->info; - for (i = 0; i <= info->indirect_levels; i++) { + /* save local copy, "info" may be freed after brelse() */ + indirect_levels = info->indirect_levels; + for (i = 0; i <= indirect_levels; i++) { if (frames[i].bh == NULL) break; brelse(frames[i].bh); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index e7ae26e36c9c11..4d5c0fc9d23a78 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -874,6 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); if (unlikely(err)) { ext4_std_error(sb, err); + iloc.bh = NULL; goto errout; } brelse(dind); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index abba7ece78e94c..a270391228af72 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -698,7 +698,7 @@ void __ext4_abort(struct super_block *sb, const char *function, jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); } - if (test_opt(sb, ERRORS_PANIC)) { + if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) { if (EXT4_SB(sb)->s_journal && !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) return; @@ -2259,7 +2259,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); ext4_update_tstamp(es, s_mtime); - ext4_update_dynamic_rev(sb); if (sbi->s_journal) ext4_set_feature_journal_needs_recovery(sb); @@ -3514,6 +3513,37 @@ int ext4_calculate_overhead(struct super_block *sb) return 0; } +static void ext4_clamp_want_extra_isize(struct super_block *sb) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = 
sbi->s_es; + + /* determine the minimum size of new large inodes, if present */ + if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && + sbi->s_want_extra_isize == 0) { + sbi->s_want_extra_isize = sizeof(struct ext4_inode) - + EXT4_GOOD_OLD_INODE_SIZE; + if (ext4_has_feature_extra_isize(sb)) { + if (sbi->s_want_extra_isize < + le16_to_cpu(es->s_want_extra_isize)) + sbi->s_want_extra_isize = + le16_to_cpu(es->s_want_extra_isize); + if (sbi->s_want_extra_isize < + le16_to_cpu(es->s_min_extra_isize)) + sbi->s_want_extra_isize = + le16_to_cpu(es->s_min_extra_isize); + } + } + /* Check if enough inode space is available */ + if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > + sbi->s_inode_size) { + sbi->s_want_extra_isize = sizeof(struct ext4_inode) - + EXT4_GOOD_OLD_INODE_SIZE; + ext4_msg(sb, KERN_INFO, + "required extra inode space not available"); + } +} + static void ext4_set_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; @@ -4239,7 +4269,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "data=, fs mounted w/o journal"); goto failed_mount_wq; } - sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM; + sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, DATA_FLAGS); sbi->s_journal = NULL; @@ -4388,30 +4418,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } else if (ret) goto failed_mount4a; - /* determine the minimum size of new large inodes, if present */ - if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && - sbi->s_want_extra_isize == 0) { - sbi->s_want_extra_isize = sizeof(struct ext4_inode) - - EXT4_GOOD_OLD_INODE_SIZE; - if (ext4_has_feature_extra_isize(sb)) { - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_want_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_want_extra_isize); - if (sbi->s_want_extra_isize < - le16_to_cpu(es->s_min_extra_isize)) - sbi->s_want_extra_isize = - le16_to_cpu(es->s_min_extra_isize); - } - } - /* Check if enough inode space is available */ - if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > - sbi->s_inode_size) { - sbi->s_want_extra_isize = sizeof(struct ext4_inode) - - EXT4_GOOD_OLD_INODE_SIZE; - ext4_msg(sb, KERN_INFO, "required extra inode space not" - "available"); - } + ext4_clamp_want_extra_isize(sb); ext4_set_resv_clusters(sb); @@ -5197,6 +5204,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + ext4_clamp_want_extra_isize(sb); + if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 006c277dc22e78..f73fc90e5daa32 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1700,7 +1700,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, /* No failures allowed past this point. */ - if (!s->not_found && here->e_value_size && here->e_value_offs) { + if (!s->not_found && here->e_value_size && !here->e_value_inum) { /* Remove the old value. 
*/ void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(here->e_value_offs); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 08314fb426526e..4d02e76b648a21 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -197,12 +197,14 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, struct block_device *bdev = sbi->sb->s_bdev; int i; - for (i = 0; i < sbi->s_ndevs; i++) { - if (FDEV(i).start_blk <= blk_addr && - FDEV(i).end_blk >= blk_addr) { - blk_addr -= FDEV(i).start_blk; - bdev = FDEV(i).bdev; - break; + if (f2fs_is_multi_device(sbi)) { + for (i = 0; i < sbi->s_ndevs; i++) { + if (FDEV(i).start_blk <= blk_addr && + FDEV(i).end_blk >= blk_addr) { + blk_addr -= FDEV(i).start_blk; + bdev = FDEV(i).bdev; + break; + } } } if (bio) { @@ -216,6 +218,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr) { int i; + if (!f2fs_is_multi_device(sbi)) + return 0; + for (i = 0; i < sbi->s_ndevs; i++) if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr) return i; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1f5d5f62bb77de..44ea7ac69ef48b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1336,6 +1336,17 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) } #endif +/* + * Test if the mounted volume is a multi-device volume. + * - For a single regular disk volume, sbi->s_ndevs is 0. + * - For a single zoned disk volume, sbi->s_ndevs is 1. + * - For a multi-device volume, sbi->s_ndevs is always 2 or more. + */ +static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) +{ + return sbi->s_ndevs > 1; +} + /* For write statistics. Suppose sector size is 512 bytes, * and the return value is in kbytes. s is of struct f2fs_sb_info. */ @@ -1733,6 +1744,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, return -ENOSPC; } +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, struct inode *inode, block_t count) @@ -1741,13 +1753,21 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, spin_lock(&sbi->stat_lock); f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); - f2fs_bug_on(sbi, inode->i_blocks < sectors); sbi->total_valid_block_count -= (block_t)count; if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks = min(sbi->reserved_blocks, sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); + if (unlikely(inode->i_blocks < sectors)) { + f2fs_msg(sbi->sb, KERN_WARNING, + "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks, + (unsigned long long)sectors); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return; + } f2fs_i_blocks_write(inode, count, false, true); } @@ -2477,7 +2497,9 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page) static inline int inline_xattr_size(struct inode *inode) { - return get_inline_xattr_addrs(inode) * sizeof(__le32); + if (f2fs_has_inline_xattr(inode)) + return get_inline_xattr_addrs(inode) * sizeof(__le32); + return 0; } static inline int f2fs_has_inline_data(struct inode *inode) @@ -2716,7 +2738,6 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); static inline void verify_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int 
type) { @@ -3455,7 +3476,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, int rw) { return (f2fs_post_read_required(inode) || (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) || - F2FS_I_SB(inode)->s_ndevs); + f2fs_is_multi_device(F2FS_I_SB(inode))); } #ifdef CONFIG_F2FS_FAULT_INJECTION diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index b3f46e3bec1747..8d1eb8dec60582 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2539,7 +2539,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) sizeof(range))) return -EFAULT; - if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num || + if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || sbi->segs_per_sec != 1) { f2fs_msg(sbi->sb, KERN_WARNING, "Can't flush %u in %d for segs_per_sec %u != 1\n", diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5c8d0042223729..d44b57a363ff11 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1256,7 +1256,7 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; /* give warm/cold data area from slower device */ - if (sbi->s_ndevs && sbi->segs_per_sec == 1) + if (f2fs_is_multi_device(sbi) && sbi->segs_per_sec == 1) SIT_I(sbi)->last_victim[ALLOC_NEXT] = GET_SEGNO(sbi, FDEV(0).end_blk) + 1; } diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index dd608b819a3c33..0f31df01e36c60 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -179,8 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) if (provided != calculated) f2fs_msg(sbi->sb, KERN_WARNING, - "checksum invalid, ino = %x, %x vs. %x", - ino_of_node(page), provided, calculated); + "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", + page->index, ino_of_node(page), provided, calculated); return provided == calculated; } @@ -476,6 +476,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) return inode; bad_inode: + f2fs_inode_synced(inode); iget_failed(inode); trace_f2fs_iget_exit(inode, ret); return ERR_PTR(ret); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 19a0d83aae6537..e2d9edad758cdf 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1180,8 +1180,14 @@ int f2fs_remove_inode_page(struct inode *inode) f2fs_put_dnode(&dn); return -EIO; } - f2fs_bug_on(F2FS_I_SB(inode), - inode->i_blocks != 0 && inode->i_blocks != 8); + + if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, + "Inconsistent i_blocks, ino:%lu, iblocks:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks); + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + } /* will put inode & node pages */ err = truncate_node(&dn); @@ -1276,9 +1282,10 @@ static int read_node_page(struct page *page, int op_flags) int err; if (PageUptodate(page)) { -#ifdef CONFIG_F2FS_CHECK_FS - f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page)); -#endif + if (!f2fs_inode_chksum_verify(sbi, page)) { + ClearPageUptodate(page); + return -EBADMSG; + } return LOCKED_PAGE; } @@ -2073,6 +2080,9 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, if (unlikely(nid == 0)) return false; + if (unlikely(f2fs_check_nid_range(sbi, nid))) + return false; + i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = FREE_NID; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index ae0e5f2e67b4ae..bf5c5f4fa77ea2 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -485,7 +485,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, goto err; f2fs_bug_on(sbi, 
ni.ino != ino_of_node(page)); - f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page)); + + if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { + f2fs_msg(sbi->sb, KERN_WARNING, + "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", + inode->i_ino, ofs_of_node(dn.node_page), + ofs_of_node(page)); + err = -EFAULT; + goto err; + } for (; start < end; start++, dn.ofs_in_node++) { block_t src, dest; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ac038563273deb..8fc3edb6760c2f 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -574,7 +574,7 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) int ret = 0; int i; - if (!sbi->s_ndevs) + if (!f2fs_is_multi_device(sbi)) return __submit_flush_wait(sbi, sbi->sb->s_bdev); for (i = 0; i < sbi->s_ndevs; i++) { @@ -640,7 +640,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) return ret; } - if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) { + if (atomic_inc_return(&fcc->issing_flush) == 1 || + f2fs_is_multi_device(sbi)) { ret = submit_flush_wait(sbi, ino); atomic_dec(&fcc->issing_flush); @@ -746,7 +747,7 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) { int ret = 0, i; - if (!sbi->s_ndevs) + if (!f2fs_is_multi_device(sbi)) return 0; for (i = 1; i < sbi->s_ndevs; i++) { @@ -1289,7 +1290,7 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi, trace_f2fs_queue_discard(bdev, blkstart, blklen); - if (sbi->s_ndevs) { + if (f2fs_is_multi_device(sbi)) { int devi = f2fs_target_device_index(sbi, blkstart); blkstart -= FDEV(devi).start_blk; @@ -1638,7 +1639,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, block_t lblkstart = blkstart; int devi = 0; - if (sbi->s_ndevs) { + if (f2fs_is_multi_device(sbi)) { devi = f2fs_target_device_index(sbi, blkstart); blkstart -= FDEV(devi).start_blk; } @@ -2971,7 +2972,7 @@ static void update_device_state(struct f2fs_io_info *fio) struct f2fs_sb_info *sbi = fio->sbi; unsigned int devidx; - if (!sbi->s_ndevs) + if (!f2fs_is_multi_device(sbi)) return; devidx = f2fs_target_device_index(sbi, fio->new_blkaddr); @@ -3068,13 +3069,18 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) { int err; struct f2fs_sb_info *sbi = fio->sbi; + unsigned int segno; fio->new_blkaddr = fio->old_blkaddr; /* i/o temperature is needed for passing down write hints */ __get_segment_type(fio); - f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi, - GET_SEGNO(sbi, fio->new_blkaddr))->type)); + segno = GET_SEGNO(sbi, fio->new_blkaddr); + + if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EFAULT; + } stat_inc_inplace_blocks(fio->sbi); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index b3d9e317ff0c14..5079532cb176b7 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -660,7 +660,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr) static inline int check_block_count(struct f2fs_sb_info *sbi, int segno, struct f2fs_sit_entry *raw_sit) { -#ifdef CONFIG_F2FS_CHECK_FS bool is_valid = test_bit_le(0, raw_sit->valid_map) ? 
true : false; int valid_blocks = 0; int cur_pos = 0, next_pos; @@ -687,7 +686,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, set_sbi_flag(sbi, SBI_NEED_FSCK); return -EINVAL; } -#endif + /* check segment usage, and check boundary of a given segment number */ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg || segno > TOTAL_SEGS(sbi) - 1)) { diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 409a637f7a926b..88e30f7cf9e14e 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -205,12 +205,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index) return handler; } -static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index, - size_t len, const char *name) +static struct f2fs_xattr_entry *__find_xattr(void *base_addr, + void *last_base_addr, int index, + size_t len, const char *name) { struct f2fs_xattr_entry *entry; list_for_each_xattr(entry, base_addr) { + if ((void *)(entry) + sizeof(__u32) > last_base_addr || + (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) + return NULL; + if (entry->e_name_index != index) continue; if (entry->e_name_len != len) @@ -300,20 +305,22 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, const char *name, struct f2fs_xattr_entry **xe, void **base_addr, int *base_size) { - void *cur_addr, *txattr_addr, *last_addr = NULL; + void *cur_addr, *txattr_addr, *last_txattr_addr; + void *last_addr = NULL; nid_t xnid = F2FS_I(inode)->i_xattr_nid; - unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0; unsigned int inline_size = inline_xattr_size(inode); int err = 0; - if (!size && !inline_size) + if (!xnid && !inline_size) return -ENODATA; - *base_size = inline_size + size + XATTR_PADDING_SIZE; + *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE; txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS); if (!txattr_addr) return -ENOMEM; + last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode); + /* read from inline xattr */ if (inline_size) { err = read_inline_xattr(inode, ipage, txattr_addr); @@ -340,7 +347,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, else cur_addr = txattr_addr; - *xe = __find_xattr(cur_addr, index, len, name); + *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name); + if (!*xe) { + err = -EFAULT; + goto out; + } check: if (IS_XATTR_LAST_ENTRY(*xe)) { err = -ENODATA; @@ -584,7 +595,8 @@ static int __f2fs_setxattr(struct inode *inode, int index, struct page *ipage, int flags) { struct f2fs_xattr_entry *here, *last; - void *base_addr; + void *base_addr, *last_base_addr; + nid_t xnid = F2FS_I(inode)->i_xattr_nid; int found, newsize; size_t len; __u32 new_hsize; @@ -608,8 +620,14 @@ static int __f2fs_setxattr(struct inode *inode, int index, if (error) return error; + last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode); + /* find entry with wanted name. */ - here = __find_xattr(base_addr, index, len, name); + here = __find_xattr(base_addr, last_base_addr, index, len, name); + if (!here) { + error = -EFAULT; + goto exit; + } found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1; diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index dbcd1d16e66982..2a4ecaf338eac6 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -74,6 +74,8 @@ struct f2fs_xattr_entry { entry = XATTR_NEXT_ENTRY(entry)) #define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer)) #define XATTR_PADDING_SIZE (sizeof(__u32)) +#define XATTR_SIZE(x,i) (((x) ? 
VALID_XATTR_BLOCK_SIZE : 0) + \ + (inline_xattr_size(i))) #define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \ VALID_XATTR_BLOCK_SIZE) diff --git a/fs/fat/file.c b/fs/fat/file.c index 4f3d72fb1e60d6..f86ea08bd6ce4c 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -193,12 +193,17 @@ static int fat_file_release(struct inode *inode, struct file *filp) int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; - int res, err; + int err; + + err = __generic_file_fsync(filp, start, end, datasync); + if (err) + return err; - res = generic_file_fsync(filp, start, end, datasync); err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); + if (err) + return err; - return res ? res : err; + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 82ce6d4f7e314d..9544e2f8b79ff8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) isw->inode = inode; - atomic_inc(&isw_nr_in_flight); - /* * In addition to synchronizing among switchers, I_WB_SWITCH tells * the RCU protected stat update paths to grab the i_page @@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) * Let's continue after I_WB_SWITCH is guaranteed to be visible. */ call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); + + atomic_inc(&isw_nr_in_flight); + goto out_unlock; out_free: @@ -908,7 +909,11 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, void cgroup_writeback_umount(void) { if (atomic_read(&isw_nr_in_flight)) { - synchronize_rcu(); + /* + * Use rcu_barrier() to wait for all pending callbacks to + * ensure that all in-flight wb switches are in the workqueue. 
+ */ + rcu_barrier(); flush_workqueue(isw_wq); } } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 249de20f752a16..6ee471b72a34da 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1681,7 +1681,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); - num = outarg->size; + num = min(outarg->size, fc->max_write); if (outarg->offset > file_size) num = 0; else if (outarg->offset + num > file_size) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index bd500c3b7858c8..9a22aa580fe7a5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -179,7 +179,9 @@ void fuse_finish_open(struct inode *inode, struct file *file) file->f_op = &fuse_direct_io_file_operations; if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); - if (ff->open_flags & FOPEN_NONSEEKABLE) + if (ff->open_flags & FOPEN_STREAM) + stream_open(inode, file); + else if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { struct fuse_inode *fi = get_fuse_inode(inode); @@ -1526,7 +1528,7 @@ __acquires(fc->lock) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); - size_t crop = i_size_read(inode); + loff_t crop = i_size_read(inode); struct fuse_req *req; while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { @@ -2975,6 +2977,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, } } + if (!(mode & FALLOC_FL_KEEP_SIZE) && + offset + length > i_size_read(inode)) { + err = inode_newsize_ok(inode, offset + length); + if (err) + goto out; + } + if (!(mode & FALLOC_FL_KEEP_SIZE)) set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9d566e62684c27..ccdd8c821abd72 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -140,6 +140,7 @@ void gfs2_glock_free(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + BUG_ON(atomic_read(&gl->gl_revokes)); rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); smp_mb(); wake_up_glock(gl); @@ -183,15 +184,19 @@ static int demote_ok(const struct gfs2_glock *gl) void gfs2_glock_add_to_lru(struct gfs2_glock *gl) { + if (!(gl->gl_ops->go_flags & GLOF_LRU)) + return; + spin_lock(&lru_lock); - if (!list_empty(&gl->gl_lru)) - list_del_init(&gl->gl_lru); - else + list_del(&gl->gl_lru); + list_add_tail(&gl->gl_lru, &lru_list); + + if (!test_bit(GLF_LRU, &gl->gl_flags)) { + set_bit(GLF_LRU, &gl->gl_flags); atomic_inc(&lru_count); + } - list_add_tail(&gl->gl_lru, &lru_list); - set_bit(GLF_LRU, &gl->gl_flags); spin_unlock(&lru_lock); } @@ -201,7 +206,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) return; spin_lock(&lru_lock); - if (!list_empty(&gl->gl_lru)) { + if (test_bit(GLF_LRU, &gl->gl_flags)) { list_del_init(&gl->gl_lru); atomic_dec(&lru_count); clear_bit(GLF_LRU, &gl->gl_flags); @@ -1158,8 +1163,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh) !test_bit(GLF_DEMOTE, &gl->gl_flags)) fast_path = 1; } - if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) && - (glops->go_flags & GLOF_LRU)) + if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) gfs2_glock_add_to_lru(gl); trace_gfs2_glock_queue(gh, 0); @@ -1455,6 +1459,7 @@ __acquires(&lru_lock) if (!spin_trylock(&gl->gl_lockref.lock)) { add_back_to_lru: list_add(&gl->gl_lru, &lru_list); + set_bit(GLF_LRU, &gl->gl_flags); atomic_inc(&lru_count); continue; } @@ -1462,7 +1467,6 @@ __acquires(&lru_lock) 
spin_unlock(&gl->gl_lockref.lock); goto add_back_to_lru; } - clear_bit(GLF_LRU, &gl->gl_flags); gl->gl_lockref.count++; if (demote_ok(gl)) handle_callback(gl, LM_ST_UNLOCKED, 0, false); @@ -1497,6 +1501,7 @@ static long gfs2_scan_glock_lru(int nr) if (!test_bit(GLF_LOCK, &gl->gl_flags)) { list_move(&gl->gl_lru, &dispose); atomic_dec(&lru_count); + clear_bit(GLF_LRU, &gl->gl_flags); freed++; continue; } diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index ac7caa267ed609..62edf8f5615fad 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -31,9 +31,10 @@ * @delta is the difference between the current rtt sample and the * running average srtt. We add 1/8 of that to the srtt in order to * update the current srtt estimate. The variance estimate is a bit - * more complicated. We subtract the abs value of the @delta from - * the current variance estimate and add 1/4 of that to the running - * total. + * more complicated. We subtract the current variance estimate from + * the abs value of the @delta and add 1/4 of that to the running + * total. That's equivalent to 3/4 of the current variance + * estimate plus 1/4 of the abs of @delta. * * Note that the index points at the array entry containing the smoothed * mean value, and the variance is always in the following entry @@ -49,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index, s64 delta = sample - s->stats[index]; s->stats[index] += (delta >> 3); index++; - s->stats[index] += ((abs(delta) - s->stats[index]) >> 2); + s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2; } /** diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index ee20ea42e7b598..cd85092723dea4 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -604,7 +604,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) bd->bd_bh = NULL; bd->bd_ops = &gfs2_revoke_lops; sdp->sd_log_num_revoke++; - atomic_inc(&gl->gl_revokes); + if (atomic_inc_return(&gl->gl_revokes) == 1) + gfs2_glock_hold(gl); set_bit(GLF_LFLUSH, &gl->gl_flags); list_add(&bd->bd_list, &sdp->sd_log_le_revoke); } diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index f2567f958d002c..8f99b395d7bf63 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -662,8 +662,10 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) bd = list_entry(head->next, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gl = bd->bd_gl; - atomic_dec(&gl->gl_revokes); - clear_bit(GLF_LFLUSH, &gl->gl_flags); + if (atomic_dec_return(&gl->gl_revokes) == 0) { + clear_bit(GLF_LFLUSH, &gl->gl_flags); + gfs2_glock_queue_put(gl); + } kmem_cache_free(gfs2_bufdata_cachep, bd); } } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3a3d256fb0e43..7a24f91af29e43 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -426,9 +426,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, u32 hash; index = page->index; - hash = hugetlb_fault_mutex_hash(h, current->mm, - &pseudo_vma, - mapping, index, 0); + hash = hugetlb_fault_mutex_hash(h, mapping, index, 0); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* @@ -625,8 +623,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, addr = index * hpage_size; /* mutex taken here, fault path and hole punch */ - hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, - index, addr); + hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ diff --git 
a/fs/inode.c b/fs/inode.c index 42f6d25f32a520..5c63693326bb43 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file) int kill; int error = 0; - /* Fast path for nothing security related */ - if (IS_NOSEC(inode)) + /* + * Fast path for nothing security related. + * As well for non-regular files, e.g. blkdev inodes. + * For example, blkdev_write_iter() might get here + * trying to remove privs which it is not allowed to. + */ + if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) return 0; kill = dentry_needs_remove_privs(dentry); diff --git a/fs/internal.h b/fs/internal.h index d410186bc369f9..d109665b9e50f4 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -80,9 +80,7 @@ extern int sb_prepare_remount_readonly(struct super_block *); extern void __init mnt_init(void); -extern int __mnt_want_write(struct vfsmount *); extern int __mnt_want_write_file(struct file *); -extern void __mnt_drop_write(struct vfsmount *); extern void __mnt_drop_write_file(struct file *); /* diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 88f2a49338a10c..e9cf88f0bc2917 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1366,6 +1366,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) journal_superblock_t *sb = journal->j_superblock; int ret; + /* Buffer got discarded which means block device got invalidated */ + if (!buffer_mapped(bh)) + return -EIO; + trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) write_flags &= ~(REQ_FUA | REQ_PREFLUSH); @@ -2385,22 +2389,19 @@ static struct kmem_cache *jbd2_journal_head_cache; static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif -static int jbd2_journal_init_journal_head_cache(void) +static int __init jbd2_journal_init_journal_head_cache(void) { - int retval; - - J_ASSERT(jbd2_journal_head_cache == NULL); + J_ASSERT(!jbd2_journal_head_cache); jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head", sizeof(struct journal_head), 0, /* offset */ SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU, NULL); /* ctor */ - retval = 0; if (!jbd2_journal_head_cache) { - retval = -ENOMEM; printk(KERN_EMERG "JBD2: no memory for journal_head cache\n"); + return -ENOMEM; } - return retval; + return 0; } static void jbd2_journal_destroy_journal_head_cache(void) @@ -2646,28 +2647,38 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void) struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache; +static int __init jbd2_journal_init_inode_cache(void) +{ + J_ASSERT(!jbd2_inode_cache); + jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0); + if (!jbd2_inode_cache) { + pr_emerg("JBD2: failed to create inode cache\n"); + return -ENOMEM; + } + return 0; +} + static int __init jbd2_journal_init_handle_cache(void) { + J_ASSERT(!jbd2_handle_cache); jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY); - if (jbd2_handle_cache == NULL) { + if (!jbd2_handle_cache) { printk(KERN_EMERG "JBD2: failed to create handle cache\n"); return -ENOMEM; } - jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0); - if (jbd2_inode_cache == NULL) { - printk(KERN_EMERG "JBD2: failed to create inode cache\n"); - kmem_cache_destroy(jbd2_handle_cache); - return -ENOMEM; - } return 0; } +static void jbd2_journal_destroy_inode_cache(void) +{ + kmem_cache_destroy(jbd2_inode_cache); + jbd2_inode_cache = NULL; +} + static void jbd2_journal_destroy_handle_cache(void) { kmem_cache_destroy(jbd2_handle_cache); jbd2_handle_cache = NULL; - kmem_cache_destroy(jbd2_inode_cache); - jbd2_inode_cache = NULL; } /* @@ 
-2678,11 +2689,15 @@ static int __init journal_init_caches(void) { int ret; - ret = jbd2_journal_init_revoke_caches(); + ret = jbd2_journal_init_revoke_record_cache(); + if (ret == 0) + ret = jbd2_journal_init_revoke_table_cache(); if (ret == 0) ret = jbd2_journal_init_journal_head_cache(); if (ret == 0) ret = jbd2_journal_init_handle_cache(); + if (ret == 0) + ret = jbd2_journal_init_inode_cache(); if (ret == 0) ret = jbd2_journal_init_transaction_cache(); return ret; @@ -2690,9 +2705,11 @@ static int __init journal_init_caches(void) static void jbd2_journal_destroy_caches(void) { - jbd2_journal_destroy_revoke_caches(); + jbd2_journal_destroy_revoke_record_cache(); + jbd2_journal_destroy_revoke_table_cache(); jbd2_journal_destroy_journal_head_cache(); jbd2_journal_destroy_handle_cache(); + jbd2_journal_destroy_inode_cache(); jbd2_journal_destroy_transaction_cache(); jbd2_journal_destroy_slabs(); } diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index a1143e57a718ee..69b9bc329964f7 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -178,33 +178,41 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal, return NULL; } -void jbd2_journal_destroy_revoke_caches(void) +void jbd2_journal_destroy_revoke_record_cache(void) { kmem_cache_destroy(jbd2_revoke_record_cache); jbd2_revoke_record_cache = NULL; +} + +void jbd2_journal_destroy_revoke_table_cache(void) +{ kmem_cache_destroy(jbd2_revoke_table_cache); jbd2_revoke_table_cache = NULL; } -int __init jbd2_journal_init_revoke_caches(void) +int __init jbd2_journal_init_revoke_record_cache(void) { J_ASSERT(!jbd2_revoke_record_cache); - J_ASSERT(!jbd2_revoke_table_cache); - jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s, SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY); - if (!jbd2_revoke_record_cache) - goto record_cache_failure; + if (!jbd2_revoke_record_cache) { + pr_emerg("JBD2: failed to create revoke_record cache\n"); + return -ENOMEM; + } + return 0; +} + +int __init jbd2_journal_init_revoke_table_cache(void) +{ + J_ASSERT(!jbd2_revoke_table_cache); jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s, SLAB_TEMPORARY); - if (!jbd2_revoke_table_cache) - goto table_cache_failure; - return 0; -table_cache_failure: - jbd2_journal_destroy_revoke_caches(); -record_cache_failure: + if (!jbd2_revoke_table_cache) { + pr_emerg("JBD2: failed to create revoke_table cache\n"); return -ENOMEM; + } + return 0; } static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 914e725c82c4fb..e20a6703531f41 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -42,9 +42,11 @@ int __init jbd2_journal_init_transaction_cache(void) 0, SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY, NULL); - if (transaction_cache) - return 0; - return -ENOMEM; + if (!transaction_cache) { + pr_emerg("JBD2: failed to create transaction cache\n"); + return -ENOMEM; + } + return 0; } void jbd2_journal_destroy_transaction_cache(void) diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 9846f7e952826a..7147e4aebecc21 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock) locks_init_lock(fl); fl->fl_owner = current->files; - fl->fl_pid = current->tgid; + fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ start = ntohl(*p++); @@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p) memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); 
lock->svid = ~(u32) 0; - lock->fl.fl_pid = current->tgid; + lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index 70154f3766951d..7ed9edf9aed4a6 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) locks_init_lock(fl); fl->fl_owner = current->files; - fl->fl_pid = current->tgid; + fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ p = xdr_decode_hyper(p, &start); @@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p) memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); lock->svid = ~(u32) 0; - lock->fl.fl_pid = current->tgid; + lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 751ca65da8a359..c092661147b305 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -290,6 +290,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat struct nfs_client *clp; const struct sockaddr *sap = data->addr; struct nfs_net *nn = net_generic(data->net, nfs_net_id); + int error; again: list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { @@ -302,9 +303,11 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat if (clp->cl_cons_state > NFS_CS_READY) { refcount_inc(&clp->cl_count); spin_unlock(&nn->nfs_client_lock); - nfs_wait_client_init_complete(clp); + error = nfs_wait_client_init_complete(clp); nfs_put_client(clp); spin_lock(&nn->nfs_client_lock); + if (error < 0) + return ERR_PTR(error); goto again; } @@ -413,6 +416,8 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init) clp = nfs_match_client(cl_init); if (clp) { spin_unlock(&nn->nfs_client_lock); + if (IS_ERR(clp)) + return clp; if (new) new->rpc_ops->free_client(new); return nfs_found_client(cl_init, clp); diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index d175724ff566bf..2478a69da0f0cc 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino, status = filelayout_check_deviceid(lo, fl, gfp_flags); if (status) { pnfs_put_lseg(lseg); - lseg = ERR_PTR(status); + lseg = NULL; } out: return lseg; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index fed06fd9998d32..94f98e190e632b 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, }; ssize_t err, err2; - if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY)) - return -EOPNOTSUPP; - src_lock = nfs_get_lock_context(nfs_file_open_context(src)); if (IS_ERR(src_lock)) return PTR_ERR(src_lock); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 4288a6ecaf7563..134858507268f1 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -133,15 +133,11 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t count, unsigned int flags) { - ssize_t ret; - + if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY)) + return -EOPNOTSUPP; if (file_inode(file_in) == file_inode(file_out)) - return -EINVAL; -retry: - ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); - if (ret == -EAGAIN) - goto retry; - return ret; 
+ return -EOPNOTSUPP; + return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); } static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 580e37bc3fe205..53cf8599a46e40 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6850,7 +6850,6 @@ struct nfs4_lock_waiter { struct task_struct *task; struct inode *inode; struct nfs_lowner *owner; - bool notified; }; static int @@ -6872,13 +6871,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, vo /* Make sure it's for the right inode */ if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) return 0; - - waiter->notified = true; } /* override "private" so we can use default_wake_function */ wait->private = waiter->task; - ret = autoremove_wake_function(wait, mode, flags, key); + ret = woken_wake_function(wait, mode, flags, key); + if (ret) + list_del_init(&wait->entry); wait->private = waiter; return ret; } @@ -6887,7 +6886,6 @@ static int nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { int status = -ERESTARTSYS; - unsigned long flags; struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; struct nfs_server *server = NFS_SERVER(state->inode); struct nfs_client *clp = server->nfs_client; @@ -6897,8 +6895,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) .s_dev = server->s_dev }; struct nfs4_lock_waiter waiter = { .task = current, .inode = state->inode, - .owner = &owner, - .notified = false }; + .owner = &owner}; wait_queue_entry_t wait; /* Don't bother with waitqueue if we don't expect a callback */ @@ -6908,27 +6905,22 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) init_wait(&wait); wait.private = &waiter; wait.func = nfs4_wake_lock_waiter; - add_wait_queue(q, &wait); while(!signalled()) { - waiter.notified = false; + add_wait_queue(q, &wait); status = nfs4_proc_setlk(state, cmd, request); - if ((status != -EAGAIN) || IS_SETLK(cmd)) + if ((status != -EAGAIN) || IS_SETLK(cmd)) { + finish_wait(q, &wait); break; - - status = -ERESTARTSYS; - spin_lock_irqsave(&q->lock, flags); - if (waiter.notified) { - spin_unlock_irqrestore(&q->lock, flags); - continue; } - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&q->lock, flags); - freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT); + status = -ERESTARTSYS; + freezer_do_not_count(); + wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT); + freezer_count(); + finish_wait(q, &wait); } - finish_wait(q, &wait); return status; } #else /* !CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index d2f645d34eb134..3ba2087469ac88 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp, /* Sustain the lease, even if it's empty. If the clientid4 * goes stale it's of no use for trunking discovery. */ nfs4_schedule_state_renewal(*result); + + /* If the client state needs to recover, do it.
*/ + if (clp->cl_state) + nfs4_schedule_state_manager(clp); } out: return status; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 418fa9c78186b6..db0beefe65ec2c 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2413,8 +2413,10 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, __be32 status; int err; struct nfs4_acl *acl = NULL; +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL void *context = NULL; int contextlen; +#endif bool contextsupport = false; struct nfsd4_compoundres *resp = rqstp->rq_resp; u32 minorversion = resp->cstate.minorversion; @@ -2899,12 +2901,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA); } +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) { status = nfsd4_encode_security_label(xdr, rqstp, context, contextlen); if (status) goto out; } +#endif attrlen = htonl(xdr->buf->len - attrlen_offset - 4); write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index a7e107309f767d..db351247892d05 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -120,8 +120,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra); static inline int fh_want_write(struct svc_fh *fh) { - int ret = mnt_want_write(fh->fh_export->ex_path.mnt); + int ret; + if (fh->fh_want_write) + return 0; + ret = mnt_want_write(fh->fh_export->ex_path.mnt); if (!ret) fh->fh_want_write = true; return ret; diff --git a/fs/nsfs.c b/fs/nsfs.c index 60702d677bd4e5..30d150a4f0c62a 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -85,13 +85,12 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns) inode->i_fop = &ns_file_operations; inode->i_private = ns; - dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name); + dentry = d_alloc_anon(mnt->mnt_sb); if (!dentry) { iput(inode); return ERR_PTR(-ENOMEM); } d_instantiate(dentry, inode); - dentry->d_flags |= DCACHE_RCUACCESS; dentry->d_fsdata = (void *)ns->ops; d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); if (d) { diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 290373024d9d67..e8ace3b54e9c19 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, out_attach: spin_lock(&dentry_attach_lock); + if (unlikely(dentry->d_fsdata && !alias)) { + /* d_fsdata is set by a racing thread which is doing + * the same thing as this thread is doing. Leave the racing + * thread going ahead and we return here. 
+ */ + spin_unlock(&dentry_attach_lock); + iput(dl->dl_inode); + ocfs2_lock_res_free(&dl->dl_lockres); + kfree(dl); + return 0; + } + dentry->d_fsdata = dl; dl->dl_count++; spin_unlock(&dentry_attach_lock); diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 4bf8d5854b2711..af2888d23de38f 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) u64 blkno; struct dentry *parent; struct inode *dir = d_inode(child); + int set; trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); + status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1); + if (status < 0) { + mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); + parent = ERR_PTR(status); + goto bail; + } + status = ocfs2_inode_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); - goto bail; + goto unlock_nfs_sync; } status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); @@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) goto bail_unlock; } + status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set); + if (status < 0) { + if (status == -EINVAL) { + status = -ESTALE; + } else + mlog(ML_ERROR, "test inode bit failed %d\n", status); + parent = ERR_PTR(status); + goto bail_unlock; + } + + trace_ocfs2_get_dentry_test_bit(status, set); + if (!set) { + status = -ESTALE; + parent = ERR_PTR(status); + goto bail_unlock; + } + parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); bail_unlock: ocfs2_inode_unlock(dir, 0); +unlock_nfs_sync: + ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1); + bail: trace_ocfs2_get_parent_end(parent); diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c index f65f2b2f594d4f..1906cc962c4d93 100644 --- a/fs/ocfs2/filecheck.c +++ b/fs/ocfs2/filecheck.c @@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb) ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck, NULL, "filecheck"); if (ret) { + kobject_put(&entry->fs_kobj); kfree(fcheck); return ret; } diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 75eeee08d8481a..ffc73600216b65 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -878,14 +878,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags) return true; } -int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags) +int ovl_maybe_copy_up(struct dentry *dentry, int flags) { int err = 0; - if (ovl_open_need_copy_up(dentry, file_flags)) { + if (ovl_open_need_copy_up(dentry, flags)) { err = ovl_want_write(dentry); if (!err) { - err = ovl_copy_up_flags(dentry, file_flags); + err = ovl_copy_up_flags(dentry, flags); ovl_drop_write(dentry); } } diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index b2aadd3e1fec94..336f04da80eda3 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -260,7 +260,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, * hashed directory inode aliases. 
*/ inode = ovl_get_inode(dentry->d_sb, &oip); - if (WARN_ON(IS_ERR(inode))) + if (IS_ERR(inode)) return PTR_ERR(inode); } else { WARN_ON(ovl_inode_real(inode) != d_inode(newdentry)); } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 986313da0c8895..0bd276e4ccbeee 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "overlayfs.h" static char ovl_whatisit(struct inode *inode, struct inode *realinode) @@ -29,10 +30,11 @@ static struct file *ovl_open_realfile(const struct file *file, struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; + int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; old_cred = ovl_override_creds(inode->i_sb); - realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME, - realinode, current_cred()); + realfile = open_with_fake_path(&file->f_path, flags, realinode, + current_cred()); revert_creds(old_cred); pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n", @@ -50,7 +52,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags) int err; /* No atime modificaton on underlying */ - flags |= O_NOATIME; + flags |= O_NOATIME | FMODE_NONOTIFY; /* If some flag changed that cannot be changed then something's amiss */ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) @@ -116,11 +118,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real) static int ovl_open(struct inode *inode, struct file *file) { - struct dentry *dentry = file_dentry(file); struct file *realfile; int err; - err = ovl_open_maybe_copy_up(dentry, file->f_flags); + err = ovl_maybe_copy_up(file_dentry(file), file->f_flags); if (err) return err; @@ -145,11 +146,47 @@ static int ovl_release(struct inode *inode, struct file *file) static loff_t ovl_llseek(struct file *file, loff_t offset, int whence) { - struct inode *realinode = ovl_inode_real(file_inode(file)); + struct inode *inode = file_inode(file); + struct fd real; + const struct cred *old_cred; + ssize_t ret; + + /* + * The two special cases below do not need to involve real fs, + * so we can optimize concurrent callers. + */ + if (offset == 0) { + if (whence == SEEK_CUR) + return file->f_pos; + + if (whence == SEEK_SET) + return vfs_setpos(file, 0, 0); + } + + ret = ovl_real_fdget(file, &real); + if (ret) + return ret; + + /* + * Overlay file f_pos is the master copy that is preserved + * through copy up and modified on read/write, but only real + * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose + * limitations that are more strict than ->s_maxbytes for specific + * files, so we use the real file to perform seeks.
+ */ + inode_lock(inode); + real.file->f_pos = file->f_pos; - return generic_file_llseek_size(file, offset, whence, - realinode->i_sb->s_maxbytes, - i_size_read(realinode)); + old_cred = ovl_override_creds(inode->i_sb); + ret = vfs_llseek(real.file, offset, whence); + revert_creds(old_cred); + + file->f_pos = real.file->f_pos; + inode_unlock(inode); + + fdput(real); + + return ret; } static void ovl_file_accessed(struct file *file) @@ -372,34 +409,118 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd, return ret; } -static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd, + unsigned long arg, unsigned int iflags) { long ret; struct inode *inode = file_inode(file); + unsigned int old_iflags; + + if (!inode_owner_or_capable(inode)) + return -EACCES; + + ret = mnt_want_write_file(file); + if (ret) + return ret; + + inode_lock(inode); + + /* Check the capability before cred override */ + ret = -EPERM; + old_iflags = READ_ONCE(inode->i_flags); + if (((iflags ^ old_iflags) & (S_APPEND | S_IMMUTABLE)) && + !capable(CAP_LINUX_IMMUTABLE)) + goto unlock; + + ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY); + if (ret) + goto unlock; + + ret = ovl_real_ioctl(file, cmd, arg); + + ovl_copyflags(ovl_inode_real(inode), inode); +unlock: + inode_unlock(inode); + + mnt_drop_write_file(file); + + return ret; + +} + +static unsigned int ovl_fsflags_to_iflags(unsigned int flags) +{ + unsigned int iflags = 0; + + if (flags & FS_SYNC_FL) + iflags |= S_SYNC; + if (flags & FS_APPEND_FL) + iflags |= S_APPEND; + if (flags & FS_IMMUTABLE_FL) + iflags |= S_IMMUTABLE; + if (flags & FS_NOATIME_FL) + iflags |= S_NOATIME; + + return iflags; +} + +static long ovl_ioctl_set_fsflags(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int flags; + + if (get_user(flags, (int __user *) arg)) + return -EFAULT; + + return ovl_ioctl_set_flags(file, cmd, arg, + ovl_fsflags_to_iflags(flags)); +} + +static unsigned int ovl_fsxflags_to_iflags(unsigned int xflags) +{ + unsigned int iflags = 0; + + if (xflags & FS_XFLAG_SYNC) + iflags |= S_SYNC; + if (xflags & FS_XFLAG_APPEND) + iflags |= S_APPEND; + if (xflags & FS_XFLAG_IMMUTABLE) + iflags |= S_IMMUTABLE; + if (xflags & FS_XFLAG_NOATIME) + iflags |= S_NOATIME; + + return iflags; +} + +static long ovl_ioctl_set_fsxflags(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct fsxattr fa; + + memset(&fa, 0, sizeof(fa)); + if (copy_from_user(&fa, (void __user *) arg, sizeof(fa))) + return -EFAULT; + + return ovl_ioctl_set_flags(file, cmd, arg, + ovl_fsxflags_to_iflags(fa.fsx_xflags)); +} + +static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; switch (cmd) { case FS_IOC_GETFLAGS: + case FS_IOC_FSGETXATTR: ret = ovl_real_ioctl(file, cmd, arg); break; case FS_IOC_SETFLAGS: - if (!inode_owner_or_capable(inode)) - return -EACCES; - - ret = mnt_want_write_file(file); - if (ret) - return ret; - - ret = ovl_copy_up_with_data(file_dentry(file)); - if (!ret) { - ret = ovl_real_ioctl(file, cmd, arg); - - inode_lock(inode); - ovl_copyflags(ovl_inode_real(inode), inode); - inode_unlock(inode); - } + ret = ovl_ioctl_set_fsflags(file, cmd, arg); + break; - mnt_drop_write_file(file); + case FS_IOC_FSSETXATTR: + ret = ovl_ioctl_set_fsxflags(file, cmd, arg); break; default: diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 3b7ed5d2279c6a..f0389849fd8078 100644 --- a/fs/overlayfs/inode.c +++ 
b/fs/overlayfs/inode.c @@ -553,15 +553,15 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev, int xinobits = ovl_xino_bits(inode->i_sb); /* - * When NFS export is enabled and d_ino is consistent with st_ino - * (samefs or i_ino has enough bits to encode layer), set the same - * value used for d_ino to i_ino, because nfsd readdirplus compares - * d_ino values to i_ino values of child entries. When called from + * When d_ino is consistent with st_ino (samefs or i_ino has enough + * bits to encode layer), set the same value used for st_ino to i_ino, + * so the inode number exposed via /proc/locks and the like will be + * consistent with d_ino and st_ino values. An i_ino value inconsistent + * with d_ino also causes nfsd readdirplus to fail. When called from * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real * upper inode i_ino on ovl_inode_init() or ovl_inode_update(). */ - if (inode->i_sb->s_export_op && - (ovl_same_sb(inode->i_sb) || xinobits)) { + if (ovl_same_sb(inode->i_sb) || xinobits) { inode->i_ino = ino; if (xinobits && fsid && !(ino >> (64 - xinobits))) inode->i_ino |= (unsigned long)fsid << (64 - xinobits); @@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, return inode; } +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir) +{ + struct inode *key = d_inode(dir); + struct inode *trap; + bool res; + + trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key); + if (!trap) + return false; + + res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) && + !ovl_inode_lower(trap); + + iput(trap); + return res; +} + +/* + * Create an inode cache entry for layer root dir, that will intentionally + * fail ovl_verify_inode(), so any lookup that will find some layer root + * will fail. + */ +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir) +{ + struct inode *key = d_inode(dir); + struct inode *trap; + + if (!d_is_dir(dir)) + return ERR_PTR(-ENOTDIR); + + trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test, + ovl_inode_set, key); + if (!trap) + return ERR_PTR(-ENOMEM); + + if (!(trap->i_state & I_NEW)) { + /* Conflicting layer roots? */ + iput(trap); + return ERR_PTR(-ELOOP); + } + + trap->i_mode = S_IFDIR; + trap->i_flags = S_DEAD; + unlock_new_inode(trap); + + return trap; +} + /* * Does overlay inode need to be hashed by lower inode? */ @@ -832,7 +880,7 @@ struct inode *ovl_get_inode(struct super_block *sb, int fsid = bylower ? oip->lowerpath->layer->fsid : 0; bool is_dir, metacopy = false; unsigned long ino = 0; - int err = -ENOMEM; + int err = oip->newinode ?
-EEXIST : -ENOMEM; if (!realinode) realinode = d_inode(lowerdentry); @@ -917,6 +965,7 @@ struct inode *ovl_get_inode(struct super_block *sb, return inode; out_err: + pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err); inode = ERR_PTR(err); goto out; } diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index efd372312ef100..badf039267a2fd 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -18,6 +18,7 @@ #include "overlayfs.h" struct ovl_lookup_data { + struct super_block *sb; struct qstr name; bool is_dir; bool opaque; @@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, if (!d->metacopy || d->last) goto out; } else { + if (ovl_lookup_trap_inode(d->sb, this)) { + /* Caught in a trap of overlapping layers */ + err = -ELOOP; + goto out_err; + } + if (last_element) d->is_dir = true; if (d->last) @@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, int err; bool metacopy = false; struct ovl_lookup_data d = { + .sb = dentry->d_sb, .name = dentry->d_name, .is_dir = false, .opaque = false, diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d9c16ceebfe7da..265bf9cfde089c 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode); bool ovl_test_flag(unsigned long flag, struct inode *inode); bool ovl_inuse_trylock(struct dentry *dentry); void ovl_inuse_unlock(struct dentry *dentry); +bool ovl_is_inuse(struct dentry *dentry); bool ovl_need_index(struct dentry *dentry); int ovl_nlink_start(struct dentry *dentry, bool *locked); void ovl_nlink_end(struct dentry *dentry, bool locked); @@ -366,6 +367,8 @@ struct ovl_inode_params { struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, bool is_upper); +bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir); +struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_inode(struct super_block *sb, struct ovl_inode_params *oip); static inline void ovl_copyattr(struct inode *from, struct inode *to) @@ -411,7 +414,7 @@ extern const struct file_operations ovl_file_operations; int ovl_copy_up(struct dentry *dentry); int ovl_copy_up_with_data(struct dentry *dentry); int ovl_copy_up_flags(struct dentry *dentry, int flags); -int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags); +int ovl_maybe_copy_up(struct dentry *dentry, int flags); int ovl_copy_xattr(struct dentry *old, struct dentry *new); int ovl_set_attr(struct dentry *upper, struct kstat *stat); struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper); diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index ec237035333afc..6ed1ace8f8b300 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -29,6 +29,8 @@ struct ovl_sb { struct ovl_layer { struct vfsmount *mnt; + /* Trap in ovl inode cache */ + struct inode *trap; struct ovl_sb *fs; /* Index of this layer in fs root (upper idx == 0) */ int idx; @@ -65,6 +67,10 @@ struct ovl_fs { /* Did we take the inuse lock? 
*/ bool upperdir_locked; bool workdir_locked; + /* Traps in ovl inode cache */ + struct inode *upperdir_trap; + struct inode *workdir_trap; + struct inode *indexdir_trap; /* Inode numbers in all layers do not use the high xino_bits */ unsigned int xino_bits; }; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 0fb0a59a5e5c2f..2d028c02621fa8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -217,6 +217,9 @@ static void ovl_free_fs(struct ovl_fs *ofs) { unsigned i; + iput(ofs->indexdir_trap); + iput(ofs->workdir_trap); + iput(ofs->upperdir_trap); dput(ofs->indexdir); dput(ofs->workdir); if (ofs->workdir_locked) @@ -225,8 +228,10 @@ static void ovl_free_fs(struct ovl_fs *ofs) if (ofs->upperdir_locked) ovl_inuse_unlock(ofs->upper_mnt->mnt_root); mntput(ofs->upper_mnt); - for (i = 0; i < ofs->numlower; i++) + for (i = 0; i < ofs->numlower; i++) { + iput(ofs->lower_layers[i].trap); mntput(ofs->lower_layers[i].mnt); + } for (i = 0; i < ofs->numlowerfs; i++) free_anon_bdev(ofs->lower_fs[i].pseudo_dev); kfree(ofs->lower_layers); @@ -984,7 +989,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = { NULL }; -static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) +static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, + struct inode **ptrap, const char *name) +{ + struct inode *trap; + int err; + + trap = ovl_get_trap_inode(sb, dir); + err = PTR_ERR_OR_ZERO(trap); + if (err) { + if (err == -ELOOP) + pr_err("overlayfs: conflicting %s path\n", name); + return err; + } + + *ptrap = trap; + return 0; +} + +static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, + struct path *upperpath) { struct vfsmount *upper_mnt; int err; @@ -1004,6 +1028,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) if (err) goto out; + err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap, + "upperdir"); + if (err) + goto out; + upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { @@ -1030,7 +1059,8 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) return err; } -static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) +static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, + struct path *workpath) { struct vfsmount *mnt = ofs->upper_mnt; struct dentry *temp; @@ -1045,6 +1075,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) if (!ofs->workdir) goto out; + err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir"); + if (err) + goto out; + /* * Upper should support d_type, else whiteouts are visible. 
Given * workdir and upper are on same fs, we can do iterate_dir() on @@ -1105,7 +1139,8 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) return err; } -static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) +static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, + struct path *upperpath) { int err; struct path workpath = { }; @@ -1136,19 +1171,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); } - err = ovl_make_workdir(ofs, &workpath); - if (err) - goto out; + err = ovl_make_workdir(sb, ofs, &workpath); - err = 0; out: path_put(&workpath); return err; } -static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe, - struct path *upperpath) +static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, + struct ovl_entry *oe, struct path *upperpath) { struct vfsmount *mnt = ofs->upper_mnt; int err; @@ -1167,6 +1199,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe, ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (ofs->indexdir) { + err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap, + "indexdir"); + if (err) + goto out; + /* * Verify upper root is exclusively associated with index dir. * Older kernels stored upper fh in "trusted.overlay.origin" @@ -1226,8 +1263,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb) return ofs->numlowerfs; } -static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, - unsigned int numlower) +static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, + struct path *stack, unsigned int numlower) { int err; unsigned int i; @@ -1245,16 +1282,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, for (i = 0; i < numlower; i++) { struct vfsmount *mnt; + struct inode *trap; int fsid; err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb); if (err < 0) goto out; + err = -EBUSY; + if (ovl_is_inuse(stack[i].dentry)) { + pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n"); + goto out; + } + + err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); + if (err) + goto out; + mnt = clone_private_mount(&stack[i]); err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); + iput(trap); goto out; } @@ -1264,6 +1313,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, */ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; + ofs->lower_layers[ofs->numlower].trap = trap; ofs->lower_layers[ofs->numlower].mnt = mnt; ofs->lower_layers[ofs->numlower].idx = i + 1; ofs->lower_layers[ofs->numlower].fsid = fsid; @@ -1358,7 +1408,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, goto out_err; } - err = ovl_get_lower_layers(ofs, stack, numlower); + err = ovl_get_lower_layers(sb, ofs, stack, numlower); if (err) goto out_err; @@ -1390,6 +1440,77 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, goto out; } +/* + * Check if this layer root is a descendant of: + * - another layer of this overlayfs instance + * - upper/work dir of any overlayfs instance + */ +static int ovl_check_layer(struct super_block *sb, struct dentry *dentry, + const char *name) +{ + struct dentry *next = dentry, *parent; + int err = 0; + + if (!dentry) + return 0; + + parent = dget_parent(next); + + /* Walk back ancestors to root (inclusive) looking for traps */ 
+ while (!err && parent != next) { + if (ovl_is_inuse(parent)) { + err = -EBUSY; + pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n", + name); + } else if (ovl_lookup_trap_inode(sb, parent)) { + err = -ELOOP; + pr_err("overlayfs: overlapping %s path\n", name); + } + next = parent; + parent = dget_parent(next); + dput(next); + } + + dput(parent); + + return err; +} + +/* + * Check if any of the layers or work dirs overlap. + */ +static int ovl_check_overlapping_layers(struct super_block *sb, + struct ovl_fs *ofs) +{ + int i, err; + + if (ofs->upper_mnt) { + err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir"); + if (err) + return err; + + /* + * Checking workbasedir avoids hitting ovl_is_inuse(parent) of + * this instance and covers overlapping work and index dirs, + * unless work or index dir have been moved since created inside + * workbasedir. In that case, we already have their traps in + * inode cache and we will catch that case on lookup. + */ + err = ovl_check_layer(sb, ofs->workbasedir, "workdir"); + if (err) + return err; + } + + for (i = 0; i < ofs->numlower; i++) { + err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root, + "lowerdir"); + if (err) + return err; + } + + return 0; +} + static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path upperpath = { }; @@ -1429,17 +1550,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (ofs->config.xino != OVL_XINO_OFF) ofs->xino_bits = BITS_PER_LONG - 32; + /* alloc/destroy_inode needed for setting up traps in inode cache */ + sb->s_op = &ovl_super_operations; + if (ofs->config.upperdir) { if (!ofs->config.workdir) { pr_err("overlayfs: missing 'workdir'\n"); goto out_err; } - err = ovl_get_upper(ofs, &upperpath); + err = ovl_get_upper(sb, ofs, &upperpath); if (err) goto out_err; - err = ovl_get_workdir(ofs, &upperpath); + err = ovl_get_workdir(sb, ofs, &upperpath); if (err) goto out_err; @@ -1460,7 +1584,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= SB_RDONLY; if (!(ovl_force_readonly(ofs)) && ofs->config.index) { - err = ovl_get_indexdir(ofs, oe, &upperpath); + err = ovl_get_indexdir(sb, ofs, oe, &upperpath); if (err) goto out_free_oe; @@ -1473,6 +1597,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } + err = ovl_check_overlapping_layers(sb, ofs); + if (err) + goto out_free_oe; + /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->indexdir) { ofs->config.index = false; @@ -1494,7 +1622,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); sb->s_magic = OVERLAYFS_SUPER_MAGIC; - sb->s_op = &ovl_super_operations; sb->s_xattr = ovl_xattr_handlers; sb->s_fs_info = ofs; sb->s_flags |= SB_POSIXACL; diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index c9a2e3c6d5374f..db8bdb29b3207e 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -653,6 +653,18 @@ void ovl_inuse_unlock(struct dentry *dentry) } } +bool ovl_is_inuse(struct dentry *dentry) +{ + struct inode *inode = d_inode(dentry); + bool inuse; + + spin_lock(&inode->i_lock); + inuse = (inode->i_state & I_OVL_INUSE); + spin_unlock(&inode->i_lock); + + return inuse; +} + /* * Does this overlay dentry need to be indexed on copy up? 
*/ diff --git a/fs/proc/base.c b/fs/proc/base.c index 81d77b15b34795..f999e8bd3771c9 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2542,6 +2542,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, rcu_read_unlock(); return -EACCES; } + /* Prevent changes to overridden credentials. */ + if (current_cred() != current_real_cred()) { + rcu_read_unlock(); + return -EBUSY; + } rcu_read_unlock(); if (count > PAGE_SIZE) diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index 5fcb845b9fec12..8cf2218b46a759 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = { .kill_sb = pstore_kill_sb, }; -static int __init init_pstore_fs(void) +int __init pstore_init_fs(void) { int err; - pstore_choose_compression(); - /* Create a convenient mount point for people to access pstore */ err = sysfs_create_mount_point(fs_kobj, "pstore"); if (err) @@ -500,14 +498,9 @@ static int __init init_pstore_fs(void) out: return err; } -module_init(init_pstore_fs) -static void __exit exit_pstore_fs(void) +void __exit pstore_exit_fs(void) { unregister_filesystem(&pstore_fs_type); sysfs_remove_mount_point(fs_kobj, "pstore"); } -module_exit(exit_pstore_fs) - -MODULE_AUTHOR("Tony Luck "); -MODULE_LICENSE("GPL"); diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h index fb767e28aeb283..7062ea4bc57c5b 100644 --- a/fs/pstore/internal.h +++ b/fs/pstore/internal.h @@ -37,7 +37,8 @@ extern bool pstore_is_mounted(void); extern void pstore_record_init(struct pstore_record *record, struct pstore_info *psi); -/* Called during module_init() */ -extern void __init pstore_choose_compression(void); +/* Called during pstore init/exit. */ +int __init pstore_init_fs(void); +void __exit pstore_exit_fs(void); #endif diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 15e99d5a681d29..4bae3f4fe829db 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -124,26 +124,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) } } -bool pstore_cannot_block_path(enum kmsg_dump_reason reason) +/* + * Should pstore_dump() wait for a concurrent pstore_dump()? If + * not, the current pstore_dump() will report a failure to dump + * and return. + */ +static bool pstore_cannot_wait(enum kmsg_dump_reason reason) { - /* - * In case of NMI path, pstore shouldn't be blocked - * regardless of reason. - */ + /* In NMI path, pstore shouldn't block regardless of reason. */ if (in_nmi()) return true; switch (reason) { /* In panic case, other cpus are stopped by smp_send_stop(). */ case KMSG_DUMP_PANIC: - /* Emergency restart shouldn't be blocked by spin lock. */ + /* Emergency restart shouldn't be blocked. */ case KMSG_DUMP_EMERG: return true; default: return false; } } -EXPORT_SYMBOL_GPL(pstore_cannot_block_path); #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS) static int zbufsize_deflate(size_t size) @@ -274,37 +275,59 @@ static int pstore_decompress(void *in, void *out, static void allocate_buf_for_compression(void) { + struct crypto_comp *ctx; + int size; + char *buf; + + /* Skip if not built-in or compression backend not selected yet. */ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend) return; + /* Skip if no pstore backend yet or compression init already done. 
*/ + if (!psinfo || tfm) + return; + if (!crypto_has_comp(zbackend->name, 0, 0)) { - pr_err("No %s compression\n", zbackend->name); + pr_err("Unknown compression: %s\n", zbackend->name); return; } - big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize); - if (big_oops_buf_sz <= 0) + size = zbackend->zbufsize(psinfo->bufsize); + if (size <= 0) { + pr_err("Invalid compression size for %s: %d\n", + zbackend->name, size); return; + } - big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); - if (!big_oops_buf) { - pr_err("allocate compression buffer error!\n"); + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + pr_err("Failed %d byte compression buffer allocation for: %s\n", + size, zbackend->name); return; } - tfm = crypto_alloc_comp(zbackend->name, 0, 0); - if (IS_ERR_OR_NULL(tfm)) { - kfree(big_oops_buf); - big_oops_buf = NULL; - pr_err("crypto_alloc_comp() failed!\n"); + ctx = crypto_alloc_comp(zbackend->name, 0, 0); + if (IS_ERR_OR_NULL(ctx)) { + kfree(buf); + pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name, + PTR_ERR(ctx)); return; } + + /* A non-NULL big_oops_buf indicates compression is available. */ + tfm = ctx; + big_oops_buf_sz = size; + big_oops_buf = buf; + + pr_info("Using compression: %s\n", zbackend->name); } static void free_buf_for_compression(void) { - if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm)) + if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) { crypto_free_comp(tfm); + tfm = NULL; + } kfree(big_oops_buf); big_oops_buf = NULL; big_oops_buf_sz = 0; @@ -358,23 +381,23 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned long total = 0; const char *why; unsigned int part = 1; - unsigned long flags = 0; - int is_locked; int ret; why = get_reason_str(reason); - if (pstore_cannot_block_path(reason)) { - is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags); - if (!is_locked) { - pr_err("pstore dump routine blocked in %s path, may corrupt error record\n" - , in_nmi() ? "NMI" : why); + if (down_trylock(&psinfo->buf_lock)) { + /* Failed to acquire lock: give up if we cannot wait. */ + if (pstore_cannot_wait(reason)) { + pr_err("dump skipped in %s path: may corrupt error record\n", + in_nmi() ? 
"NMI" : why); + return; + } + if (down_interruptible(&psinfo->buf_lock)) { + pr_err("could not grab semaphore?!\n"); return; } - } else { - spin_lock_irqsave(&psinfo->buf_lock, flags); - is_locked = 1; } + oopscount++; while (total < kmsg_bytes) { char *dst; @@ -391,7 +414,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, record.part = part; record.buf = psinfo->buf; - if (big_oops_buf && is_locked) { + if (big_oops_buf) { dst = big_oops_buf; dst_size = big_oops_buf_sz; } else { @@ -409,7 +432,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, dst_size, &dump_size)) break; - if (big_oops_buf && is_locked) { + if (big_oops_buf) { zipped_len = pstore_compress(dst, psinfo->buf, header_size + dump_size, psinfo->bufsize); @@ -432,8 +455,8 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += record.size; part++; } - if (is_locked) - spin_unlock_irqrestore(&psinfo->buf_lock, flags); + + up(&psinfo->buf_lock); } static struct kmsg_dumper pstore_dumper = { @@ -456,31 +479,14 @@ static void pstore_unregister_kmsg(void) #ifdef CONFIG_PSTORE_CONSOLE static void pstore_console_write(struct console *con, const char *s, unsigned c) { - const char *e = s + c; + struct pstore_record record; - while (s < e) { - struct pstore_record record; - unsigned long flags; - - pstore_record_init(&record, psinfo); - record.type = PSTORE_TYPE_CONSOLE; + pstore_record_init(&record, psinfo); + record.type = PSTORE_TYPE_CONSOLE; - if (c > psinfo->bufsize) - c = psinfo->bufsize; - - if (oops_in_progress) { - if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) - break; - } else { - spin_lock_irqsave(&psinfo->buf_lock, flags); - } - record.buf = (char *)s; - record.size = c; - psinfo->write(&record); - spin_unlock_irqrestore(&psinfo->buf_lock, flags); - s += c; - c = e - s; - } + record.buf = (char *)s; + record.size = c; + psinfo->write(&record); } static struct console pstore_console = { @@ -569,6 +575,7 @@ int pstore_register(struct pstore_info *psi) psi->write_user = pstore_write_user_compat; psinfo = psi; mutex_init(&psinfo->read_mutex); + sema_init(&psinfo->buf_lock, 1); spin_unlock(&pstore_lock); if (owner && !try_module_get(owner)) { @@ -576,7 +583,8 @@ int pstore_register(struct pstore_info *psi) return -EINVAL; } - allocate_buf_for_compression(); + if (psi->flags & PSTORE_FLAGS_DMESG) + allocate_buf_for_compression(); if (pstore_is_mounted()) pstore_get_records(0); @@ -774,14 +782,43 @@ void __init pstore_choose_compression(void) for (step = zbackends; step->name; step++) { if (!strcmp(compress, step->name)) { zbackend = step; - pr_info("using %s compression\n", zbackend->name); return; } } } +static int __init pstore_init(void) +{ + int ret; + + pstore_choose_compression(); + + /* + * Check if any pstore backends registered earlier but did not + * initialize compression because crypto was not ready. If so, + * initialize compression now. 
+ */ + allocate_buf_for_compression(); + + ret = pstore_init_fs(); + if (ret) + return ret; + + return 0; +} +late_initcall(pstore_init); + +static void __exit pstore_exit(void) +{ + pstore_exit_fs(); +} +module_exit(pstore_exit) + module_param(compress, charp, 0444); MODULE_PARM_DESC(compress, "Pstore compression to use"); module_param(backend, charp, 0444); MODULE_PARM_DESC(backend, "Pstore backend to use"); + +MODULE_AUTHOR("Tony Luck "); +MODULE_LICENSE("GPL"); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index eb67bb7f04de55..316c16463b20fe 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -803,27 +803,36 @@ static int ramoops_probe(struct platform_device *pdev) cxt->pstore.data = cxt; /* - * Since bufsize is only used for dmesg crash dumps, it - * must match the size of the dprz record (after PRZ header - * and ECC bytes have been accounted for). + * Prepare frontend flags based on which areas are initialized. + * For ramoops_init_przs() cases, the "max count" variable tells + * if there are regions present. For ramoops_init_prz() cases, + * the single region size is how to check. */ - cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size; - cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL); - if (!cxt->pstore.buf) { - pr_err("cannot allocate pstore crash dump buffer\n"); - err = -ENOMEM; - goto fail_clear; - } - spin_lock_init(&cxt->pstore.buf_lock); - - cxt->pstore.flags = PSTORE_FLAGS_DMESG; + cxt->pstore.flags = 0; + if (cxt->max_dump_cnt) + cxt->pstore.flags |= PSTORE_FLAGS_DMESG; if (cxt->console_size) cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; - if (cxt->ftrace_size) + if (cxt->max_ftrace_cnt) cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; if (cxt->pmsg_size) cxt->pstore.flags |= PSTORE_FLAGS_PMSG; + /* + * Since bufsize is only used for dmesg crash dumps, it + * must match the size of the dprz record (after PRZ header + * and ECC bytes have been accounted for). 
+ */ + if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) { + cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size; + cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL); + if (!cxt->pstore.buf) { + pr_err("cannot allocate pstore crash dump buffer\n"); + err = -ENOMEM; + goto fail_clear; + } + } + err = pstore_register(&cxt->pstore); if (err) { pr_err("registering with pstore failed\n"); @@ -956,7 +965,7 @@ static int __init ramoops_init(void) return ret; } -late_initcall(ramoops_init); +postcore_initcall(ramoops_init); static void __exit ramoops_exit(void) { diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 1fd3011ea6236b..7fd4802222b8c8 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_44BSD: return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid); case UFS_UID_EFT: - if (inode->ui_u1.oldids.ui_suid == 0xFFFF) + if (inode->ui_u1.oldids.ui_sgid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); /* Fall through */ default: diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index e3c40483311569..53be104aab5ce4 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -466,6 +466,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); +int drm_add_override_edid_modes(struct drm_connector *connector); u8 drm_match_cea_mode(const struct drm_display_mode *to_match); enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 61142aa0ab23ab..0eb3372d031197 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1174,6 +1174,14 @@ struct drm_plane_helper_funcs { * current one with the new plane configurations in the new * plane_state. * + * Drivers should also swap the framebuffers between current plane + * state (&drm_plane.state) and new_state. + * This is required since cleanup for async commits is performed on + * the new state, rather than old state like for traditional commits. + * Since we want to give up the reference on the current (old) fb + * instead of our brand new one, swap them in the driver during the + * async commit. 
+ * * FIXME: * - It only works for single plane updates * - Async Pageflips are not supported yet diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h index b8ba5886198678..bcc98bd447f7a7 100644 --- a/include/drm/tinydrm/mipi-dbi.h +++ b/include/drm/tinydrm/mipi-dbi.h @@ -42,7 +42,7 @@ struct mipi_dbi { struct spi_device *spi; bool enabled; struct mutex cmdlock; - int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num); + int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num); const u8 *read_commands; struct gpio_desc *dc; u16 *tx_buf; @@ -79,6 +79,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_clip_rect *clip, bool swap); /** @@ -96,7 +97,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, #define mipi_dbi_command(mipi, cmd, seq...) \ ({ \ u8 d[] = { seq }; \ - mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \ + mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \ }) #ifdef CONFIG_DEBUG_FS diff --git a/include/linux/bio.h b/include/linux/bio.h index 51371740d2a8f0..c7433a20117154 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -257,7 +257,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count) { if (count != 1) { bio->bi_flags |= (1 << BIO_REFFED); - smp_mb__before_atomic(); + smp_mb(); } atomic_set(&bio->__bi_cnt, count); } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 7ddb1349394dbc..7ac2e46112b7e5 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -60,7 +60,7 @@ static __always_inline unsigned long hweight_long(unsigned long w) */ static inline __u64 rol64(__u64 word, unsigned int shift) { - return (word << shift) | (word >> (64 - shift)); + return (word << (shift & 63)) | (word >> ((-shift) & 63)); } /** @@ -70,7 +70,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift) */ static inline __u64 ror64(__u64 word, unsigned int shift) { - return (word >> shift) | (word << (64 - shift)); + return (word >> (shift & 63)) | (word << ((-shift) & 63)); } /** @@ -80,7 +80,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift) */ static inline __u32 rol32(__u32 word, unsigned int shift) { - return (word << shift) | (word >> ((-shift) & 31)); + return (word << (shift & 31)) | (word >> ((-shift) & 31)); } /** @@ -90,7 +90,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift) */ static inline __u32 ror32(__u32 word, unsigned int shift) { - return (word >> shift) | (word << (32 - shift)); + return (word >> (shift & 31)) | (word << ((-shift) & 31)); } /** @@ -100,7 +100,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift) */ static inline __u16 rol16(__u16 word, unsigned int shift) { - return (word << shift) | (word >> (16 - shift)); + return (word << (shift & 15)) | (word >> ((-shift) & 15)); } /** @@ -110,7 +110,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift) */ static inline __u16 ror16(__u16 word, unsigned int shift) { - return (word >> shift) | (word << (16 - shift)); + return (word >> (shift & 15)) | (word << ((-shift) & 15)); } /** @@ -120,7 +120,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift) */ static inline __u8 rol8(__u8 word, unsigned int shift) { - return (word 
<< shift) | (word >> (8 - shift)); + return (word << (shift & 7)) | (word >> ((-shift) & 7)); } /** @@ -130,7 +130,7 @@ static inline __u8 rol8(__u8 word, unsigned int shift) */ static inline __u8 ror8(__u8 word, unsigned int shift) { - return (word >> shift) | (word << (8 - shift)); + return (word >> (shift & 7)) | (word << ((-shift) & 7)); } /** diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 523481a3471b56..16f6beef5cad7a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -34,6 +34,7 @@ struct bpf_map_ops { void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); void (*map_release_uref)(struct bpf_map *map); + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); @@ -400,7 +401,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, } \ _out: \ rcu_read_unlock(); \ - preempt_enable_no_resched(); \ + preempt_enable(); \ _ret; \ }) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 6002275937f551..a6090154b2ab75 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -346,6 +346,11 @@ struct cgroup { * Dying cgroups are cgroups which were deleted by a user, * but are still existing because someone else is holding a reference. * max_descendants is a maximum allowed number of descent cgroups. + * + * nr_descendants and nr_dying_descendants are protected + * by cgroup_mutex and css_set_lock. It's fine to read them holding + * any of cgroup_mutex and css_set_lock; for writing both locks + * should be held. */ int nr_descendants; int nr_dying_descendants; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ca51b2c15bcc6d..8937d48a5389d1 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -485,7 +485,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task, * * Find the css for the (@task, @subsys_id) combination, increment a * reference on and return it. This function is guaranteed to return a - * valid css. + * valid css. The returned css may already have been offlined. */ static inline struct cgroup_subsys_state * task_get_css(struct task_struct *task, int subsys_id) @@ -495,7 +495,13 @@ task_get_css(struct task_struct *task, int subsys_id) rcu_read_lock(); while (true) { css = task_css(task, subsys_id); - if (likely(css_tryget_online(css))) + /* + * Can't use css_tryget_online() here. A task which has + * PF_EXITING set may stay associated with an offline css. + * If such task calls this function, css_tryget_online() + * will keep failing. 
+ */ + if (likely(css_tryget(css))) break; cpu_relax(); } diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index a8ff0ca0c3213d..3ebee1ce6f982a 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -201,6 +201,10 @@ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif +#if GCC_VERSION >= 90100 +#define __copy(symbol) __attribute__((__copy__(symbol))) +#endif + #if !defined(__noclone) #define __noclone /* not needed */ #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 81c2238b884c59..bb22908c79e836 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -319,29 +319,14 @@ static inline void *offset_to_ptr(const int *off) #endif #ifndef __compiletime_error # define __compiletime_error(message) -/* - * Sparse complains of variable sized arrays due to the temporary variable in - * __compiletime_assert. Unfortunately we can't just expand it out to make - * sparse see a constant array size without breaking compiletime_assert on old - * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. - */ -# ifndef __CHECKER__ -# define __compiletime_error_fallback(condition) \ - do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -# endif -#endif -#ifndef __compiletime_error_fallback -# define __compiletime_error_fallback(condition) do { } while (0) #endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ - int __cond = !(condition); \ extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (__cond) \ + if (!(condition)) \ prefix ## suffix(); \ - __compiletime_error_fallback(__cond); \ } while (0) #else # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index db192becfec499..2b8ed70c4c7744 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -151,6 +151,10 @@ struct ftrace_likely_data { #define __assume_aligned(a, ...) #endif +#ifndef asm_volatile_goto +#define asm_volatile_goto(x...) asm goto(x) +#endif + /* Are two types/vars the same type (ignoring qualifiers)? 
*/ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) @@ -176,6 +180,10 @@ struct ftrace_likely_data { #define __diag_GCC(version, severity, string) #endif +#ifndef __copy +# define __copy(symbol) +#endif + #define __diag_push() __diag(push) #define __diag_pop() __diag(pop) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 57ae83c4d5f450..006f69f9277bfa 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -183,10 +183,14 @@ enum cpuhp_smt_control { extern enum cpuhp_smt_control cpu_smt_control; extern void cpu_smt_disable(bool force); extern void cpu_smt_check_topology(void); +extern int cpuhp_smt_enable(void); +extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); #else # define cpu_smt_control (CPU_SMT_ENABLED) static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } +static inline int cpuhp_smt_enable(void) { return 0; } +static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } #endif /* diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index caf40ad0bbc6e0..d64d8c2bbdabc5 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -101,6 +101,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ef4b70f64f333a..0880baefd85f8d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -175,7 +175,6 @@ struct dentry_operations { * typically using d_splice_alias. */ #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. 
*/ -#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */ #define DCACHE_CANT_MOUNT 0x00000100 #define DCACHE_GENOCIDE 0x00000200 @@ -216,6 +215,7 @@ struct dentry_operations { #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ #define DCACHE_DENTRY_CURSOR 0x20000000 +#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */ extern seqlock_t rename_lock; diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 2fd8006153c350..b3419da1a77664 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -2,7 +2,7 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_JUMP_LABEL) #include #endif @@ -38,7 +38,7 @@ struct _ddebug { #define _DPRINTK_FLAGS_DEFAULT 0 #endif unsigned int flags:8; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL union { struct static_key_true dd_key_true; struct static_key_false dd_key_false; @@ -83,7 +83,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, dd_key_init(key, init) \ } -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define dd_key_init(key, init) key = (init) diff --git a/include/linux/filter.h b/include/linux/filter.h index 037610845892b1..d52a7484aeb2da 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -684,6 +684,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_memory_ro((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); } static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) @@ -836,6 +837,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; +extern int bpf_jit_limit; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index f767293b00e666..f13272d8433200 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -596,6 +596,7 @@ struct unixware_disklabel { extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); extern void blk_free_devt(dev_t devt); +extern void blk_invalidate_devt(dev_t devt); extern dev_t blk_lookup_devt(const char *name, int partno); extern char *disk_name (struct gendisk *hd, int partno, char *buf); diff --git a/include/linux/hid.h b/include/linux/hid.h index d44a783629425a..8b3e5e8a72fbc5 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -414,6 +414,7 @@ struct hid_global { struct hid_local { unsigned usage[HID_MAX_USAGES]; /* usage array */ + u8 usage_size[HID_MAX_USAGES]; /* usage size array */ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ unsigned usage_index; unsigned usage_minimum; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index fdcb45999b2633..77227224ca880d 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); -vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, pfn_t pfn, bool write); -vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, pfn_t pfn, bool write); +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, 
pfn_t pfn, bool write); +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 087fd5f48c9128..d34112fb3d5264 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -123,9 +123,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, - struct vm_area_struct *vma, - struct address_space *mapping, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, pgoff_t idx, unsigned long address); pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 730ead1a46df67..57c122ae54523c 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -66,6 +66,7 @@ struct ad_sigma_delta { bool irq_dis; bool bus_locked; + bool keep_cs_asserted; uint8_t comm; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b708e5169d1df9..583b82b5a1e98e 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1317,7 +1317,7 @@ extern void __wait_on_journal (journal_t *); /* Transaction cache support */ extern void jbd2_journal_destroy_transaction_cache(void); -extern int jbd2_journal_init_transaction_cache(void); +extern int __init jbd2_journal_init_transaction_cache(void); extern void jbd2_journal_free_transaction(transaction_t *); /* @@ -1445,8 +1445,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode) /* Primary revoke support */ #define JOURNAL_REVOKE_DEFAULT_HASH 256 extern int jbd2_journal_init_revoke(journal_t *, int); -extern void jbd2_journal_destroy_revoke_caches(void); -extern int jbd2_journal_init_revoke_caches(void); +extern void jbd2_journal_destroy_revoke_record_cache(void); +extern void jbd2_journal_destroy_revoke_table_cache(void); +extern int __init jbd2_journal_init_revoke_record_cache(void); +extern int __init jbd2_journal_init_revoke_table_cache(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 1a0b6f17a5d6c4..4c3e77687d4eeb 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -71,10 +71,6 @@ * Additional babbling in: Documentation/static-keys.txt */ -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) -# define HAVE_JUMP_LABEL -#endif - #ifndef __ASSEMBLY__ #include @@ -86,7 +82,7 @@ extern bool static_key_initialized; "%s(): static key '%pS' used before call to jump_label_init()", \ __func__, (key)) -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key { atomic_t enabled; @@ -114,10 +110,10 @@ struct static_key { struct static_key { atomic_t enabled; }; -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #endif /* __ASSEMBLY__ */ -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #include #endif @@ -130,7 +126,7 @@ enum jump_label_type { struct module; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define JUMP_TYPE_FALSE 0UL #define JUMP_TYPE_TRUE 1UL @@ -184,7 +180,7 @@ extern void static_key_disable_cpuslocked(struct 
static_key *key); { .enabled = { 0 }, \ { .entries = (void *)JUMP_TYPE_FALSE } } -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #include #include @@ -271,7 +267,7 @@ static inline void static_key_disable(struct static_key *key) #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled @@ -335,7 +331,7 @@ extern bool ____wrong_branch_error(void); static_key_count((struct static_key *)x) > 0; \ }) -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL /* * Combine the right initial value (type) with the right branch order @@ -417,12 +413,12 @@ extern bool ____wrong_branch_error(void); unlikely(branch); \ }) -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) #define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ /* * Advanced usage; refcount, branch is enabled when: count != 0 diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index baa8eabbaa56b7..a49f2b45b3f0ff 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -5,21 +5,19 @@ #include #include -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_JUMP_LABEL) struct static_key_deferred { struct static_key key; unsigned long timeout; struct delayed_work work; }; -#endif -#ifdef HAVE_JUMP_LABEL extern void static_key_slow_dec_deferred(struct static_key_deferred *key); extern void static_key_deferred_flush(struct static_key_deferred *key); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; @@ -38,5 +36,5 @@ jump_label_rate_limit(struct static_key_deferred *key, { STATIC_KEY_CHECK_USE(key); } -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index aa5efd9351ebd8..d5ceb2839a2ded 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -54,6 +54,7 @@ struct list_lru { #ifdef CONFIG_MEMCG_KMEM struct list_head list; int shrinker_id; + bool memcg_aware; #endif }; diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index 5d42859cb44110..844fc2973392b1 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -215,9 +215,9 @@ /* DA9063 Configuration registers */ /* OTP */ -#define DA9063_REG_OPT_COUNT 0x101 -#define DA9063_REG_OPT_ADDR 0x102 -#define DA9063_REG_OPT_DATA 0x103 +#define DA9063_REG_OTP_CONT 0x101 +#define DA9063_REG_OTP_ADDR 0x102 +#define DA9063_REG_OTP_DATA 0x103 /* Customer Trim and Configuration */ #define DA9063_REG_T_OFFSET 0x104 diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h index ad2a9a852aead1..b4fd5a7c2aaa7c 100644 --- a/include/linux/mfd/max77620.h +++ b/include/linux/mfd/max77620.h @@ -136,8 +136,8 @@ #define MAX77620_FPS_PERIOD_MIN_US 40 #define MAX20024_FPS_PERIOD_MIN_US 20 -#define MAX77620_FPS_PERIOD_MAX_US 2560 -#define MAX20024_FPS_PERIOD_MAX_US 5120 +#define MAX20024_FPS_PERIOD_MAX_US 2560 +#define MAX77620_FPS_PERIOD_MAX_US 5120 #define 
MAX77620_REG_FPS_GPIO1 0x54 #define MAX77620_REG_FPS_GPIO2 0x55 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index beed7121c7818b..2ff52de1c2b894 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -395,6 +395,7 @@ struct mmc_host { unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int use_blk_mq:1; /* use blk-mq */ + unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 97ca105347a6c5..6905f3f641cce6 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -159,4 +159,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); +extern void sdio_retune_crc_disable(struct sdio_func *func); +extern void sdio_retune_crc_enable(struct sdio_func *func); + +extern void sdio_retune_hold_now(struct sdio_func *func); +extern void sdio_retune_release(struct sdio_func *func); + #endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 904f9462813227..9915397715fc2f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -130,13 +130,13 @@ extern void cleanup_module(void); #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ - int init_module(void) __attribute__((alias(#initfn))); + int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. 
*/ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __attribute__((alias(#exitfn))); + void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); #endif @@ -433,7 +433,7 @@ struct module { unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; #endif diff --git a/include/linux/mount.h b/include/linux/mount.h index 45b1f56c6c2f9f..4b0db44189544b 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -86,6 +86,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt); struct path; extern struct vfsmount *clone_private_mount(const struct path *path); +extern int __mnt_want_write(struct vfsmount *); +extern void __mnt_drop_write(struct vfsmount *); struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index bbe99d2b28b4c6..72cb19c3db6aa9 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -176,7 +176,7 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif @@ -198,7 +198,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct nf_hook_entries *hook_head = NULL; int ret = 1; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 554c920691dd29..a13774be2eb5b3 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -8,7 +8,7 @@ #ifdef CONFIG_NETFILTER_INGRESS static inline bool nf_hook_ingress_active(const struct sk_buff *skb) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) return false; #endif diff --git a/include/linux/of.h b/include/linux/of.h index 40e58b0e9cf4f1..d5a863c1ee390f 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -236,8 +236,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev); static inline u64 of_read_number(const __be32 *cell, int size) { u64 r = 0; - while (size--) - r = (r << 32) | be32_to_cpu(*(cell++)); + for (; size--; cell++) + r = (r << 32) | be32_to_cpu(*cell); return r; } @@ -968,6 +968,12 @@ static inline int of_cpu_node_to_id(struct device_node *np) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif +static inline int of_prop_val_eq(struct property *p1, struct property *p2) +{ + return p1->length == p2->length && + !memcmp(p1->value, p2->value, (size_t)p1->length); +} + #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 40b48e2133cb8e..15eb85de92269e 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -36,6 +36,12 @@ #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) +/* + * Avoids triggering -Wtype-limits compilation warning, + * while using unsigned data 
types to check a < 0. + */ +#define is_non_negative(a) ((a) > 0 || (a) == 0) +#define is_negative(a) (!(is_non_negative(a))) #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW /* @@ -227,10 +233,10 @@ typeof(d) _d = d; \ u64 _a_full = _a; \ unsigned int _to_shift = \ - _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \ + is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \ *_d = (_a_full << _to_shift); \ - (_to_shift != _s || *_d < 0 || _a < 0 || \ - (*_d >> _to_shift) != _a); \ + (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \ + (*_d >> _to_shift) != _a); \ }) /** diff --git a/include/linux/pci.h b/include/linux/pci.h index 6925828f9f250f..b1f297f4b7b0b2 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -346,6 +346,8 @@ struct pci_dev { unsigned int hotplug_user_indicators:1; /* SlotCtl indicators controlled exclusively by user sysfs */ + unsigned int clear_retrain_link:1; /* Need to clear Retrain Link + bit manually */ unsigned int d3_delay; /* D3->D0 transition time in ms */ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 30fcec375a3af2..de9093d6e6609a 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -88,7 +88,7 @@ struct pstore_record { * @owner: module which is repsonsible for this backend driver * @name: name of the backend driver * - * @buf_lock: spinlock to serialize access to @buf + * @buf_lock: semaphore to serialize access to @buf * @buf: preallocated crash dump buffer * @bufsize: size of @buf available for crash dump bytes (must match * smallest number of bytes available for writing to a @@ -173,7 +173,7 @@ struct pstore_info { struct module *owner; char *name; - spinlock_t buf_lock; + struct semaphore buf_lock; char *buf; size_t bufsize; @@ -199,7 +199,6 @@ struct pstore_info { extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); -extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); struct pstore_ftrace_record { unsigned long ip; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 56518adc31dd70..bd7d611d63e914 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -639,7 +639,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) #ifdef CONFIG_PWM_SYSFS void pwmchip_sysfs_export(struct pwm_chip *chip); void pwmchip_sysfs_unexport(struct pwm_chip *chip); -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); #else static inline void pwmchip_sysfs_export(struct pwm_chip *chip) { @@ -648,10 +647,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) { } - -static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) -{ -} #endif /* CONFIG_PWM_SYSFS */ #endif /* __LINUX_PWM_H */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 75e5b393cf4408..e102c5bccbb9fd 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -78,14 +78,12 @@ void synchronize_rcu(void); static inline void __rcu_read_lock(void) { - if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) - preempt_disable(); + preempt_disable(); } static inline void __rcu_read_unlock(void) { - if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) - preempt_enable(); + preempt_enable(); } static inline void synchronize_rcu(void) diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index cebb79fe2c72a6..0d10b7ce0da744 
100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm) * followed by taking the mmap_sem for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * + * It also has to be called when mmgrab() is used in the context of + * the process, but then the mm_count refcount is transferred outside + * the context of the process to run down_write() on that pinned mm. + * * NOTE: find_extend_vma() called from GUP context is the only place * that can modify the "mm" (notably the vm_start/end) under mmap_sem * for reading and outside the context of the process, so it is also diff --git a/include/linux/siphash.h b/include/linux/siphash.h index fa7a6b9cedbffe..bf21591a9e5e65 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -21,6 +21,11 @@ typedef struct { u64 key[2]; } siphash_key_t; +static inline bool siphash_key_is_zero(const siphash_key_t *key) +{ + return !(key->key[0] | key->key[1]); +} + u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 820903ceac4f94..28baccb1efd53d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1333,10 +1333,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { - if (uarg->callback == sock_zerocopy_callback) { + if (skb_zcopy_is_nouarg(skb)) { + /* no notification callback */ + } else if (uarg->callback == sock_zerocopy_callback) { uarg->zerocopy = uarg->zerocopy && zerocopy; sock_zerocopy_put(uarg); - } else if (!skb_zcopy_is_nouarg(skb)) { + } else { uarg->callback(uarg, zerocopy); } @@ -2587,7 +2589,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { if (likely(!skb_zcopy(skb))) return 0; - if (skb_uarg(skb)->callback == sock_zerocopy_callback) + if (!skb_zcopy_is_nouarg(skb) && + skb_uarg(skb)->callback == sock_zerocopy_callback) return 0; return skb_copy_ubufs(skb, gfp_mask); } diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index d0884b52500100..9d1bc65d226cc0 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -29,7 +29,7 @@ struct smpboot_thread_data; * @thread_comm: The base name of the thread */ struct smp_hotplug_thread { - struct task_struct __percpu **store; + struct task_struct * __percpu *store; struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index d2c8f280e48fa9..4374196b98ea48 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -485,4 +485,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) return (user_mss && user_mss < mss) ? 
user_mss : mss; } + +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, + int shiftlen); + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/time64.h b/include/linux/time64.h index 05634afba0db62..4a45aea0f96e92 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -41,6 +41,17 @@ struct itimerspec64 { #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +/* + * Limits for settimeofday(): + * + * To prevent setting the time close to the wraparound point time setting + * is limited so a reasonable uptime can be accomodated. Uptime of 30 years + * should be really sufficient, which means the cutoff is 2232. At that + * point the cutoff is just a small part of the larger problem. + */ +#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600) +#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) + static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { @@ -108,6 +119,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) return true; } +static inline bool timespec64_valid_settod(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */ + if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) + return false; + return true; +} + /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index f6818f732f347f..bddd86c11f5f5b 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -551,6 +551,7 @@ struct vb2_queue { unsigned int start_streaming_called:1; unsigned int error:1; unsigned int waiting_for_buffers:1; + unsigned int waiting_in_dqbuf:1; unsigned int is_multiplanar:1; unsigned int is_output:1; unsigned int copy_timestamp:1; diff --git a/include/net/arp.h b/include/net/arp.h index 977aabfcdc03bf..c8f580a0e6b1f5 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 return val * hash_rnd[0]; } +#ifdef CONFIG_INET static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) { if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) @@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); } +#else +static inline +struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) +{ + return NULL; +} +#endif static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key) { diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index cdd9f1fe7cfa90..845d947dbae82a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -270,6 +270,7 @@ enum { HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, + HCI_CMD_PENDING, __HCI_NUM_FLAGS, }; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4de121e24ce58f..67e0a990144a6e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3448,7 +3448,8 @@ struct cfg80211_ops { * on wiphy_new(), but can be changed by the driver if it has a good * reason to override the default * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station - * on a VLAN interface) + * on a VLAN interface). 
This flag also serves an extra purpose of + * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype. * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the * control port protocol ethertype. The device also honours the diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 2d31e22babd8f1..62c936230cc8bb 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -171,7 +171,8 @@ struct fib6_info { dst_nocount:1, dst_nopolicy:1, dst_host:1, - unused:3; + fib6_destroying:1, + unused:2; struct fib6_nh fib6_nh; struct rcu_head rcu; @@ -259,8 +260,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) rcu_read_lock(); from = rcu_dereference(rt->from); - if (from && (rt->rt6i_flags & RTF_PCPU || - unlikely(!list_empty(&rt->rt6i_uncached)))) + if (from) fib6_get_cookie_safe(from, &cookie); rcu_read_unlock(); diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index e47503b4e4d178..366e2a60010e81 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -9,6 +9,7 @@ #include #include #include +#include struct tcpm_hash_bucket; struct ctl_table_header; @@ -113,6 +114,7 @@ struct netns_ipv4 { #endif int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; @@ -214,5 +216,6 @@ struct netns_ipv4 { unsigned int ipmr_seq; /* protected by rtnl_mutex */ atomic_t rt_genid; + siphash_key_t ip_id_key; }; #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index 770917d0caa718..e75661f92daaaa 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -55,6 +55,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 +#define TCP_MIN_SND_MSS 48 +#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) /* * Never offer a window over 32767 without using window scaling. Some diff --git a/include/net/tls.h b/include/net/tls.h index c423b7d0b6abc0..95411057589146 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -161,6 +161,10 @@ enum { TLS_PENDING_CLOSED_RECORD }; +enum tls_context_flags { + TLS_RX_SYNC_RUNNING = 0, +}; + struct cipher_context { u16 prepend_size; u16 tag_size; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 5e3daf53b3d1ec..4ddd2b13ac8d69 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -295,7 +295,8 @@ struct xfrm_replay { }; struct xfrm_if_cb { - struct xfrm_if *(*decode_session)(struct sk_buff *skb); + struct xfrm_if *(*decode_session)(struct sk_buff *skb, + unsigned short family); }; void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb); @@ -1430,6 +1431,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x) return atomic_read(&x->tunnel_users); } +static inline bool xfrm_id_proto_valid(u8 proto) +{ + switch (proto) { + case IPPROTO_AH: + case IPPROTO_ESP: + case IPPROTO_COMP: +#if IS_ENABLED(CONFIG_IPV6) + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: +#endif + return true; + default: + return false; + } +} + +/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. 
*/ static inline int xfrm_id_proto_match(u8 proto, u8 userproto) { return (!userproto || proto == userproto || diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7f5634ce8e885d..4671c9150d4d15 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -942,7 +942,7 @@ struct drm_i915_gem_execbuffer2 { * struct drm_i915_gem_exec_fence *fences. */ __u64 cliprects_ptr; -#define I915_EXEC_RING_MASK (7<<0) +#define I915_EXEC_RING_MASK (0x3f) #define I915_EXEC_DEFAULT (0<<0) #define I915_EXEC_RENDER (1<<0) #define I915_EXEC_BSD (2<<0) diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 92fa24c24c9265..2170e58a2a97dd 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -219,10 +219,12 @@ struct fuse_file_lock { * FOPEN_DIRECT_IO: bypass page cache for this open file * FOPEN_KEEP_CACHE: don't invalidate the data cache on open * FOPEN_NONSEEKABLE: the file is not seekable + * FOPEN_STREAM: the file is stream-like (no file position at all) */ #define FOPEN_DIRECT_IO (1 << 0) #define FOPEN_KEEP_CACHE (1 << 1) #define FOPEN_NONSEEKABLE (1 << 2) +#define FOPEN_STREAM (1 << 4) /** * INIT request/reply flags diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index f80135e5feaa88..abae27c3001c3d 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -282,6 +282,7 @@ enum LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ + LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h index 4b2c93b1934cf5..4955e1a9f1bc4b 100644 --- a/include/uapi/linux/tipc_config.h +++ b/include/uapi/linux/tipc_config.h @@ -307,8 +307,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) tlv_ptr = (struct tlv_desc *)tlv; tlv_ptr->tlv_type = htons(type); tlv_ptr->tlv_len = htons(tlv_len); - if (len && data) - memcpy(TLV_DATA(tlv_ptr), data, tlv_len); + if (len && data) { + memcpy(TLV_DATA(tlv_ptr), data, len); + memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len); + } return TLV_SPACE(len); } @@ -405,8 +407,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags, tcm_hdr->tcm_len = htonl(msg_len); tcm_hdr->tcm_type = htons(cmd); tcm_hdr->tcm_flags = htons(flags); - if (data_len && data) + if (data_len && data) { memcpy(TCM_DATA(msg), data, data_len); + memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len); + } return TCM_SPACE(data_len); } diff --git a/include/video/udlfb.h b/include/video/udlfb.h index 7d09e54ae54e0f..58fb5732831a43 100644 --- a/include/video/udlfb.h +++ b/include/video/udlfb.h @@ -48,6 +48,13 @@ struct dlfb_data { int base8; u32 pseudo_palette[256]; int blank_mode; /*one of FB_BLANK_ */ + struct mutex render_mutex; + int damage_x; + int damage_y; + int damage_x2; + int damage_y2; + spinlock_t damage_lock; + struct work_struct damage_work; struct fb_ops ops; /* blit-only rendering path metrics, exposed through sysfs */ atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */ diff --git a/init/Kconfig b/init/Kconfig index 864af10bb1b9c4..47035b5a46f645 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -23,6 +23,9 @@ config CLANG_VERSION int default $(shell,$(srctree)/scripts/clang-version.sh $(CC)) +config CC_HAS_ASM_GOTO + def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) + config CONSTRUCTORS bool depends on !UML diff --git 
a/init/initramfs.c b/init/initramfs.c index f6f4a1e4cd5475..cd5fb00fcb5496 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -612,13 +612,12 @@ static int __init populate_rootfs(void) printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); - if (!err) { - free_initrd(); + if (!err) goto done; - } else { - clean_rootfs(); - unpack_to_rootfs(__initramfs_start, __initramfs_size); - } + + clean_rootfs(); + unpack_to_rootfs(__initramfs_start, __initramfs_size); + printk(KERN_INFO "rootfs image is not initramfs (%s)" "; looks like an initrd\n", err); fd = ksys_open("/initrd.image", @@ -632,7 +631,6 @@ static int __init populate_rootfs(void) written, initrd_end - initrd_start); ksys_close(fd); - free_initrd(); } done: /* empty statement */; @@ -642,9 +640,9 @@ static int __init populate_rootfs(void) initrd_end - initrd_start); if (err) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); - free_initrd(); #endif } + free_initrd(); flush_delayed_fput(); /* * Try loading default modules from initramfs. This gives diff --git a/ipc/mqueue.c b/ipc/mqueue.c index c0d58f390c3b43..bce7af1546d9c5 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -391,7 +391,8 @@ static void mqueue_evict_inode(struct inode *inode) struct user_struct *user; unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; - struct msg_msg *msg; + struct msg_msg *msg, *nmsg; + LIST_HEAD(tmp_msg); clear_inode(inode); @@ -402,10 +403,15 @@ static void mqueue_evict_inode(struct inode *inode) info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) - free_msg(msg); + list_add_tail(&msg->m_list, &tmp_msg); kfree(info->node_cache); spin_unlock(&info->lock); + list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { + list_del(&msg->m_list); + free_msg(msg); + } + /* Total amount of bytes accounted for the mqueue */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * diff --git a/ipc/msgutil.c b/ipc/msgutil.c index 84598025a6ade1..e65593742e2be5 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "util.h" @@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len) pseg = &msg->next; while (len > 0) { struct msg_msgseg *seg; + + cond_resched(); + alen = min(len, DATALEN_SEG); seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT); if (seg == NULL) @@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg) kfree(msg); while (seg != NULL) { struct msg_msgseg *tmp = seg->next; + + cond_resched(); kfree(seg); seg = tmp; } diff --git a/kernel/Makefile b/kernel/Makefile index 7a63d567fdb571..df5e3ca30acd26 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n # Don't self-instrument. 
KCOV_INSTRUMENT_kcov.o := n KASAN_SANITIZE_kcov.o := n +CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) diff --git a/kernel/acct.c b/kernel/acct.c index addf7732fb562f..81f9831a785925 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -227,7 +227,7 @@ static int acct_on(struct filename *pathname) filp_close(file, NULL); return PTR_ERR(internal); } - err = mnt_want_write(internal); + err = __mnt_want_write(internal); if (err) { mntput(internal); kfree(acct); @@ -252,7 +252,7 @@ static int acct_on(struct filename *pathname) old = xchg(&ns->bacct, &acct->pin); mutex_unlock(&acct->lock); pin_kill(old); - mnt_drop_write(mnt); + __mnt_drop_write(mnt); mntput(mnt); return 0; } diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index bf309f2592c461..425c67e4f5681a 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1114,22 +1114,24 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) int err = 0; struct audit_entry *entry; - entry = audit_data_to_entry(data, datasz); - if (IS_ERR(entry)) - return PTR_ERR(entry); - switch (type) { case AUDIT_ADD_RULE: + entry = audit_data_to_entry(data, datasz); + if (IS_ERR(entry)) + return PTR_ERR(entry); err = audit_add_rule(entry); audit_log_rule_change("add_rule", &entry->rule, !err); break; case AUDIT_DEL_RULE: + entry = audit_data_to_entry(data, datasz); + if (IS_ERR(entry)) + return PTR_ERR(entry); err = audit_del_rule(entry); audit_log_rule_change("remove_rule", &entry->rule, !err); break; default: - err = -EINVAL; WARN_ON(1); + return -EINVAL; } if (err || type == AUDIT_DEL_RULE) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 474525e3a9db5d..bad9985b8a0851 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -366,10 +366,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) } #ifdef CONFIG_BPF_JIT +# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000) + /* All BPF JIT sysctl knobs here. */ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); int bpf_jit_harden __read_mostly; int bpf_jit_kallsyms __read_mostly; +int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT; static __always_inline void bpf_get_prog_addr_region(const struct bpf_prog *prog, @@ -578,27 +581,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, return ret; } +static atomic_long_t bpf_jit_current; + +#if defined(MODULES_VADDR) +static int __init bpf_jit_charge_init(void) +{ + /* Only used as heuristic here to derive limit. */ + bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2, + PAGE_SIZE), INT_MAX); + return 0; +} +pure_initcall(bpf_jit_charge_init); +#endif + +static int bpf_jit_charge_modmem(u32 pages) +{ + if (atomic_long_add_return(pages, &bpf_jit_current) > + (bpf_jit_limit >> PAGE_SHIFT)) { + if (!capable(CAP_SYS_ADMIN)) { + atomic_long_sub(pages, &bpf_jit_current); + return -EPERM; + } + } + + return 0; +} + +static void bpf_jit_uncharge_modmem(u32 pages) +{ + atomic_long_sub(pages, &bpf_jit_current); +} + struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns) { struct bpf_binary_header *hdr; - unsigned int size, hole, start; + u32 size, hole, start, pages; /* Most of BPF filters are really small, but if some of them * fill a page, allow at least 128 extra bytes to insert a * random section of illegal instructions. 
*/ size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); + pages = size / PAGE_SIZE; + + if (bpf_jit_charge_modmem(pages)) + return NULL; hdr = module_alloc(size); - if (hdr == NULL) + if (!hdr) { + bpf_jit_uncharge_modmem(pages); return NULL; + } /* Fill space with illegal/arch-dep instructions. */ bpf_fill_ill_insns(hdr, size); - hdr->pages = size / PAGE_SIZE; + hdr->pages = pages; hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr)); start = (get_random_int() % hole) & ~(alignment - 1); @@ -611,7 +651,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, void bpf_jit_binary_free(struct bpf_binary_header *hdr) { + u32 pages = hdr->pages; + module_memfree(hdr); + bpf_jit_uncharge_modmem(pages); } /* This symbol is only overridden by archs that have different diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 141710b82a6c4b..2faad033715f5d 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map) bpf_clear_redirect_map(map); synchronize_rcu(); + /* Make sure prior __dev_map_entry_free() have completed. */ + rcu_barrier(); + /* To ensure all pending flush operations have completed wait for flush * bitmap to indicate all flush_needed bits to be zero on _all_ cpus. * Because the above synchronize_rcu() ensures the map is disconnected diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index cebadd6af4d9d6..6fe72792312d85 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -518,18 +518,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) return insn - insn_buf; } -static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) +static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, + void *key, const bool mark) { struct htab_elem *l = __htab_map_lookup_elem(map, key); if (l) { - bpf_lru_node_set_ref(&l->lru_node); + if (mark) + bpf_lru_node_set_ref(&l->lru_node); return l->key + round_up(map->key_size, 8); } return NULL; } +static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) +{ + return __htab_lru_map_lookup_elem(map, key, true); +} + +static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) +{ + return __htab_lru_map_lookup_elem(map, key, false); +} + static u32 htab_lru_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { @@ -1206,6 +1218,7 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_lru_map_lookup_elem, + .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, .map_update_elem = htab_lru_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, .map_gen_lookup = htab_lru_map_gen_lookup, @@ -1237,7 +1250,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l; void __percpu *pptr; int ret = -ENOENT; @@ -1253,8 +1265,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) l = __htab_map_lookup_elem(map, key); if (!l) goto out; - if (htab_is_lru(htab)) - bpf_lru_node_set_ref(&l->lru_node); + /* We do not mark LRU map element here in order to not mess up + * eviction heuristics when user space does a map walk. 
+ */ pptr = htab_elem_get_ptr(l, map->key_size); for_each_possible_cpu(cpu) { bpf_long_memcpy(value + off, diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 4a8f390a2b821d..dc9d7ac8228db0 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -518,7 +518,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags) static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) { struct bpf_prog *prog; - int ret = inode_permission(inode, MAY_READ | MAY_WRITE); + int ret = inode_permission(inode, MAY_READ); if (ret) return ERR_PTR(ret); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cc40b8be1171d1..ede82382dd325c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -721,7 +721,10 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_fd_reuseport_array_lookup_elem(map, key, value); } else { rcu_read_lock(); - ptr = map->ops->map_lookup_elem(map, key); + if (map->ops->map_lookup_elem_sys_only) + ptr = map->ops->map_lookup_elem_sys_only(map, key); + else + ptr = map->ops->map_lookup_elem(map, key); if (ptr) memcpy(value, ptr, value_size); rcu_read_unlock(); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index acc2305ad89578..d3580a68dbefa9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5743,7 +5743,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, - (1 << size * 8) - 1); + (1ULL << size * 8) - 1); } } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 63dae7e0ccae78..81441117f6114e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4659,9 +4659,11 @@ static void css_release_work_fn(struct work_struct *work) if (cgroup_on_dfl(cgrp)) cgroup_rstat_flush(cgrp); + spin_lock_irq(&css_set_lock); for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) tcgrp->nr_dying_descendants--; + spin_unlock_irq(&css_set_lock); cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); cgrp->id = -1; @@ -4874,12 +4876,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent) if (ret) goto out_idr_free; + spin_lock_irq(&css_set_lock); for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; if (tcgrp != cgrp) tcgrp->nr_descendants++; } + spin_unlock_irq(&css_set_lock); if (notify_on_release(parent)) set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); @@ -5162,10 +5166,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) if (parent && cgroup_is_threaded(cgrp)) parent->nr_threaded_children--; + spin_lock_irq(&css_set_lock); for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) { tcgrp->nr_descendants--; tcgrp->nr_dying_descendants++; } + spin_unlock_irq(&css_set_lock); cgroup1_check_for_release(parent); diff --git a/kernel/cpu.c b/kernel/cpu.c index bc6c880a093f9a..5d65eae893bd74 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2035,7 +2035,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu) kobject_uevent(&dev->kobj, KOBJ_ONLINE); } -static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) +int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { int cpu, ret = 0; @@ -2069,7 +2069,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) return ret; } -static int cpuhp_smt_enable(void) +int cpuhp_smt_enable(void) { int cpu, ret = 0; diff --git a/kernel/cred.c b/kernel/cred.c index ecf03657e71c9c..efd04b2ec84c2c 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -448,6 +448,15 @@ int 
commit_creds(struct cred *new) if (task->mm) set_dumpable(task->mm, suid_dumpable); task->pdeath_signal = 0; + /* + * If a task drops privileges and becomes nondumpable, + * the dumpability change must become visible before + * the credential change; otherwise, a __ptrace_may_access() + * racing with this change may be able to attach to a task it + * shouldn't be able to attach to (as if the task had dropped + * privileges without becoming nondumpable). + * Pairs with a read barrier in __ptrace_may_access(). + */ smp_wmb(); } diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 99c7f199f2d4e4..12f351b253bbb4 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle) unsigned long head; again: + /* + * In order to avoid publishing a head value that goes backwards, + * we must ensure the load of @rb->head happens after we've + * incremented @rb->nest. + * + * Otherwise we can observe a @rb->head value before one published + * by an IRQ/NMI happening between the load and the increment. + */ + barrier(); head = local_read(&rb->head); /* - * IRQ/NMI can happen here, which means we can miss a head update. + * IRQ/NMI can happen here and advance @rb->head, causing our + * load above to be stale. */ - if (!local_dec_and_test(&rb->nest)) + /* + * If this isn't the outermost nesting, we don't have to update + * @rb->user_page->data_head. + */ + if (local_read(&rb->nest) > 1) { + local_dec(&rb->nest); goto out; + } /* * Since the mmap() consumer (userspace) can run on a different CPU: @@ -85,12 +101,21 @@ static void perf_output_put_handle(struct perf_output_handle *handle) * See perf_output_begin(). */ smp_wmb(); /* B, matches C */ - rb->user_page->data_head = head; + WRITE_ONCE(rb->user_page->data_head, head); + + /* + * We must publish the head before decrementing the nest count, + * otherwise an IRQ/NMI can publish a more recent head value and our + * write will (temporarily) publish a stale value. + */ + barrier(); + local_set(&rb->nest, 0); /* - * Now check if we missed an update -- rely on previous implied - * compiler barriers to force a re-read. + * Ensure we decrement @rb->nest before we validate the @rb->head. + * Otherwise we cannot be sure we caught the 'last' nested update. 
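/*
 * Editor's illustrative sketch, not part of this patch: the user-space
 * consumer side of the data_head/data_tail protocol that the barriers
 * above pair with ("B, matches C").  An acquire load of data_head and a
 * release store of data_tail provide the required ordering in user
 * space; record wrap-around is ignored here for brevity.
 */
#include <linux/perf_event.h>

static void drain_ring(struct perf_event_mmap_page *pg, char *data, __u64 mask)
{
	__u64 tail = pg->data_tail;	/* only this consumer writes data_tail */
	__u64 head = __atomic_load_n(&pg->data_head, __ATOMIC_ACQUIRE); /* C */

	while (tail != head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)(data + (tail & mask));

		/* ... consume one record of hdr->size bytes ... */
		tail += hdr->size;
	}

	/* Publish the new tail only after the data has been read (D). */
	__atomic_store_n(&pg->data_tail, tail, __ATOMIC_RELEASE);
}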
*/ + barrier(); if (unlikely(head != local_read(&rb->head))) { local_inc(&rb->nest); goto again; @@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) handle->aux_flags); } - rb->user_page->aux_head = rb->aux_head; + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); if (rb_need_aux_wakeup(rb)) wakeup = true; @@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) rb->aux_head += size; - rb->user_page->aux_head = rb->aux_head; + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); if (rb_need_aux_wakeup(rb)) { perf_output_wakeup(handle); handle->wakeup = rb->aux_wakeup + rb->aux_watermark; diff --git a/kernel/fork.c b/kernel/fork.c index 64ef113e387ed0..69874db3fba831 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -907,6 +907,15 @@ static void mm_init_aio(struct mm_struct *mm) #endif } +static __always_inline void mm_clear_owner(struct mm_struct *mm, + struct task_struct *p) +{ +#ifdef CONFIG_MEMCG + if (mm->owner == p) + WRITE_ONCE(mm->owner, NULL); +#endif +} + static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG @@ -1286,6 +1295,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk) free_pt: /* don't put binfmt in mmput, we haven't got module yet */ mm->binfmt = NULL; + mm_init_owner(mm, NULL); mmput(mm); fail_nomem: @@ -1617,6 +1627,21 @@ static inline void rcu_copy_process(struct task_struct *p) #endif /* #ifdef CONFIG_TASKS_RCU */ } +static void __delayed_free_task(struct rcu_head *rhp) +{ + struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); + + free_task(tsk); +} + +static __always_inline void delayed_free_task(struct task_struct *tsk) +{ + if (IS_ENABLED(CONFIG_MEMCG)) + call_rcu(&tsk->rcu, __delayed_free_task); + else + free_task(tsk); +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -2072,8 +2097,10 @@ static __latent_entropy struct task_struct *copy_process( bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: - if (p->mm) + if (p->mm) { + mm_clear_owner(p->mm, p); mmput(p->mm); + } bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); @@ -2104,7 +2131,7 @@ static __latent_entropy struct task_struct *copy_process( bad_fork_free: p->state = TASK_DEAD; put_task_stack(p); - free_task(p); + delayed_free_task(p); fork_out: spin_lock_irq(¤t->sighand->siglock); hlist_del_init(&delayed.node); diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 6b7cdf17ccf890..73288914ed5e78 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void) */ } -/* - * Enqueue the irq_work @work on @cpu unless it's already pending - * somewhere. - * - * Can be re-enqueued while the callback is still in progress. 
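/*
 * Editor's illustrative sketch, not part of this patch: typical irq_work
 * usage.  After this refactoring both irq_work_queue() and the local-CPU
 * case of irq_work_queue_on() funnel through __irq_work_queue_local().
 * The example_* names below are made up for illustration.
 */
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_irq_work_fn(struct irq_work *work)
{
	/* Runs later in hard-irq context (self-IPI, or the tick for lazy work). */
	pr_info("deferred work ran on CPU %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(example_work, example_irq_work_fn);

static void example_from_nmi(void)
{
	/*
	 * Safe even from NMI context: this only claims the entry and does a
	 * lockless llist_add().  Returns false if it was already pending.
	 */
	irq_work_queue(&example_work);
}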
- */ -bool irq_work_queue_on(struct irq_work *work, int cpu) +/* Enqueue on current CPU, work must already be claimed and preempt disabled */ +static void __irq_work_queue_local(struct irq_work *work) { - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -#ifdef CONFIG_SMP - - /* Arch remote IPI send/receive backend aren't NMI safe */ - WARN_ON_ONCE(in_nmi()); + /* If the work is "lazy", handle it from next tick if any */ + if (work->flags & IRQ_WORK_LAZY) { + if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && + tick_nohz_tick_stopped()) + arch_irq_work_raise(); + } else { + if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) + arch_irq_work_raise(); + } +} +/* Enqueue the irq work @work on the current CPU */ +bool irq_work_queue(struct irq_work *work) +{ /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) - arch_send_call_function_single_ipi(cpu); - -#else /* #ifdef CONFIG_SMP */ - irq_work_queue(work); -#endif /* #else #ifdef CONFIG_SMP */ + /* Queue the entry and raise the IPI if needed. */ + preempt_disable(); + __irq_work_queue_local(work); + preempt_enable(); return true; } +EXPORT_SYMBOL_GPL(irq_work_queue); -/* Enqueue the irq work @work on the current CPU */ -bool irq_work_queue(struct irq_work *work) +/* + * Enqueue the irq_work @work on @cpu unless it's already pending + * somewhere. + * + * Can be re-enqueued while the callback is still in progress. + */ +bool irq_work_queue_on(struct irq_work *work, int cpu) { +#ifndef CONFIG_SMP + return irq_work_queue(work); + +#else /* CONFIG_SMP: */ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; - /* Queue the entry and raise the IPI if needed. */ preempt_disable(); - - /* If the work is "lazy", handle it from next tick if any */ - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) - arch_irq_work_raise(); + if (cpu != smp_processor_id()) { + /* Arch remote IPI send/receive backend aren't NMI safe */ + WARN_ON_ONCE(in_nmi()); + if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) + arch_send_call_function_single_ipi(cpu); } else { - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) - arch_irq_work_raise(); + __irq_work_queue_local(work); } - preempt_enable(); return true; +#endif /* CONFIG_SMP */ } -EXPORT_SYMBOL_GPL(irq_work_queue); + bool irq_work_needs_cpu(void) { diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 2e62503bea0d7d..7c8262635b2921 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -18,8 +18,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); @@ -60,13 +58,13 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) static void jump_label_update(struct static_key *key); /* - * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h. + * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h. * The use of 'atomic_read()' requires atomic.h and its problematic for some * kernel headers such as kernel.h and others. 
Since static_key_count() is not - * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok + * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok * to have it be a function here. Similarly, for 'static_key_enable()' and * 'static_key_disable()', which require bug.h. This should allow jump_label.h - * to be included from most/all places for HAVE_JUMP_LABEL. + * to be included from most/all places for CONFIG_JUMP_LABEL. */ int static_key_count(struct static_key *key) { @@ -796,5 +794,3 @@ static __init int jump_label_test(void) } early_initcall(jump_label_test); #endif /* STATIC_KEYS_SELFTEST */ - -#endif /* HAVE_JUMP_LABEL */ diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index ef909357b84e10..e41e4b4b5267bd 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, { struct rwsem_waiter *waiter, *tmp; long oldcount, woken = 0, adjustment = 0; + struct list_head wlist; /* * Take a peek at the queue head waiter such that we can determine @@ -188,18 +189,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, * of the queue. We know that woken will be at least 1 as we accounted * for above. Note we increment the 'active part' of the count by the * number of readers before waking any processes up. + * + * We have to do wakeup in 2 passes to prevent the possibility that + * the reader count may be decremented before it is incremented. It + * is because the to-be-woken waiter may not have slept yet. So it + * may see waiter->task got cleared, finish its critical section and + * do an unlock before the reader count increment. + * + * 1) Collect the read-waiters in a separate list, count them and + * fully increment the reader count in rwsem. + * 2) For each waiters in the new list, clear waiter->task and + * put them into wake_q to be woken up later. 
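/*
 * Editor's note, not part of this patch: list_cut_before(list, head, entry)
 * moves every entry of @head that precedes @entry onto @list, leaving
 * @entry and everything after it on @head.  In the first pass below,
 * &waiter->list ends up pointing either at the first write-waiter or, if
 * the loop ran to completion, at &sem->wait_list itself - in which case
 * all read-waiters are moved onto wlist.  A minimal illustration:
 */
#include <linux/list.h>

struct demo {
	int id;
	struct list_head node;
};

static void example_list_cut(struct demo *a, struct demo *b,
			     struct demo *c, struct demo *d)
{
	LIST_HEAD(src);
	LIST_HEAD(dst);

	list_add_tail(&a->node, &src);
	list_add_tail(&b->node, &src);
	list_add_tail(&c->node, &src);
	list_add_tail(&d->node, &src);

	/* Cut everything before c: dst becomes {a, b}, src becomes {c, d}. */
	list_cut_before(&dst, &src, &c->node);
}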
*/ - list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) { - struct task_struct *tsk; - + list_for_each_entry(waiter, &sem->wait_list, list) { if (waiter->type == RWSEM_WAITING_FOR_WRITE) break; woken++; - tsk = waiter->task; + } + list_cut_before(&wlist, &sem->wait_list, &waiter->list); + + adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; + if (list_empty(&sem->wait_list)) { + /* hit end of list above */ + adjustment -= RWSEM_WAITING_BIAS; + } + + if (adjustment) + atomic_long_add(adjustment, &sem->count); + + /* 2nd pass */ + list_for_each_entry_safe(waiter, tmp, &wlist, list) { + struct task_struct *tsk; + tsk = waiter->task; get_task_struct(tsk); - list_del(&waiter->list); + /* * Ensure calling get_task_struct() before setting the reader * waiter to nil such that rwsem_down_read_failed() cannot @@ -215,15 +240,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, /* wake_q_add() already take the task ref */ put_task_struct(tsk); } - - adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; - if (list_empty(&sem->wait_list)) { - /* hit end of list above */ - adjustment -= RWSEM_WAITING_BIAS; - } - - if (adjustment) - atomic_long_add(adjustment, &sem->count); } /* diff --git a/kernel/module.c b/kernel/module.c index 38bf28b5cc202f..b8f37376856bdf 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1949,8 +1949,13 @@ void module_enable_ro(const struct module *mod, bool after_init) return; frob_text(&mod->core_layout, set_memory_ro); + frob_text(&mod->core_layout, set_memory_x); + frob_rodata(&mod->core_layout, set_memory_ro); + frob_text(&mod->init_layout, set_memory_ro); + frob_text(&mod->init_layout, set_memory_x); + frob_rodata(&mod->init_layout, set_memory_ro); if (after_init) @@ -3095,7 +3100,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) sizeof(*mod->tracepoints_ptrs), &mod->num_tracepoints); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", sizeof(*mod->jump_entries), &mod->num_jump_entries); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index abef759de7c8fb..f5ce9f7ec132d3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -258,6 +258,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop, (kps % 1000) / 10); } +__weak int arch_resume_nosmt(void) +{ + return 0; +} + /** * create_image - Create a hibernation image. * @platform_mode: Whether or not to use the platform driver. @@ -325,6 +330,10 @@ static int create_image(int platform_mode) Enable_cpus: enable_nonboot_cpus(); + /* Allow architectures to do nosmt-specific post-resume dances */ + if (!in_suspend) + error = arch_resume_nosmt(); + Platform_finish: platform_finish(platform_mode); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index fc0d667f57929f..5d0838c2349eeb 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -323,6 +323,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) return -EPERM; ok: rcu_read_unlock(); + /* + * If a task drops privileges and becomes nondumpable (through a syscall + * like setresuid()) while we are trying to access it, we must ensure + * that the dumpability is read after the credentials; otherwise, + * we may be able to attach to a task that we shouldn't be able to + * attach to (as if the task had dropped privileges without becoming + * nondumpable). + * Pairs with a write barrier in commit_creds(). 
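/*
 * Editor's illustrative sketch, not part of this patch: the write/read
 * barrier pairing described above, reduced to a two-variable model.
 * The real code publishes credentials with rcu_assign_pointer() and
 * reads dumpability via get_dumpable(); the names below are simplified
 * stand-ins.
 */
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

static int shared_dumpable = 1;		/* stands in for the mm dumpable bits */
static void *shared_cred;		/* stands in for task->cred */

static void writer_drop_privs(void *new_cred)
{
	WRITE_ONCE(shared_dumpable, 0);		/* 1) become non-dumpable     */
	smp_wmb();				/* pairs with smp_rmb() below */
	WRITE_ONCE(shared_cred, new_cred);	/* 2) then expose new creds   */
}

static bool reader_may_access(void *caller_cred)
{
	void *c = READ_ONCE(shared_cred);	/* 1) credential checks       */
	bool cred_ok = (c == caller_cred);	/* (simplified)               */

	smp_rmb();				/* pairs with smp_wmb() above */
	/* 2) If the new creds were observed, the new dumpability is too. */
	return cred_ok && READ_ONCE(shared_dumpable);
}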
+ */ + smp_rmb(); mm = task->mm; if (mm && ((get_dumpable(mm) != SUID_DUMP_USER) && @@ -704,6 +714,10 @@ static int ptrace_peek_siginfo(struct task_struct *child, if (arg.nr < 0) return -EINVAL; + /* Ensure arg.off fits in an unsigned long */ + if (arg.off > ULONG_MAX) + return 0; + if (arg.flags & PTRACE_PEEKSIGINFO_SHARED) pending = &child->signal->shared_pending; else @@ -711,18 +725,20 @@ static int ptrace_peek_siginfo(struct task_struct *child, for (i = 0; i < arg.nr; ) { siginfo_t info; - s32 off = arg.off + i; + unsigned long off = arg.off + i; + bool found = false; spin_lock_irq(&child->sighand->siglock); list_for_each_entry(q, &pending->list, list) { if (!off--) { + found = true; copy_siginfo(&info, &q->info); break; } } spin_unlock_irq(&child->sighand->siglock); - if (off >= 0) /* beyond the end of the list */ + if (!found) /* beyond the end of the list */ break; #ifdef CONFIG_COMPAT diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 34244523550e16..19249b86fb33e3 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -561,6 +561,10 @@ rcu_perf_cleanup(void) if (torture_cleanup_begin()) return; + if (!cur_ops) { + torture_cleanup_end(); + return; + } if (reader_tasks) { for (i = 0; i < nrealreaders; i++) @@ -681,6 +685,7 @@ rcu_perf_init(void) pr_cont(" %s", perf_ops[i]->name); pr_cont("\n"); firsterr = -EINVAL; + cur_ops = NULL; goto unwind; } if (cur_ops->init) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index c596c6f1e45717..0b7af7e2bcbb17 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1826,6 +1826,10 @@ rcu_torture_cleanup(void) cur_ops->cb_barrier(); return; } + if (!cur_ops) { + torture_cleanup_end(); + return; + } rcu_torture_barrier_cleanup(); torture_stop_kthread(rcu_torture_stall, stall_task); @@ -1964,6 +1968,7 @@ rcu_torture_init(void) pr_cont(" %s", torture_ops[i]->name); pr_cont("\n"); firsterr = -EINVAL; + cur_ops = NULL; goto unwind; } if (cur_ops->fqs == NULL && fqs_duration != 0) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d7f409866cdf5d..6859ea1d5c0492 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -24,7 +24,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) /* * Debugging: various feature bits * @@ -6491,6 +6491,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset) static int cpu_shares_write_u64(struct cgroup_subsys_state *css, struct cftype *cftype, u64 shareval) { + if (shareval > scale_load_down(ULONG_MAX)) + shareval = MAX_SHARES; return sched_group_set_shares(css_tg(css), scale_load(shareval)); } @@ -6593,8 +6595,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) period = ktime_to_ns(tg->cfs_bandwidth.period); if (cfs_quota_us < 0) quota = RUNTIME_INF; - else + else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) quota = (u64)cfs_quota_us * NSEC_PER_USEC; + else + return -EINVAL; return tg_set_cfs_bandwidth(tg, period, quota); } @@ -6616,6 +6620,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { u64 quota, period; + if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 217f81ecae1761..4e3625109b28d8 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -751,6 +751,7 @@ 
static int sugov_init(struct cpufreq_policy *policy) return 0; fail: + kobject_put(&tunables->attr_set.kobj); policy->governor_data = NULL; sugov_tunables_free(tunables); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 141ea9ff210e36..78fadf0438ea0a 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -73,7 +73,7 @@ static int sched_feat_show(struct seq_file *m, void *v) return 0; } -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define jump_label_key__true STATIC_KEY_INIT_TRUE #define jump_label_key__false STATIC_KEY_INIT_FALSE @@ -99,7 +99,7 @@ static void sched_feat_enable(int i) #else static void sched_feat_disable(int i) { }; static void sched_feat_enable(int i) { }; -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ static int sched_feat_set(char *cmp) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d31916366d39ca..4a433608ba74af 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4209,7 +4209,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) #ifdef CONFIG_CFS_BANDWIDTH -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static struct static_key __cfs_bandwidth_used; static inline bool cfs_bandwidth_used(void) @@ -4226,7 +4226,7 @@ void cfs_bandwidth_usage_dec(void) { static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); } -#else /* HAVE_JUMP_LABEL */ +#else /* CONFIG_JUMP_LABEL */ static bool cfs_bandwidth_used(void) { return true; @@ -4234,7 +4234,7 @@ static bool cfs_bandwidth_used(void) void cfs_bandwidth_usage_inc(void) {} void cfs_bandwidth_usage_dec(void) {} -#endif /* HAVE_JUMP_LABEL */ +#endif /* CONFIG_JUMP_LABEL */ /* * default period for cfs group bandwidth. @@ -9083,22 +9083,26 @@ static inline int on_null_domain(struct rq *rq) * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. + * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set + * anywhere yet. */ static inline int find_new_ilb(void) { - int ilb = cpumask_first(nohz.idle_cpus_mask); + int ilb; - if (ilb < nr_cpu_ids && idle_cpu(ilb)) - return ilb; + for_each_cpu_and(ilb, nohz.idle_cpus_mask, + housekeeping_cpumask(HK_FLAG_MISC)) { + if (idle_cpu(ilb)) + return ilb; + } return nr_cpu_ids; } /* - * Kick a CPU to do the nohz balancing, if it is time for it. We pick the - * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle - * CPU (if there is one). + * Kick a CPU to do the nohz balancing, if it is time for it. We pick any + * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). 
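/*
 * Editor's illustrative sketch, not part of this patch (assumes the
 * surrounding kernel/sched/fair.c context for nohz.idle_cpus_mask):
 * the eligibility test find_new_ilb() now applies.  housekeeping_cpu()
 * with HK_FLAG_MISC is false for nohz_full CPUs, so an isolated CPU is
 * no longer woken up to run the idle load balancer on behalf of others.
 */
static bool example_can_be_ilb(int cpu)
{
	return cpumask_test_cpu(cpu, nohz.idle_cpus_mask) &&
	       housekeeping_cpu(cpu, HK_FLAG_MISC) &&
	       idle_cpu(cpu);
}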
*/ static void kick_ilb(unsigned int flags) { diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 2e2955a8cf8fe3..b980cc96604fa2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2559,6 +2559,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; if (rt_runtime_us < 0) rt_runtime = RUNTIME_INF; + else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } @@ -2579,6 +2581,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) { u64 rt_runtime, rt_period; + if (rt_period_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + rt_period = rt_period_us * NSEC_PER_USEC; rt_runtime = tg->rt_bandwidth.rt_runtime; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4c7a837d7c14ed..9a7c3d08b39f88 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1359,7 +1359,7 @@ enum { #undef SCHED_FEAT -#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) /* * To support run-time toggling of sched features, all the translation units @@ -1379,7 +1379,7 @@ static __always_inline bool static_branch_##name(struct static_key *key) \ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) -#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ +#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ /* * Each translation unit has its own copy of sysctl_sched_features to allow @@ -1395,7 +1395,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) -#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ +#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ extern struct static_key_false sched_numa_balancing; extern struct static_key_false sched_schedstats; diff --git a/kernel/signal.c b/kernel/signal.c index 9102d60fc5c62d..0e6bc3049427e4 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2436,6 +2436,8 @@ bool get_signal(struct ksignal *ksig) if (signal_group_exit(signal)) { ksig->info.si_signo = signr = SIGKILL; sigdelset(¤t->pending.signal, SIGKILL); + trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, + &sighand->action[SIGKILL - 1]); recalc_sigpending(); goto fatal; } diff --git a/kernel/sys.c b/kernel/sys.c index 123bd73046ec6b..096932a4504666 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1919,7 +1919,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map) ((unsigned long)prctl_map->__m1 __op \ (unsigned long)prctl_map->__m2) ? 
0 : -EINVAL error = __prctl_check_order(start_code, <, end_code); - error |= __prctl_check_order(start_data, <, end_data); + error |= __prctl_check_order(start_data,<=, end_data); error |= __prctl_check_order(start_brk, <=, brk); error |= __prctl_check_order(arg_start, <=, arg_end); error |= __prctl_check_order(env_start, <=, env_end); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 9a85c7ae736210..f8576509c7bef2 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2791,8 +2791,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int if (neg) continue; val = convmul * val / convdiv; - if ((min && val < *min) || (max && val > *max)) - continue; + if ((min && val < *min) || (max && val > *max)) { + err = -EINVAL; + break; + } *i = val; } else { val = convdiv * (*i) / convmul; diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index c5e0cba3b39cc1..6b23cd584295f5 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -698,7 +698,7 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai time_constant = max(time_constant, 0l); } - if (txc->modes & ADJ_TAI && txc->constant > 0) + if (txc->modes & ADJ_TAI && txc->constant >= 0) *time_tai = txc->constant; if (txc->modes & ADJ_OFFSET) diff --git a/kernel/time/time.c b/kernel/time/time.c index ccdb351277eecf..be057d6579f138 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -172,7 +172,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz static int firsttime = 1; int error = 0; - if (tv && !timespec64_valid(tv)) + if (tv && !timespec64_valid_settod(tv)) return -EINVAL; error = security_settime64(tv, tz); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 7846ce24ecc032..443edcddac8ab2 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -812,17 +812,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs) struct timekeeper *tk = &tk_core.timekeeper; unsigned int seq; ktime_t base, *offset = offsets[offs]; + u64 nsecs; WARN_ON(timekeeping_suspended); do { seq = read_seqcount_begin(&tk_core.seq); base = ktime_add(tk->tkr_mono.base, *offset); + nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; } while (read_seqcount_retry(&tk_core.seq, seq)); - return base; - + return base + nsecs; } EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset); @@ -1242,7 +1243,7 @@ int do_settimeofday64(const struct timespec64 *ts) unsigned long flags; int ret = 0; - if (!timespec64_valid_strict(ts)) + if (!timespec64_valid_settod(ts)) return -EINVAL; raw_spin_lock_irqsave(&timekeeper_lock, flags); @@ -1299,7 +1300,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts) /* Make sure the proposed value is valid */ tmp = timespec64_add(tk_xtime(tk), *ts); if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 || - !timespec64_valid_strict(&tmp)) { + !timespec64_valid_settod(&tmp)) { ret = -EINVAL; goto error; } @@ -1556,7 +1557,7 @@ void __init timekeeping_init(void) unsigned long flags; read_persistent_wall_and_boot_offset(&wall_time, &boot_offset); - if (timespec64_valid_strict(&wall_time) && + if (timespec64_valid_settod(&wall_time) && timespec64_to_ns(&wall_time) > 0) { persistent_clock_exists = true; } else if (timespec64_to_ns(&wall_time) != 0) { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1bd7a758583b15..181dba75a203c8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8351,12 +8351,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) cnt++; - /* reset all but 
tr, trace, and overruns */ - memset(&iter.seq, 0, - sizeof(struct trace_iterator) - - offsetof(struct trace_iterator, seq)); + trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; - iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 447bd96ee658aa..d11d7bfc3fa5cc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1895,4 +1895,22 @@ static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { } extern struct trace_iterator *tracepoint_print_iter; +/* + * Reset the state of the trace_iterator so that it can read consumed data. + * Normally, the trace_iterator is used for reading the data when it is not + * consumed, and must retain state. + */ +static __always_inline void trace_iterator_reset(struct trace_iterator *iter) +{ + const size_t offset = offsetof(struct trace_iterator, seq); + + /* + * Keep gcc from complaining about overwriting more than just one + * member in the structure. + */ + memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset); + + iter->pos = -1; +} + #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 4ad967453b6fb0..3ea65cdff30d50 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect, int is_constant) { + unsigned long flags = user_access_save(); + /* A constant is always correct */ if (is_constant) { f->constant++; @@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, f->data.correct++; else f->data.incorrect++; + + user_access_restore(flags); } EXPORT_SYMBOL(ftrace_likely_update); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f94be0c2827b0e..7345f5f8f3fe67 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) char buf[32]; int len; - if (*ppos) - return 0; - if (unlikely(!id)) return -ENODEV; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 5a1c64a26e819a..2fb78467582bd1 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -427,7 +427,7 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); - prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL); + prog_stack = kcalloc(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; @@ -576,7 +576,11 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, out_free: kfree(op_stack); kfree(inverts); - kfree(prog_stack); + if (prog_stack) { + for (i = 0; prog_stack[i].pred; i++) + kfree(prog_stack[i].pred); + kfree(prog_stack); + } return ERR_PTR(ret); } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 11853e90b64972..3f34cfb66a85f2 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1632,6 +1632,9 @@ static u64 hist_field_var_ref(struct hist_field *hist_field, struct hist_elt_data *elt_data; u64 var_val = 0; + if (WARN_ON_ONCE(!elt)) + return var_val; + elt_data = elt->private_data; var_val = 
elt_data->var_ref_vals[hist_field->var_ref_idx]; diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 810d78a8d14c76..2905a3dd94c1db 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -41,12 +41,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) kdb_printf("Dumping ftrace buffer:\n"); - /* reset all but tr, trace, and overruns */ - memset(&iter.seq, 0, - sizeof(struct trace_iterator) - - offsetof(struct trace_iterator, seq)); + trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; - iter.pos = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { diff --git a/lib/Makefile b/lib/Makefile index 42387644681094..0ab808318202c1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n +# Early boot use of cmdline, don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KASAN_SANITIZE_string.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_string.o = -pg +endif + +CFLAGS_string.o := $(call cc-option, -fno-stack-protector) +endif + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o\ idr.o int_sqrt.o extable.o \ diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c7c96bc7654af7..dbf2b457e47e6a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query, newflags = (dp->flags & mask) | flags; if (newflags == dp->flags) continue; -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { if (!(flags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 8be175df30753c..acd7b97c16f242 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -817,8 +817,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache); static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) { - struct page *head = compound_head(page); - size_t v = n + offset + page_address(page) - page_address(head); + struct page *head; + size_t v = n + offset; + + /* + * The general case needs to access the page order in order + * to compute the page size. + * However, we mostly deal with order-0 pages and thus can + * avoid a possible cache line miss for requests that fit all + * page orders. + */ + if (n <= v && v <= PAGE_SIZE) + return true; + + head = compound_head(page); + v += (page - head) << PAGE_SHIFT; if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head)))) return true; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 63d0816ab23b09..7761f329433915 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -464,6 +464,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, int i = 0; int retval = 0; + /* + * Mark "remove" event done regardless of result, for some subsystems + * do not want to re-trigger "remove" event via automatic cleanup. 
+ */ + if (action == KOBJ_REMOVE) + kobj->state_remove_uevent_sent = 1; + pr_debug("kobject: '%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); @@ -565,10 +572,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, kobj->state_add_uevent_sent = 1; break; - case KOBJ_REMOVE: - kobj->state_remove_uevent_sent = 1; - break; - case KOBJ_UNBIND: zap_modalias_env(env); break; diff --git a/lib/sbitmap.c b/lib/sbitmap.c index fdd1b8aa8ac63c..0572ac340325a6 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -356,7 +356,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, * to ensure that the batch size is updated before the wait * counts. */ - smp_mb__before_atomic(); + smp_mb(); for (i = 0; i < SBQ_WAIT_QUEUES; i++) atomic_set(&sbq->ws[i].wait_cnt, 1); } diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index b53e1b5d80f429..e304b54c9c7dd3 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -23,10 +23,11 @@ * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ -static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) +static inline long do_strncpy_from_user(char *dst, const char __user *src, + unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long res = 0; + unsigned long res = 0; /* * Truncate 'max' to the user-specified limit, so that diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 60d0bbda8f5e58..184f80f7bacfa9 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -28,7 +28,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long align, res = 0; + unsigned long align, res = 0; unsigned long c; /* @@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Do everything aligned. But that means that we * need to also expand the maximum.. */ - align = (sizeof(long) - 1) & (unsigned long)src; + align = (sizeof(unsigned long) - 1) & (unsigned long)src; src -= align; max += align; diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 7cab9a9869ace9..fd48a15a0710c7 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -223,30 +223,30 @@ static ssize_t config_show(struct device *dev, mutex_lock(&test_fw_mutex); - len += snprintf(buf, PAGE_SIZE, + len += scnprintf(buf, PAGE_SIZE - len, "Custom trigger configuration for: %s\n", dev_name(dev)); if (test_fw_config->name) - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "name:\t%s\n", test_fw_config->name); else - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "name:\tEMTPY\n"); - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "num_requests:\t%u\n", test_fw_config->num_requests); - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "send_uevent:\t\t%s\n", test_fw_config->send_uevent ? "FW_ACTION_HOTPLUG" : "FW_ACTION_NOHOTPLUG"); - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "sync_direct:\t\t%s\n", test_fw_config->sync_direct ? 
"true" : "false"); - len += snprintf(buf+len, PAGE_SIZE, + len += scnprintf(buf+len, PAGE_SIZE - len, "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx); mutex_unlock(&test_fw_mutex); diff --git a/mm/Kconfig b/mm/Kconfig index de64ea658716a8..b457e94ae6182a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -700,12 +700,12 @@ config DEV_PAGEMAP_OPS config HMM bool + select MMU_NOTIFIER select MIGRATE_VMA_HELPER config HMM_MIRROR bool "HMM mirror CPU page table into a device page table" depends on ARCH_HAS_HMM - select MMU_NOTIFIER select HMM help Select HMM_MIRROR if you want to mirror range of the CPU page table of a diff --git a/mm/cma.c b/mm/cma.c index bfe9f5397165c7..476dfe13a701f7 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -106,8 +106,10 @@ static int __init cma_activate_area(struct cma *cma) cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!cma->bitmap) + if (!cma->bitmap) { + cma->count = 0; return -ENOMEM; + } WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); @@ -369,23 +371,26 @@ int __init cma_declare_contiguous(phys_addr_t base, #ifdef CONFIG_CMA_DEBUG static void cma_debug_show_areas(struct cma *cma) { - unsigned long next_zero_bit, next_set_bit; + unsigned long next_zero_bit, next_set_bit, nr_zero; unsigned long start = 0; - unsigned int nr_zero, nr_total = 0; + unsigned long nr_part, nr_total = 0; + unsigned long nbits = cma_bitmap_maxno(cma); mutex_lock(&cma->lock); pr_info("number of available pages: "); for (;;) { - next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start); - if (next_zero_bit >= cma->count) + next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); + if (next_zero_bit >= nbits) break; - next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit); + next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); nr_zero = next_set_bit - next_zero_bit; - pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit); - nr_total += nr_zero; + nr_part = nr_zero << cma->order_per_bit; + pr_cont("%s%lu@%lu", nr_total ? 
"+" : "", nr_part, + next_zero_bit); + nr_total += nr_part; start = next_zero_bit + nr_zero; } - pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count); + pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); mutex_unlock(&cma->lock); } #else diff --git a/mm/cma_debug.c b/mm/cma_debug.c index ad6723e9d110a9..3e0415076cc9ef 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -58,7 +58,7 @@ static int cma_maxchunk_get(void *data, u64 *val) mutex_lock(&cma->lock); for (;;) { start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); - if (start >= cma->count) + if (start >= bitmap_maxno) break; end = find_next_bit(cma->bitmap, bitmap_maxno, start); maxchunk = max(end - start, maxchunk); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7d08e89361ee8e..6fad1864ba03bc 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -772,11 +772,13 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pte_free(mm, pgtable); } -vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, pfn_t pfn, bool write) +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) { + unsigned long addr = vmf->address & PMD_MASK; + struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; + /* * If we had pmd_special, we could avoid all these restrictions, * but we need to be consistent with PTEs and architectures that @@ -799,7 +801,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, track_pfn_insert(vma, &pgprot, pfn); - insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); + insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); @@ -848,10 +850,12 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, spin_unlock(ptl); } -vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, pfn_t pfn, bool write) +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) { + unsigned long addr = vmf->address & PUD_MASK; + struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; + /* * If we had pud_special, we could avoid all these restrictions, * but we need to be consistent with PTEs and architectures that @@ -868,7 +872,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, track_pfn_insert(vma, &pgprot, pfn); - insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); + insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5fb779cda972cc..65179513c2b251 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1256,12 +1256,23 @@ void free_huge_page(struct page *page) ClearPagePrivate(page); /* - * A return code of zero implies that the subpool will be under its - * minimum size if the reservation is not restored after page is free. - * Therefore, force restore_reserve operation. + * If PagePrivate() was set on page, page allocation consumed a + * reservation. If the page was associated with a subpool, there + * would have been a page reserved in the subpool before allocation + * via hugepage_subpool_get_pages(). Since we are 'restoring' the + * reservtion, do not call hugepage_subpool_put_pages() as this will + * remove the reserved page from the subpool. 
*/ - if (hugepage_subpool_put_pages(spool, 1) == 0) - restore_reserve = true; + if (!restore_reserve) { + /* + * A return code of zero implies that the subpool will be + * under its minimum size if the reservation is not restored + * after page is free. Therefore, force restore_reserve + * operation. + */ + if (hugepage_subpool_put_pages(spool, 1) == 0) + restore_reserve = true; + } spin_lock(&hugetlb_lock); clear_page_huge_active(page); @@ -1572,8 +1583,9 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, */ if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { SetPageHugeTemporary(page); + spin_unlock(&hugetlb_lock); put_page(page); - page = NULL; + return NULL; } else { h->surplus_huge_pages++; h->surplus_huge_pages_node[page_to_nid(page)]++; @@ -3777,8 +3789,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, * handling userfault. Reacquire after handling * fault to make calling code simpler. */ - hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, - idx, haddr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); mutex_unlock(&hugetlb_fault_mutex_table[hash]); ret = handle_userfault(&vmf, VM_UFFD_MISSING); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -3886,21 +3897,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, } #ifdef CONFIG_SMP -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, - struct vm_area_struct *vma, - struct address_space *mapping, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, pgoff_t idx, unsigned long address) { unsigned long key[2]; u32 hash; - if (vma->vm_flags & VM_SHARED) { - key[0] = (unsigned long) mapping; - key[1] = idx; - } else { - key[0] = (unsigned long) mm; - key[1] = address >> huge_page_shift(h); - } + key[0] = (unsigned long) mapping; + key[1] = idx; hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); @@ -3911,9 +3915,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, * For uniprocesor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead. */ -u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, - struct vm_area_struct *vma, - struct address_space *mapping, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, pgoff_t idx, unsigned long address) { return 0; @@ -3958,7 +3960,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ - hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index fde5820be24d8a..ecefdba4b0ddae 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1005,6 +1005,9 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. */ down_write(&mm->mmap_sem); + result = SCAN_ANY_PROCESS; + if (!mmget_still_valid(mm)) + goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; diff --git a/mm/list_lru.c b/mm/list_lru.c index 5b30625fd3651a..758653dd144314 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -37,11 +37,7 @@ static int lru_shrinker_id(struct list_lru *lru) static inline bool list_lru_memcg_aware(struct list_lru *lru) { - /* - * This needs node 0 to be always present, even - * in the systems supporting sparse numa ids. 
- */ - return !!lru->node[0].memcg_lrus; + return lru->memcg_aware; } static inline struct list_lru_one * @@ -357,7 +353,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, } return 0; fail: - __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); + __memcg_destroy_list_lru_node(memcg_lrus, begin, i); return -ENOMEM; } @@ -451,6 +447,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { int i; + lru->memcg_aware = memcg_aware; + if (!memcg_aware) return 0; diff --git a/mm/mincore.c b/mm/mincore.c index fc37afe226e658..2732c8c0764c19 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -169,6 +169,22 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return 0; } +static inline bool can_do_mincore(struct vm_area_struct *vma) +{ + if (vma_is_anonymous(vma)) + return true; + if (!vma->vm_file) + return false; + /* + * Reveal pagecache information only for non-anonymous mappings that + * correspond to the files the calling process could (if tried) open + * for writing; otherwise we'd be including shared non-exclusive + * mappings, which opens a side channel. + */ + return inode_owner_or_capable(file_inode(vma->vm_file)) || + inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; +} + /* * Do a chunk of "sys_mincore()". We've already checked * all the arguments, we hold the mmap semaphore: we should @@ -189,8 +205,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v vma = find_vma(current->mm, addr); if (!vma || addr < vma->vm_start) return -ENOMEM; - mincore_walk.mm = vma->vm_mm; end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); + if (!can_do_mincore(vma)) { + unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE); + memset(vec, 1, pages); + return pages; + } + mincore_walk.mm = vma->vm_mm; err = walk_page_range(addr, end, &mincore_walk); if (err < 0) return err; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8e6932a140b827..2d04bd2e1ced75 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5937,13 +5937,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long *zone_end_pfn, unsigned long *ignored) { + unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; + unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; /* Get the start and end of the zone */ - *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; - *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; + *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); + *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); adjust_zone_range_for_zone_movable(nid, zone_type, node_start_pfn, node_end_pfn, zone_start_pfn, zone_end_pfn); diff --git a/mm/percpu.c b/mm/percpu.c index 41e58f3d8fbf22..ff76fa0b752883 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -988,7 +988,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, /* * Search to find a fit. 
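/*
 * Editor's illustrative sketch, not part of this patch: the contract of
 * bitmap_find_next_zero_area() that makes the clamping below necessary.
 * The search covers bits [start, size) of @map, so @size must never
 * exceed the bitmap's real length, and a return value >= @size means no
 * suitable free area was found.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

#define EXAMPLE_BITS 128	/* hypothetical bitmap length in bits */

static int example_alloc_bits(unsigned long *map, unsigned int nr_bits)
{
	unsigned long off;

	off = bitmap_find_next_zero_area(map, EXAMPLE_BITS, 0, nr_bits, 0);
	if (off >= EXAMPLE_BITS)
		return -ENOSPC;		/* no free run of nr_bits bits */

	bitmap_set(map, off, nr_bits);	/* claim it */
	return off;
}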
*/ - end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS; + end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, + pcpu_chunk_map_bits(chunk)); bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start, alloc_bits, align_mask); if (bit_off >= end) @@ -1721,6 +1722,7 @@ void free_percpu(void __percpu *ptr) struct pcpu_chunk *chunk; unsigned long flags; int off; + bool need_balance = false; if (!ptr) return; @@ -1742,7 +1744,7 @@ void free_percpu(void __percpu *ptr) list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) if (pos != chunk) { - pcpu_schedule_balance_work(); + need_balance = true; break; } } @@ -1750,6 +1752,9 @@ void free_percpu(void __percpu *ptr) trace_percpu_free_percpu(chunk->base_addr, off, ptr); spin_unlock_irqrestore(&pcpu_lock, flags); + + if (need_balance) + pcpu_schedule_balance_work(); } EXPORT_SYMBOL_GPL(free_percpu); diff --git a/mm/rmap.c b/mm/rmap.c index 85b7f94233526a..f048c2651954b6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -926,7 +926,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, continue; flush_cache_page(vma, address, page_to_pfn(page)); - entry = pmdp_huge_clear_flush(vma, address, pmd); + entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); diff --git a/mm/slab.c b/mm/slab.c index 018d32496e8d13..46f21e73db2f8f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4326,8 +4326,12 @@ static int leaks_show(struct seq_file *m, void *p) * whole processing. */ do { - set_store_user_clean(cachep); drain_cpu_caches(cachep); + /* + * drain_cpu_caches() could make kmemleak_object and + * debug_objects_cache dirty, so reset afterwards. + */ + set_store_user_clean(cachep); x[1] = 0; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 458acda96f2075..7529d3fcc89990 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -271,8 +271,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, */ idx = linear_page_index(dst_vma, dst_addr); mapping = dst_vma->vm_file->f_mapping; - hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, - idx, dst_addr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr); mutex_lock(&hugetlb_fault_mutex_table[hash]); err = -ENOMEM; diff --git a/mm/vmscan.c b/mm/vmscan.c index ee545d1e9894de..dec88fcf887651 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1510,7 +1510,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && - !__PageMovable(page)) { + !__PageMovable(page) && !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 66f74c85cf6bd1..66d54fc11831c0 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -429,9 +429,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } if (ax25->sk != NULL) { + local_bh_disable(); bh_lock_sock(ax25->sk); sock_reset_flag(ax25->sk, SOCK_ZAPPED); bh_unlock_sock(ax25->sk); + local_bh_enable(); } put: diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index a60bacf7120be8..2895e3b26e9304 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1394,7 +1394,6 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, hw_src, &ip_src, hw_dst, &ip_dst, dat_entry->mac_addr, &dat_entry->ip); 
dropped = true; - goto out; } /* Update our internal cache with both the IP addresses the node got @@ -1403,6 +1402,9 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); + if (dropped) + goto out; + /* If BLA is enabled, only forward ARP replies if we have claimed the * source of the ARP reply or if no one else of the same backbone has * already claimed that client. This prevents that different gateways diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 69c0d85bceb3e0..79b8a2d8793e8b 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -160,6 +160,7 @@ int batadv_mesh_init(struct net_device *soft_iface) spin_lock_init(&bat_priv->tt.commit_lock); spin_lock_init(&bat_priv->gw.list_lock); #ifdef CONFIG_BATMAN_ADV_MCAST + spin_lock_init(&bat_priv->mcast.mla_lock); spin_lock_init(&bat_priv->mcast.want_lists_lock); #endif spin_lock_init(&bat_priv->tvlv.container_list_lock); diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 86725d792e1550..b90fe25d6b0b11 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -325,8 +325,6 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list) * translation table except the ones listed in the given mcast_list. * * If mcast_list is NULL then all are retracted. - * - * Do not call outside of the mcast worker! (or cancel mcast worker first) */ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv, struct hlist_head *mcast_list) @@ -334,8 +332,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv, struct batadv_hw_addr *mcast_entry; struct hlist_node *tmp; - WARN_ON(delayed_work_pending(&bat_priv->mcast.work)); - hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list, list) { if (mcast_list && @@ -359,8 +355,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv, * * Adds multicast listener announcements from the given mcast_list to the * translation table if they have not been added yet. - * - * Do not call outside of the mcast worker! 
(or cancel mcast worker first) */ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, struct hlist_head *mcast_list) @@ -368,8 +362,6 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, struct batadv_hw_addr *mcast_entry; struct hlist_node *tmp; - WARN_ON(delayed_work_pending(&bat_priv->mcast.work)); - if (!mcast_list) return; @@ -658,7 +650,10 @@ static void batadv_mcast_mla_update(struct work_struct *work) priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work); bat_priv = container_of(priv_mcast, struct batadv_priv, mcast); + spin_lock(&bat_priv->mcast.mla_lock); __batadv_mcast_mla_update(bat_priv); + spin_unlock(&bat_priv->mcast.mla_lock); + batadv_mcast_start_timer(bat_priv); } diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 343d304851a5c9..eeee3e61c625df 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1215,6 +1215,11 @@ struct batadv_priv_mcast { /** @bridged: whether the soft interface has a bridge on top */ unsigned char bridged:1; + /** + * @mla_lock: a lock protecting mla_list and mla_flags + */ + spinlock_t mla_lock; + /** * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP * traffic diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 3cf0764d5793f4..15d1cb5aee18da 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1276,14 +1276,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn) !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; - /* The minimum encryption key size needs to be enforced by the - * host stack before establishing any L2CAP connections. The - * specification in theory allows a minimum of 1, but to align - * BR/EDR and LE transports, a minimum of 7 is chosen. - */ - if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE) - return 0; - return 1; } @@ -1400,8 +1392,16 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, return 0; encrypt: - if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { + /* Ensure that the encryption key size has been read, + * otherwise stall the upper layer responses. + */ + if (!conn->enc_key_size) + return 0; + + /* Nothing else needed, all requirements are met */ return 1; + } hci_conn_encrypt(conn); return 0; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index a06f0304771734..5afd67ef797a63 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -4274,6 +4274,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, return; } + /* If we reach this point this event matches the last command sent */ + hci_dev_clear_flag(hdev, HCI_CMD_PENDING); + /* If the command succeeded and there's still more commands in * this request the request is not yet complete. 
*/ @@ -4384,6 +4387,8 @@ static void hci_cmd_work(struct work_struct *work) hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); if (hdev->sent_cmd) { + if (hci_req_status_pend(hdev)) + hci_dev_set_flag(hdev, HCI_CMD_PENDING); atomic_dec(&hdev->cmd_cnt); hci_send_frame(hdev, skb); if (test_bit(HCI_RESET, &hdev->flags)) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 7f800c3480f7a1..3e7badb3ac2d50 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3357,6 +3357,12 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_req_cmd_complete(hdev, *opcode, *status, req_complete, req_complete_skb); + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { + bt_dev_err(hdev, + "unexpected event for opcode 0x%4.4x", *opcode); + return; + } + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } @@ -3464,6 +3470,12 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, req_complete_skb); + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { + bt_dev_err(hdev, + "unexpected event for opcode 0x%4.4x", *opcode); + return; + } + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index e8c9ef1e192273..9448ebd3780a37 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -46,6 +46,11 @@ void hci_req_purge(struct hci_request *req) skb_queue_purge(&req->cmd_q); } +bool hci_req_status_pend(struct hci_dev *hdev) +{ + return hdev->req_status == HCI_REQ_PEND; +} + static int req_run(struct hci_request *req, hci_req_complete_t complete, hci_req_complete_skb_t complete_skb) { diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 692cc8b133682a..55b2050cc9ff05 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -37,6 +37,7 @@ struct hci_request { void hci_req_init(struct hci_request *req, struct hci_dev *hdev); void hci_req_purge(struct hci_request *req); +bool hci_req_status_pend(struct hci_dev *hdev); int hci_req_run(struct hci_request *req, hci_req_complete_t complete); int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 2c6eabf294b31c..69e3be51a2c331 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1340,6 +1340,21 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } +static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +{ + /* The minimum encryption key size needs to be enforced by the + * host stack before establishing any L2CAP connections. The + * specification in theory allows a minimum of 1, but to align + * BR/EDR and LE transports, a minimum of 7 is chosen. + * + * This check might also be called for unencrypted connections + * that have no key size requirements. Ensure that the link is + * actually encrypted before enforcing a key size. 
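The comment above, together with the return expression that follows it, enforces the minimum encryption key size only when the link is actually encrypted. A standalone sketch of that predicate — struct link, link_enc_key_size_ok() and MIN_ENC_KEY_SIZE are illustrative stand-ins for the hci_conn names in the patch, and the comparison simply mirrors the hunk as shown:

#include <stdbool.h>

#define MIN_ENC_KEY_SIZE 7              /* illustrative stand-in for HCI_MIN_ENC_KEY_SIZE */

struct link {                           /* illustrative stand-in for struct hci_conn */
	bool encrypted;
	unsigned char enc_key_size;     /* octets; 0 while still unknown */
};

/* Unencrypted links carry no key-size requirement; encrypted ones must
 * satisfy the minimum, mirroring the expression in the hunk shown here. */
static bool link_enc_key_size_ok(const struct link *l)
{
	return !l->encrypted || l->enc_key_size > MIN_ENC_KEY_SIZE;
}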
+ */ + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || + hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE); +} + static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -1357,9 +1372,14 @@ static void l2cap_do_start(struct l2cap_chan *chan) if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; - if (l2cap_chan_check_security(chan, true) && - __l2cap_no_conn_pending(chan)) + if (!l2cap_chan_check_security(chan, true) || + !__l2cap_no_conn_pending(chan)) + return; + + if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) @@ -1438,7 +1458,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - l2cap_start_connection(chan); + if (l2cap_check_enc_key_size(conn->hcon)) + l2cap_start_connection(chan); + else + l2cap_chan_close(chan, ECONNREFUSED); } else if (chan->state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; @@ -7455,7 +7478,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status) + if (!status && l2cap_check_enc_key_size(hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -7464,7 +7487,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status) { + if (!status && l2cap_check_enc_key_size(hcon)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/can/af_can.c b/net/can/af_can.c index 1684ba5b51eb6c..e386d654116dd6 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -105,6 +105,7 @@ EXPORT_SYMBOL(can_ioctl); static void can_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_error_queue); } static const struct can_proto *can_get_proto(int protocol) diff --git a/net/core/dev.c b/net/core/dev.c index 3bcec116a5f2ca..138951d286432b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1821,7 +1821,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); #endif static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static atomic_t netstamp_needed_deferred; static atomic_t netstamp_wanted; static void netstamp_clear(struct work_struct *work) @@ -1840,7 +1840,7 @@ static DECLARE_WORK(netstamp_work, netstamp_clear); void net_enable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL int wanted; while (1) { @@ -1860,7 +1860,7 @@ EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL int wanted; while (1) { @@ -5725,7 +5725,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) skb_reset_mac_header(skb); skb_gro_reset_offset(skb); - eth = skb_gro_header_fast(skb, 0); if (unlikely(skb_gro_header_hard(skb, hlen))) { eth = skb_gro_header_slow(skb, hlen, 0); if (unlikely(!eth)) { @@ -5735,6 +5734,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) return NULL; } } else { + eth = (const struct ethhdr *)skb->data; gro_pull_from_frag0(skb, hlen); NAPI_GRO_CB(skb)->frag0 += hlen; NAPI_GRO_CB(skb)->frag0_len -= hlen; @@ -8716,7 +8716,7 @@ static void netdev_wait_allrefs(struct net_device *dev) refcnt = netdev_refcnt_read(dev); - if (time_after(jiffies, warning_time + 10 * HZ)) { + if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", dev->name, refcnt); warning_time = jiffies; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 7cc97f43f13859..996813f345d545 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -880,8 +880,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, if (rc >= 0) info.n_priv_flags = rc; } - if (ops->get_regs_len) - info.regdump_len = ops->get_regs_len(dev); + if (ops->get_regs_len) { + int ret = ops->get_regs_len(dev); + + if (ret > 0) + info.regdump_len = ret; + } + if (ops->get_eeprom_len) info.eedump_len = ops->get_eeprom_len(dev); @@ -1424,6 +1429,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) return -EFAULT; reglen = ops->get_regs_len(dev); + if (reglen <= 0) + return reglen; + if (regs.len > reglen) regs.len = reglen; @@ -1434,13 +1442,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) return -ENOMEM; } + if (regs.len < reglen) + reglen = regs.len; + ops->get_regs(dev, ®s, regbuf); ret = -EFAULT; if (copy_to_user(useraddr, ®s, sizeof(regs))) goto out; useraddr += offsetof(struct ethtool_regs, data); - if (regbuf && copy_to_user(useraddr, regbuf, regs.len)) + if (copy_to_user(useraddr, regbuf, reglen)) goto out; ret = 0; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 338147b14d0e7c..0ff3953f64aa78 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -756,9 +756,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) goto errout; - if (rule_exists(ops, frh, tb, rule)) { - if (nlh->nlmsg_flags & NLM_F_EXCL) - err = -EEXIST; + if ((nlh->nlmsg_flags & NLM_F_EXCL) && + rule_exists(ops, frh, tb, rule)) { + err = -EEXIST; goto errout_free; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 4e07824eec5e00..cd9e991f21d734 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -2536,7 +2537,13 @@ int neigh_xmit(int index, struct net_device *dev, if (!tbl) goto out; rcu_read_lock_bh(); - neigh = __neigh_lookup_noref(tbl, addr, dev); + if (index == NEIGH_ARP_TABLE) { + u32 key = *((u32 *)addr); + + neigh = __ipv4_neigh_lookup_noref(dev, key); + } else { + neigh = __neigh_lookup_noref(tbl, addr, dev); + } if (!neigh) neigh = __neigh_create(tbl, addr, dev, false); err = PTR_ERR(neigh); @@ -2744,6 +2751,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) } void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) + __acquires(tbl->lock) __acquires(rcu_bh) { struct neigh_seq_state *state = seq->private; @@ -2754,6 +2762,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl rcu_read_lock_bh(); state->nht = rcu_dereference_bh(tbl->nht); + read_lock(&tbl->lock); return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; } @@ -2787,8 +2796,13 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) EXPORT_SYMBOL(neigh_seq_next); void neigh_seq_stop(struct seq_file *seq, void *v) + __releases(tbl->lock) __releases(rcu_bh) { + struct neigh_seq_state *state = seq->private; + struct neigh_table *tbl = state->tbl; + + read_unlock(&tbl->lock); rcu_read_unlock_bh(); } EXPORT_SYMBOL(neigh_seq_stop); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 7f6938405fa192..092fa3d75b32a8 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3065,7 +3065,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t) { while (thread_is_running(t)) { + /* note: 't' will still be around even after the unlock/lock + * cycle because pktgen_thread threads are only cleared at + * net exit + */ + mutex_unlock(&pktgen_thread_lock); msleep_interruptible(100); + mutex_lock(&pktgen_thread_lock); if (signal_pending(current)) goto signal; @@ -3080,6 +3086,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn) struct pktgen_thread *t; int sig = 1; + /* prevent from racing with rmmod */ + if (!try_module_get(THIS_MODULE)) + return sig; + mutex_lock(&pktgen_thread_lock); list_for_each_entry(t, &pn->pktgen_threads, th_list) { @@ -3093,6 +3103,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn) t->control |= (T_STOP); mutex_unlock(&pktgen_thread_lock); + module_put(THIS_MODULE); return sig; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ebde98b565e9dd..3932eed379a40f 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1496,14 +1496,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev) return ret; } -static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev) +static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev, + bool force) { int ifindex = dev_get_iflink(dev); - if (dev->ifindex == ifindex) - return 0; + if (force || dev->ifindex != ifindex) + return nla_put_u32(skb, IFLA_LINK, ifindex); - return nla_put_u32(skb, IFLA_LINK, ifindex); + return 0; } static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, @@ -1520,6 +1521,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb, const struct net_device *dev, struct net *src_net) { + bool put_iflink = false; + if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); @@ -1528,10 +1531,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb, if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) return -EMSGSIZE; + + put_iflink = true; } } - return 0; + return nla_put_iflink(skb, dev, put_iflink); } static int rtnl_fill_link_af(struct sk_buff *skb, @@ -1617,7 +1622,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, #ifdef CONFIG_RPS nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || #endif - nla_put_iflink(skb, dev) || put_master_ifindex(skb, dev) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || (dev->qdisc && diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index b1a2c5e38530a7..37b4667128a380 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, return ret; } -# ifdef CONFIG_HAVE_EBPF_JIT static int proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct 
ctl_table *table, int write, return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } -# endif #endif static struct ctl_table net_core_table[] = { @@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = { .extra2 = &one, }, # endif + { + .procname = "bpf_jit_limit", + .data = &bpf_jit_limit, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax_bpf_restricted, + .extra1 = &one, + }, #endif { .procname = "netdev_tstamp_prequeue", diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 12a43a5369a54d..114f9def1ec548 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -223,7 +223,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) tail[plen - 1] = proto; } -static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) +static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { int encap_type; struct udphdr *uh; @@ -231,6 +231,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru __be16 sport, dport; struct xfrm_encap_tmpl *encap = x->encap; struct ip_esp_hdr *esph = esp->esph; + unsigned int len; spin_lock_bh(&x->lock); sport = encap->encap_sport; @@ -238,11 +239,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru encap_type = encap->encap_type; spin_unlock_bh(&x->lock); + len = skb->len + esp->tailen - skb_transport_offset(skb); + if (len + sizeof(struct iphdr) >= IP_MAX_MTU) + return -EMSGSIZE; + uh = (struct udphdr *)esph; uh->source = sport; uh->dest = dport; - uh->len = htons(skb->len + esp->tailen - - skb_transport_offset(skb)); + uh->len = htons(len); uh->check = 0; switch (encap_type) { @@ -259,6 +263,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru *skb_mac_header(skb) = IPPROTO_UDP; esp->esph = esph; + + return 0; } int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) @@ -272,8 +278,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * int tailen = esp->tailen; /* this is non-NULL only with UDP Encapsulation */ - if (x->encap) - esp_output_udp_encap(x, skb, esp); + if (x->encap) { + int err = esp_output_udp_encap(x, skb, esp); + + if (err < 0) + return err; + } if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 4da39446da2d89..d187ee8156a19f 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -190,6 +190,17 @@ static void ip_ma_put(struct ip_mc_list *im) pmc != NULL; \ pmc = rtnl_dereference(pmc->next_rcu)) +static void ip_sf_list_clear_all(struct ip_sf_list *psf) +{ + struct ip_sf_list *next; + + while (psf) { + next = psf->sf_next; + kfree(psf); + psf = next; + } +} + #ifdef CONFIG_IP_MULTICAST /* @@ -635,6 +646,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf) } } +static void kfree_pmc(struct ip_mc_list *pmc) +{ + ip_sf_list_clear_all(pmc->sources); + ip_sf_list_clear_all(pmc->tomb); + kfree(pmc); +} + static void igmpv3_send_cr(struct in_device *in_dev) { struct ip_mc_list *pmc, *pmc_prev, *pmc_next; @@ -671,7 +689,7 @@ static void igmpv3_send_cr(struct in_device *in_dev) else in_dev->mc_tomb = pmc_next; in_dev_put(pmc->interface); - kfree(pmc); + kfree_pmc(pmc); } else pmc_prev = pmc; } @@ -1201,14 +1219,18 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) im->interface = pmc->interface; if (im->sfmode == MCAST_INCLUDE) { im->tomb = pmc->tomb; + pmc->tomb = 
NULL; + im->sources = pmc->sources; + pmc->sources = NULL; + for (psf = im->sources; psf; psf = psf->sf_next) psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; } else { im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; } in_dev_put(pmc->interface); - kfree(pmc); + kfree_pmc(pmc); } spin_unlock_bh(&im->lock); } @@ -1229,21 +1251,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev) nextpmc = pmc->next; ip_mc_clear_src(pmc); in_dev_put(pmc->interface); - kfree(pmc); + kfree_pmc(pmc); } /* clear dead sources, too */ rcu_read_lock(); for_each_pmc_rcu(in_dev, pmc) { - struct ip_sf_list *psf, *psf_next; + struct ip_sf_list *psf; spin_lock_bh(&pmc->lock); psf = pmc->tomb; pmc->tomb = NULL; spin_unlock_bh(&pmc->lock); - for (; psf; psf = psf_next) { - psf_next = psf->sf_next; - kfree(psf); - } + ip_sf_list_clear_all(psf); } rcu_read_unlock(); } @@ -2114,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, static void ip_mc_clear_src(struct ip_mc_list *pmc) { - struct ip_sf_list *psf, *nextpsf, *tomb, *sources; + struct ip_sf_list *tomb, *sources; spin_lock_bh(&pmc->lock); tomb = pmc->tomb; @@ -2126,14 +2145,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) pmc->sfcount[MCAST_EXCLUDE] = 1; spin_unlock_bh(&pmc->lock); - for (psf = tomb; psf; psf = nextpsf) { - nextpsf = psf->sf_next; - kfree(psf); - } - for (psf = sources; psf; psf = nextpsf) { - nextpsf = psf->sf_next; - kfree(psf); - } + ip_sf_list_clear_all(tomb); + ip_sf_list_clear_all(sources); } /* Join a multicast group diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 40a7cd56e00873..808f8d15c5197c 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -659,9 +659,9 @@ static int __init vti_init(void) return err; rtnl_link_failed: - xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); -xfrm_tunnel_failed: xfrm4_tunnel_deregister(&ipip_handler, AF_INET); +xfrm_tunnel_failed: + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); xfrm_proto_comp_failed: xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm_proto_ah_failed: @@ -676,6 +676,7 @@ static int __init vti_init(void) static void __exit vti_fini(void) { rtnl_link_unregister(&vti_link_ops); + xfrm4_tunnel_deregister(&ipip_handler, AF_INET); xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 70289682a67014..eab5c02da8ae84 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -290,6 +290,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), + SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8bacbcd2db90b9..232581c140a05e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -500,15 +500,17 @@ EXPORT_SYMBOL(ip_idents_reserve); void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) { - static u32 ip_idents_hashrnd __read_mostly; u32 hash, id; - net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); + /* Note the following code is not safe, but this is okay. 
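The __ip_select_ident() change here replaces a global jhash seed with a per-netns key that is seeded lazily on first use; the race the comment mentions is tolerated because any fully written random key is equally good. A rough userspace sketch of that lazy-seeding pattern, assuming getrandom(2) is available and leaving siphash itself out — the names are illustrative only:

#include <stdint.h>
#include <sys/random.h>

struct id_key { uint64_t k[2]; };       /* stand-in for siphash_key_t */
static struct id_key ip_id_key;         /* all-zero until first use */

static int key_is_zero(const struct id_key *k)
{
	return !(k->k[0] | k->k[1]);
}

/* Lazily seed the key on first use.  A racing initializer would only
 * overwrite random bytes with other random bytes, which is harmless --
 * the same reasoning as the "not safe, but this is okay" comment above. */
static void ip_id_key_init_once(void)
{
	if (key_is_zero(&ip_id_key))
		(void)getrandom(&ip_id_key, sizeof(ip_id_key), 0);
}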
*/ + if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) + get_random_bytes(&net->ipv4.ip_id_key, + sizeof(net->ipv4.ip_id_key)); - hash = jhash_3words((__force u32)iph->daddr, + hash = siphash_3u32((__force u32)iph->daddr, (__force u32)iph->saddr, - iph->protocol ^ net_hash_mix(net), - ip_idents_hashrnd); + iph->protocol, + &net->ipv4.ip_id_key); id = ip_idents_reserve(hash, segs); iph->id = htons(id); } @@ -1958,7 +1960,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, u32 itag = 0; struct rtable *rth; struct flowi4 fl4; - bool do_cache; + bool do_cache = true; /* IP on this device is disabled. */ @@ -2035,6 +2037,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (res->type == RTN_BROADCAST) { if (IN_DEV_BFORWARD(in_dev)) goto make_route; + /* not do cache if bc_forwarding is enabled */ + if (IPV4_DEVCONF_ALL(net, BC_FORWARDING)) + do_cache = false; goto brd_input; } @@ -2072,16 +2077,13 @@ out: return err; RT_CACHE_STAT_INC(in_brd); local_input: - do_cache = false; - if (res->fi) { - if (!itag) { - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); - if (rt_cache_valid(rth)) { - skb_dst_set_noref(skb, &rth->dst); - err = 0; - goto out; - } - do_cache = true; + do_cache &= res->fi && !itag; + if (do_cache) { + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); + if (rt_cache_valid(rth)) { + skb_dst_set_noref(skb, &rth->dst); + err = 0; + goto out; } } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index ce64453d337d34..ad132b6e8cfade 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -39,6 +39,8 @@ static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; static int tcp_adv_win_scale_min = -31; static int tcp_adv_win_scale_max = 31; +static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS; +static int tcp_min_snd_mss_max = 65535; static int ip_privileged_port_min; static int ip_privileged_port_max = 65535; static int ip_ttl_min = 1; @@ -737,6 +739,15 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "tcp_min_snd_mss", + .data = &init_net.ipv4.sysctl_tcp_min_snd_mss, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &tcp_min_snd_mss_min, + .extra2 = &tcp_min_snd_mss_max, + }, { .procname = "tcp_probe_threshold", .data = &init_net.ipv4.sysctl_tcp_probe_threshold, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 30c6e94b06c49d..364e6fdaa38f50 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3829,6 +3829,7 @@ void __init tcp_init(void) unsigned long limit; unsigned int i; + BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cfdd70e32755e6..4a8869d3966221 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1315,7 +1315,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); - BUG_ON(tcp_skb_pcount(skb) < pcount); + WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, @@ -1381,6 +1381,21 @@ static int skb_can_shift(const struct sk_buff *skb) return !skb_headlen(skb) && skb_is_nonlinear(skb); } +int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, + int pcount, int shiftlen) +{ + /* TCP 
min gso_size is 8 bytes (TCP_MIN_GSO_SIZE) + * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need + * to make sure not storing more than 65535 * 8 bytes per skb, + * even if current MSS is bigger. + */ + if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE)) + return 0; + if (unlikely(tcp_skb_pcount(to) + pcount > 65535)) + return 0; + return skb_shift(to, from, shiftlen); +} + /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. */ @@ -1486,7 +1501,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; - if (!skb_shift(prev, skb, len)) + if (!tcp_skb_shift(prev, skb, pcount, len)) goto fallback; if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack)) goto out; @@ -1504,11 +1519,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, goto out; len = skb->len; - if (skb_shift(prev, skb, len)) { - pcount += tcp_skb_pcount(skb); - tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), + pcount = tcp_skb_pcount(skb); + if (tcp_skb_shift(prev, skb, pcount, len)) + tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, 0); - } out: return prev; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 11101cf8693b1d..b76cf96d5cfed9 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2527,6 +2527,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; + net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS; net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bd134e3a0473e7..221d9b72423b96 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1299,6 +1299,12 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, if (nsize < 0) nsize = 0; + if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf && + tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); + return -ENOMEM; + } + if (skb_unclone(skb, gfp)) return -ENOMEM; @@ -1457,8 +1463,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) mss_now -= icsk->icsk_ext_hdr_len; /* Then reserve room for full set of TCP options and 8 bytes of data */ - if (mss_now < 48) - mss_now = 48; + mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); return mss_now; } @@ -2727,7 +2732,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) if (next_skb_size <= skb_availroom(skb)) skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), next_skb_size); - else if (!skb_shift(skb, next_skb, next_skb_size)) + else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) return false; } tcp_highest_sack_replace(sk, next_skb, skb); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b1b5a648def6cd..17335a370e6452 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -166,6 +166,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; mss = min(net->ipv4.sysctl_tcp_base_mss, mss); mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len); + mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss); icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); } tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 
d73a6d6652f60f..2b144b92ae46a4 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -111,7 +111,8 @@ static void _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) { const struct iphdr *iph = ip_hdr(skb); - u8 *xprth = skb_network_header(skb) + iph->ihl * 4; + int ihl = iph->ihl; + u8 *xprth = skb_network_header(skb) + ihl * 4; struct flowi4 *fl4 = &fl->u.ip4; int oif = 0; @@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) fl4->flowi4_mark = skb->mark; fl4->flowi4_oif = reverse ? skb->skb_iif : oif; + fl4->flowi4_proto = iph->protocol; + fl4->daddr = reverse ? iph->saddr : iph->daddr; + fl4->saddr = reverse ? iph->daddr : iph->saddr; + fl4->flowi4_tos = iph->tos; + if (!ip_is_fragment(iph)) { switch (iph->protocol) { case IPPROTO_UDP: @@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) pskb_may_pull(skb, xprth + 4 - skb->data)) { __be16 *ports; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; ports = (__be16 *)xprth; fl4->fl4_sport = ports[!!reverse]; @@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) pskb_may_pull(skb, xprth + 2 - skb->data)) { u8 *icmp; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; icmp = xprth; fl4->fl4_icmp_type = icmp[0]; @@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) pskb_may_pull(skb, xprth + 4 - skb->data)) { __be32 *ehdr; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; ehdr = (__be32 *)xprth; fl4->fl4_ipsec_spi = ehdr[0]; @@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) pskb_may_pull(skb, xprth + 8 - skb->data)) { __be32 *ah_hdr; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; ah_hdr = (__be32 *)xprth; fl4->fl4_ipsec_spi = ah_hdr[1]; @@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) pskb_may_pull(skb, xprth + 4 - skb->data)) { __be16 *ipcomp_hdr; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; ipcomp_hdr = (__be16 *)xprth; fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); @@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) __be16 *greflags; __be32 *gre_hdr; - xprth = skb_network_header(skb) + iph->ihl * 4; + xprth = skb_network_header(skb) + ihl * 4; greflags = (__be16 *)xprth; gre_hdr = (__be32 *)xprth; @@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) break; } } - fl4->flowi4_proto = iph->protocol; - fl4->daddr = reverse ? iph->saddr : iph->daddr; - fl4->saddr = reverse ? iph->daddr : iph->saddr; - fl4->flowi4_tos = iph->tos; } static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 3e2a9bd3459c77..a6c0479c1d55f8 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -877,6 +877,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i, { int cpu; + /* Make sure rt6_make_pcpu_route() wont add other percpu routes + * while we are cleaning them here. 
+ */ + f6i->fib6_destroying = 1; + mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */ + /* release the reference to this fib entry from * all of its cached pcpu routes */ @@ -900,6 +906,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, { struct fib6_table *table = rt->fib6_table; + if (rt->rt6i_pcpu) + fib6_drop_pcpu_from(rt, table); + if (atomic_read(&rt->fib6_ref) != 1) { /* This route is used as dummy address holder in some split * nodes. It is not leaked, but it still holds other resources, @@ -921,9 +930,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, fn = rcu_dereference_protected(fn->parent, lockdep_is_held(&table->tb6_lock)); } - - if (rt->rt6i_pcpu) - fib6_drop_pcpu_from(rt, table); } } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index be5f3d7ceb966d..f994f50e151622 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) rcu_read_lock_bh(); for_each_sk_fl_rcu(np, sfl) { struct ip6_flowlabel *fl = sfl->fl; - if (fl->label == label) { + + if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl->lastuse = jiffies; - atomic_inc(&fl->users); rcu_read_unlock_bh(); return fl; } @@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) goto done; } fl1 = sfl->fl; - atomic_inc(&fl1->users); + if (!atomic_inc_not_zero(&fl1->users)) + fl1 = NULL; break; } } diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 4fe7c90962ddae..868ae23dbae195 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -10,15 +10,25 @@ #include #include -static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, +static u32 __ipv6_select_ident(struct net *net, const struct in6_addr *dst, const struct in6_addr *src) { + const struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + .src = *src, + }; u32 hash, id; - hash = __ipv6_addr_jhash(dst, hashrnd); - hash = __ipv6_addr_jhash(src, hash); - hash ^= net_hash_mix(net); + /* Note the following code is not safe, but this is okay. 
*/ + if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) + get_random_bytes(&net->ipv4.ip_id_key, + sizeof(net->ipv4.ip_id_key)); + + hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key); /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, * set the hight order instead thus minimizing possible future @@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, */ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) { - static u32 ip6_proxy_idents_hashrnd __read_mostly; struct in6_addr buf[2]; struct in6_addr *addrs; u32 id; @@ -53,11 +62,7 @@ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) if (!addrs) return 0; - net_get_random_once(&ip6_proxy_idents_hashrnd, - sizeof(ip6_proxy_idents_hashrnd)); - - id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd, - &addrs[1], &addrs[0]); + id = __ipv6_select_ident(net, &addrs[1], &addrs[0]); return htonl(id); } EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); @@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr) { - static u32 ip6_idents_hashrnd __read_mostly; u32 id; - net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); - - id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr); + id = __ipv6_select_ident(net, daddr, saddr); return htonl(id); } EXPORT_SYMBOL(ipv6_select_ident); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5e0efd3954e90a..4856d9320b28e2 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -288,7 +288,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* Binding to link-local address requires an interface */ if (!sk->sk_bound_dev_if) goto out_unlock; + } + if (sk->sk_bound_dev_if) { err = -ENODEV; dev = dev_get_by_index_rcu(sock_net(sk), sk->sk_bound_dev_if); @@ -780,6 +782,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct flowi6 fl6; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; + int hdrincl; u16 proto; int err; @@ -793,6 +796,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + /* hdrincl should be READ_ONCE(inet->hdrincl) + * but READ_ONCE() doesn't work with bit fields. + * Doing this indirectly yields the same result. + */ + hdrincl = inet->hdrincl; + hdrincl = READ_ONCE(hdrincl); + /* * Get and verify the address. 
*/ @@ -884,11 +894,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt = ipv6_fixup_options(&opt_space, opt); fl6.flowi6_proto = proto; - rfv.msg = msg; - rfv.hlen = 0; - err = rawv6_probe_proto_opt(&rfv, &fl6); - if (err) - goto out; + + if (!hdrincl) { + rfv.msg = msg; + rfv.hlen = 0; + err = rawv6_probe_proto_opt(&rfv, &fl6); + if (err) + goto out; + } if (!ipv6_addr_any(daddr)) fl6.daddr = *daddr; @@ -905,7 +918,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_oif = np->ucast_oif; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - if (inet->hdrincl) + if (hdrincl) fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; if (ipc6.tclass < 0) @@ -928,7 +941,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto do_confirm; back_from_confirm: - if (inet->hdrincl) + if (hdrincl) err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags, &ipc6.sockc); else { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2e519f7b983c03..24f7b2cf504b2d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, int iif, int type, u32 portid, u32 seq, unsigned int flags); static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, - struct in6_addr *daddr, - struct in6_addr *saddr); + const struct in6_addr *daddr, + const struct in6_addr *saddr); #ifdef CONFIG_IPV6_ROUTE_INFO static struct fib6_info *rt6_add_route_info(struct net *net, @@ -1268,6 +1268,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net, prev = cmpxchg(p, NULL, pcpu_rt); BUG_ON(prev); + if (rt->fib6_destroying) { + struct fib6_info *from; + + from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); + fib6_info_release(from); + } + return pcpu_rt; } @@ -1542,31 +1549,44 @@ void rt6_flush_exceptions(struct fib6_info *rt) * Caller has to hold rcu_read_lock() */ static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, - struct in6_addr *daddr, - struct in6_addr *saddr) + const struct in6_addr *daddr, + const struct in6_addr *saddr) { + const struct in6_addr *src_key = NULL; struct rt6_exception_bucket *bucket; - struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; struct rt6_info *res = NULL; - bucket = rcu_dereference(rt->rt6i_exception_bucket); - #ifdef CONFIG_IPV6_SUBTREES /* rt6i_src.plen != 0 indicates rt is in subtree * and exception table is indexed by a hash of * both rt6i_dst and rt6i_src. - * Otherwise, the exception table is indexed by - * a hash of only rt6i_dst. + * However, the src addr used to create the hash + * might not be exactly the passed in saddr which + * is a /128 addr from the flow. + * So we need to use f6i->fib6_src to redo lookup + * if the passed in saddr does not find anything. + * (See the logic in ip6_rt_cache_alloc() on how + * rt->rt6i_src is updated.) 
*/ if (rt->fib6_src.plen) src_key = saddr; +find_ex: #endif + bucket = rcu_dereference(rt->rt6i_exception_bucket); rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) res = rt6_ex->rt6i; +#ifdef CONFIG_IPV6_SUBTREES + /* Use fib6_src as src_key and redo lookup */ + if (!res && src_key && src_key != &rt->fib6_src.addr) { + src_key = &rt->fib6_src.addr; + goto find_ex; + } +#endif + return res; } @@ -2460,6 +2480,12 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_info *rt; struct fib6_node *fn; + /* l3mdev_update_flow overrides oif if the device is enslaved; in + * this case we must match on the real ingress device, so reset it + */ + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) + fl6->flowi6_oif = skb->dev->ifindex; + /* Get the "current" route for this destination and * check if the redirect has come from appropriate router. * @@ -2650,10 +2676,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst) u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr, struct in6_addr *saddr) { - struct rt6_exception_bucket *bucket; - struct rt6_exception *rt6_ex; - struct in6_addr *src_key; struct inet6_dev *idev; + struct rt6_info *rt; u32 mtu = 0; if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) { @@ -2662,18 +2686,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr, goto out; } - src_key = NULL; -#ifdef CONFIG_IPV6_SUBTREES - if (f6i->fib6_src.plen) - src_key = saddr; -#endif - - bucket = rcu_dereference(f6i->rt6i_exception_bucket); - rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); - if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) - mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU); - - if (likely(!mtu)) { + rt = rt6_find_cached_rt(f6i, daddr, saddr); + if (unlikely(rt)) { + mtu = dst_metric_raw(&rt->dst, RTAX_MTU); + } else { struct net_device *dev = fib6_info_nh_dev(f6i); mtu = IPV6_MIN_MTU; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index bc65db782bfb1f..d9e5f6808811ae 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) unsigned int i; xfrm_flush_gc(); - xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); + xfrm_state_flush(net, 0, false, true); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); @@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void) xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); unregister_pernet_subsys(&xfrm6_tunnel_net_ops); + /* Someone maybe has gotten the xfrm6_tunnel_spi. + * So need to wait it. 
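The xfrm6_tunnel teardown above adds an rcu_barrier() so that every outstanding deferred free has finished before kmem_cache_destroy() tears down the cache. A toy, non-RCU userspace sketch of the same drain-before-destroy idea; defer_free() and drain_deferred() are illustrative names, not kernel interfaces:

#include <stdlib.h>

/* Toy analogue of objects whose freeing has been deferred (as with RCU):
 * all of them must be drained before the backing pool is destroyed. */
struct deferred {
	void *obj;
	struct deferred *next;
};

static struct deferred *pending;

static int defer_free(void *obj)
{
	struct deferred *d = malloc(sizeof(*d));

	if (!d)
		return -1;
	d->obj = obj;
	d->next = pending;
	pending = d;
	return 0;
}

/* Plays the role rcu_barrier() plays above: make sure every deferred
 * release has actually run before tearing down the allocator. */
static void drain_deferred(void)
{
	while (pending) {
		struct deferred *d = pending;

		pending = d->next;
		free(d->obj);
		free(d);
	}
}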
+ */ + rcu_barrier(); kmem_cache_destroy(xfrm6_tunnel_spi_kmem); } diff --git a/net/key/af_key.c b/net/key/af_key.c index 7d4bed9550605e..0b79c9aa8eb1f2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) if (rq->sadb_x_ipsecrequest_mode == 0) return -EINVAL; + if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto)) + return -EINVAL; - t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ + t->id.proto = rq->sadb_x_ipsecrequest_proto; if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) return -EINVAL; t->mode = mode; diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index db6e0afe3a20b6..1740f852002ec3 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev) lapb = __lapb_devtostruct(dev); if (!lapb) goto out; + lapb_put(lapb); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index 94425e421213a3..9e4b6bcf6920d2 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); if (likely(!rc)) rc = dev_queue_xmit(skb); + else + kfree_skb(skb); return rc; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 172aeae21ae9d0..35c6dfa13fa85b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2183,6 +2183,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); void ieee80211_tdls_chsw_work(struct work_struct *wk); +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason); +const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 3a0171a65db320..152d4365f9616b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1910,6 +1910,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); + if (sdata->vif.txq) + ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); + synchronize_rcu(); if (sdata->dev) { diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3dbecae4be73cb..1aaa73fa308e65 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1156,9 +1156,6 @@ static void ieee80211_chswitch_work(struct work_struct *work) goto out; } - /* XXX: shouldn't really modify cfg80211-owned data! 
*/ - ifmgd->associated->channel = sdata->csa_chandef.chan; - ifmgd->csa_waiting_bcn = true; ieee80211_sta_reset_beacon_monitor(sdata); @@ -2871,7 +2868,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, #define case_WLAN(type) \ case WLAN_REASON_##type: return #type -static const char *ieee80211_get_reason_code_string(u16 reason_code) +const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); @@ -2936,6 +2933,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, if (len < 24 + 2) return; + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) { const u8 *bssid = ifmgd->associated->bssid; @@ -2985,6 +2987,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", mgmt->sa, reason_code, ieee80211_get_reason_code_string(reason_code)); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e946ee4f335bd7..7523d995ea8abe 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3752,6 +3752,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) case NL80211_IFTYPE_STATION: if (!bssid && !sdata->u.mgd.use_4addr) return false; + if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) + return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 6c647f425e057d..67745d1d4c5d10 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1992,3 +1992,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk) } rtnl_unlock(); } + +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason) +{ + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(&sdata->vif, peer); + if (!sta || !sta->tdls) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", + peer, reason, + ieee80211_get_reason_code_string(reason)); + + ieee80211_tdls_oper_request(&sdata->vif, peer, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC); +} diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3deaa01ebee4bb..2558a34c9df16f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3523,7 +3523,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, } /* Always allow software iftypes */ - if (local->hw.wiphy->software_iftypes & BIT(iftype)) { + if (local->hw.wiphy->software_iftypes & BIT(iftype) || + (iftype == NL80211_IFTYPE_AP_VLAN && + local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) { if (radar_detect) return -EINVAL; return 0; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 58d0b258b684cd..5dd48f0a4b1bd1 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; - u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN]; + u8 aad[GMAC_AAD_LEN], *mic, 
ipn[6], nonce[GMAC_NONCE_LEN]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) @@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) memcpy(nonce, hdr->addr2, ETH_ALEN); memcpy(nonce + ETH_ALEN, ipn, 6); + mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC); + if (!mic) + return RX_DROP_UNUSABLE; if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; + kfree(mic); return RX_DROP_UNUSABLE; } + kfree(mic); } memcpy(key->u.aes_gmac.rx_pn, ipn, 6); diff --git a/net/netfilter/core.c b/net/netfilter/core.c index dc240cb47ddfac..93aaec3a54ecdf 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(nf_ipv6_ops); DEFINE_PER_CPU(bool, nf_skb_duplicated); EXPORT_SYMBOL_GPL(nf_skb_duplicated); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; EXPORT_SYMBOL(nf_hooks_needed); #endif @@ -347,7 +347,7 @@ static int __nf_register_net_hook(struct net *net, int pf, if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) net_inc_ingress_queue(); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]); #endif BUG_ON(p == new_hooks); @@ -405,7 +405,7 @@ static void __nf_unregister_net_hook(struct net *net, int pf, if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) net_dec_ingress_queue(); #endif -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]); #endif } else { diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index a42c1bc7c69820..62c0e80dcd7198 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2280,7 +2280,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); - nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */ ip_vs_conn_net_cleanup(ipvs); ip_vs_app_net_cleanup(ipvs); @@ -2295,6 +2294,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); EnterFunction(2); + nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ipvs->enable = 0; /* Disable packet reception */ smp_wmb(); ip_vs_sync_net_cleanup(ipvs); diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 1601275efe2d10..4c2ef42e189cbd 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) if (bits % BITS_PER_BYTE > 0) bytes++; - if (*bs->cur + bytes > *bs->end) + if (bs->cur + bytes > bs->end) return 1; return 0; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index e1537ace2b90c6..5df7486bb4164e 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = { int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) { - flow->timeout = (u32)jiffies; + int err; - rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, - nf_flow_offload_rhash_params); - 
rhashtable_insert_fast(&flow_table->rhashtable, - &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, - nf_flow_offload_rhash_params); + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + if (err < 0) + return err; + + err = rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[1].node, + nf_flow_offload_rhash_params); + if (err < 0) { + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[0].node, + nf_flow_offload_rhash_params); + return err; + } + + flow->timeout = (u32)jiffies; return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 15ed91309992e8..129e9ec99ec976 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, iph->protocol != IPPROTO_UDP) return -1; + if (iph->ttl <= 1) + return -1; + thoff = iph->ihl * 4; if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; @@ -412,6 +415,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, ip6h->nexthdr != IPPROTO_UDP) return -1; + if (ip6h->hop_limit <= 1) + return -1; + thoff = sizeof(*ip6h); if (!pskb_may_pull(skb, thoff + sizeof(*ports))) return -1; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index d67a96a25a681e..7569ba00e73284 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -238,6 +238,7 @@ static unsigned int nf_iterate(struct sk_buff *skb, repeat: verdict = nf_hook_entry_hookfn(hook, skb, state); if (verdict != NF_ACCEPT) { + *index = i; if (verdict != NF_REPEAT) return verdict; goto repeat; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ebfcfe1dcbdbbb..29ff59dd99acec 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1142,6 +1142,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) u64 pkts, bytes; int cpu; + if (!stats) + return 0; + memset(&total, 0, sizeof(total)); for_each_possible_cpu(cpu) { cpu_stats = per_cpu_ptr(stats, cpu); @@ -1199,6 +1202,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); const struct nf_hook_ops *ops = &basechain->ops; + struct nft_stats __percpu *stats; struct nlattr *nest; nest = nla_nest_start(skb, NFTA_CHAIN_HOOK); @@ -1220,8 +1224,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) goto nla_put_failure; - if (rcu_access_pointer(basechain->stats) && - nft_dump_stats(skb, rcu_dereference(basechain->stats))) + stats = rcu_dereference_check(basechain->stats, + lockdep_commit_lock_is_held(net)); + if (nft_dump_stats(skb, stats)) goto nla_put_failure; } diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 436cc14cfc59b1..7f85af4c40ff7b 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -113,6 +113,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (ret < 0) goto err_flow_add; + dst_release(route.tuple[!dir].dst); return; err_flow_add: diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 376181cc1defc6..9f2875efb4ac96 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -922,7 +922,8 @@ static int 
nfc_genl_deactivate_target(struct sk_buff *skb, u32 device_idx, target_idx; int rc; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index bb95c43aae7627..5a304cfc842336 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -169,7 +169,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) { struct vport *vport; struct internal_dev *internal_dev; + struct net_device *dev; int err; + bool free_vport = true; vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms); if (IS_ERR(vport)) { @@ -177,8 +179,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) goto error; } - vport->dev = alloc_netdev(sizeof(struct internal_dev), - parms->name, NET_NAME_USER, do_setup); + dev = alloc_netdev(sizeof(struct internal_dev), + parms->name, NET_NAME_USER, do_setup); + vport->dev = dev; if (!vport->dev) { err = -ENOMEM; goto error_free_vport; @@ -199,8 +202,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) rtnl_lock(); err = register_netdevice(vport->dev); - if (err) + if (err) { + free_vport = false; goto error_unlock; + } dev_set_promiscuity(vport->dev, 1); rtnl_unlock(); @@ -210,11 +215,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) error_unlock: rtnl_unlock(); - free_percpu(vport->dev->tstats); + free_percpu(dev->tstats); error_free_netdev: - free_netdev(vport->dev); + free_netdev(dev); error_free_vport: - ovs_vport_free(vport); + if (free_vport) + ovs_vport_free(vport); error: return ERR_PTR(err); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 18df3bce73daf3..d98fcf926166f8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3017,8 +3017,8 @@ static int packet_release(struct socket *sock) synchronize_net(); + kfree(po->rollover); if (f) { - kfree(po->rollover); fanout_release_data(f); kfree(f); } diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index d664e9ade74dea..0b347f46b2f415 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, wait_clean_list_grace(); list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail); - if (ibmr_ret) + if (ibmr_ret) { *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode); - + clean_nodes = clean_nodes->next; + } /* more than one entry in llist nodes */ - if (clean_nodes->next) - llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list); + if (clean_nodes) + llist_add_batch(clean_nodes, clean_tail, + &pool->clean_list); } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index e12f8ef7baa438..7c4a4b87424811 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -744,7 +744,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { a = actions[i]; - nest = nla_nest_start(skb, a->order); + nest = nla_nest_start(skb, i + 1); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, bind, ref); @@ -1257,7 +1257,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, ret = PTR_ERR(act); goto err; } - act->order = i; attr_size += tcf_action_fill_size(act); actions[i - 1] = act; } diff --git a/net/sctp/sm_make_chunk.c 
b/net/sctp/sm_make_chunk.c index d05c57664e36e0..fb546b2d67ca85 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2329,7 +2329,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, union sctp_addr addr; struct sctp_af *af; int src_match = 0; - char *cookie; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. @@ -2433,14 +2432,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; - /* Copy cookie in case we need to resend COOKIE-ECHO. */ - cookie = asoc->peer.cookie; - if (cookie) { - asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); - if (!asoc->peer.cookie) - goto clean_up; - } - /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). @@ -2609,7 +2600,11 @@ static int sctp_process_param(struct sctp_association *asoc, case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(struct sctp_paramhdr); - asoc->peer.cookie = param.cookie->body; + if (asoc->peer.cookie) + kfree(asoc->peer.cookie); + asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp); + if (!asoc->peer.cookie) + retval = 0; break; case SCTP_PARAM_HEARTBEAT_INFO: @@ -2671,6 +2666,8 @@ static int sctp_process_param(struct sctp_association *asoc, goto fall_through; /* Save peer's random parameter */ + if (asoc->peer.peer_random) + kfree(asoc->peer.peer_random); asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { @@ -2684,6 +2681,8 @@ static int sctp_process_param(struct sctp_association *asoc, goto fall_through; /* Save peer's HMAC list */ + if (asoc->peer.peer_hmacs) + kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { @@ -2699,6 +2698,8 @@ static int sctp_process_param(struct sctp_association *asoc, if (!ep->auth_enable) goto fall_through; + if (asoc->peer.peer_chunks) + kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 48fe8f01265f76..3131b4154c74d0 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -898,6 +898,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds, asoc->rto_initial; } + if (sctp_state(asoc, ESTABLISHED)) { + kfree(asoc->peer.cookie); + asoc->peer.cookie = NULL; + } + if (sctp_state(asoc, ESTABLISHED) || sctp_state(asoc, CLOSED) || sctp_state(asoc, SHUTDOWN_RECEIVED)) { diff --git a/net/tipc/core.c b/net/tipc/core.c index 5b38f516428147..3ecca3b88bf81a 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -75,9 +75,6 @@ static int __net_init tipc_init_net(struct net *net) goto out_nametbl; INIT_LIST_HEAD(&tn->dist_queue); - err = tipc_topsrv_start(net); - if (err) - goto out_subscr; err = tipc_bcast_init(net); if (err) @@ -86,8 +83,6 @@ static int __net_init tipc_init_net(struct net *net) return 0; out_bclink: - tipc_bcast_stop(net); -out_subscr: tipc_nametbl_stop(net); out_nametbl: tipc_sk_rht_destroy(net); @@ -97,7 +92,6 @@ static int __net_init tipc_init_net(struct net *net) static void __net_exit tipc_exit_net(struct net *net) { - tipc_topsrv_stop(net); tipc_net_stop(net); 
tipc_bcast_stop(net); tipc_nametbl_stop(net); @@ -111,6 +105,11 @@ static struct pernet_operations tipc_net_ops = { .size = sizeof(struct tipc_net), }; +static struct pernet_operations tipc_topsrv_net_ops = { + .init = tipc_topsrv_init_net, + .exit = tipc_topsrv_exit_net, +}; + static int __init tipc_init(void) { int err; @@ -129,10 +128,6 @@ static int __init tipc_init(void) if (err) goto out_netlink_compat; - err = tipc_socket_init(); - if (err) - goto out_socket; - err = tipc_register_sysctl(); if (err) goto out_sysctl; @@ -141,6 +136,14 @@ static int __init tipc_init(void) if (err) goto out_pernet; + err = tipc_socket_init(); + if (err) + goto out_socket; + + err = register_pernet_subsys(&tipc_topsrv_net_ops); + if (err) + goto out_pernet_topsrv; + err = tipc_bearer_setup(); if (err) goto out_bearer; @@ -148,12 +151,14 @@ static int __init tipc_init(void) pr_info("Started in single node mode\n"); return 0; out_bearer: + unregister_pernet_subsys(&tipc_topsrv_net_ops); +out_pernet_topsrv: + tipc_socket_stop(); +out_socket: unregister_pernet_subsys(&tipc_net_ops); out_pernet: tipc_unregister_sysctl(); out_sysctl: - tipc_socket_stop(); -out_socket: tipc_netlink_compat_stop(); out_netlink_compat: tipc_netlink_stop(); @@ -165,10 +170,11 @@ static int __init tipc_init(void) static void __exit tipc_exit(void) { tipc_bearer_cleanup(); + unregister_pernet_subsys(&tipc_topsrv_net_ops); + tipc_socket_stop(); unregister_pernet_subsys(&tipc_net_ops); tipc_netlink_stop(); tipc_netlink_compat_stop(); - tipc_socket_stop(); tipc_unregister_sysctl(); pr_info("Deactivated\n"); diff --git a/net/tipc/group.c b/net/tipc/group.c index 06fee142f09fbe..3ee93b5c19b633 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp) rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) { tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq); + __skb_queue_purge(&m->deferredq); list_del(&m->list); kfree(m); } diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index d793b434388512..aa015c233898c5 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -77,8 +77,9 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, u32 event, u32 port, u32 node, u32 scope, int must); -int tipc_topsrv_start(struct net *net); -void tipc_topsrv_stop(struct net *net); + +int __net_init tipc_topsrv_init_net(struct net *net); +void __net_exit tipc_topsrv_exit_net(struct net *net); void tipc_sub_put(struct tipc_subscription *subscription); void tipc_sub_get(struct tipc_subscription *subscription); diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 2301b09df23462..35558656fe02fa 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -643,7 +643,7 @@ static void tipc_topsrv_work_stop(struct tipc_topsrv *s) destroy_workqueue(s->send_wq); } -int tipc_topsrv_start(struct net *net) +static int tipc_topsrv_start(struct net *net) { struct tipc_net *tn = tipc_net(net); const char name[] = "topology_server"; @@ -677,7 +677,7 @@ int tipc_topsrv_start(struct net *net) return ret; } -void tipc_topsrv_stop(struct net *net) +static void tipc_topsrv_stop(struct net *net) { struct tipc_topsrv *srv = tipc_topsrv(net); struct socket *lsock = srv->listener; @@ -702,3 +702,13 @@ void tipc_topsrv_stop(struct net *net) idr_destroy(&srv->conn_idr); kfree(srv); } + +int __net_init tipc_topsrv_init_net(struct net *net) +{ + return tipc_topsrv_start(net); +} + +void __net_exit tipc_topsrv_exit_net(struct net *net) +{ + 
tipc_topsrv_stop(net); +} diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index fdf22cb0b3e6b1..ead29c2aefa762 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -545,10 +545,22 @@ static int tls_device_push_pending_record(struct sock *sk, int flags) return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA); } +static void tls_device_resync_rx(struct tls_context *tls_ctx, + struct sock *sk, u32 seq, u64 rcd_sn) +{ + struct net_device *netdev; + + if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags))) + return; + netdev = READ_ONCE(tls_ctx->netdev); + if (netdev) + netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn); + clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags); +} + void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct net_device *netdev = tls_ctx->netdev; struct tls_offload_context_rx *rx_ctx; u32 is_req_pending; s64 resync_req; @@ -563,10 +575,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn) is_req_pending = resync_req; if (unlikely(is_req_pending) && req_seq == seq && - atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) - netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, - seq + TLS_HEADER_SIZE - 1, - rcd_sn); + atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) { + seq += TLS_HEADER_SIZE - 1; + tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); + } } static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb) @@ -916,12 +928,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk) if (!netdev) goto out; - if (!(netdev->features & NETIF_F_HW_TLS_RX)) { - pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n", - __func__); - goto out; - } - netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); @@ -960,7 +966,10 @@ static int tls_device_down(struct net_device *netdev) if (ctx->rx_conf == TLS_HW) netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_RX); - ctx->netdev = NULL; + WRITE_ONCE(ctx->netdev, NULL); + smp_mb__before_atomic(); /* pairs with test_and_set_bit() */ + while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags)) + usleep_range(10, 200); dev_put(netdev); list_del_init(&ctx->list); @@ -980,7 +989,8 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) + if (!dev->tlsdev_ops && + !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) return NOTIFY_DONE; switch (event) { diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 15eb5d3d475094..96ab344f17bbc1 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void) if (!virtio_vsock_workqueue) return -ENOMEM; - ret = register_virtio_driver(&virtio_vsock_driver); + ret = vsock_core_init(&virtio_transport.transport); if (ret) goto out_wq; - ret = vsock_core_init(&virtio_transport.transport); + ret = register_virtio_driver(&virtio_vsock_driver); if (ret) - goto out_vdr; + goto out_vci; return 0; -out_vdr: - unregister_virtio_driver(&virtio_vsock_driver); +out_vci: + vsock_core_exit(); out_wq: destroy_workqueue(virtio_vsock_workqueue); return ret; - } static void __exit virtio_vsock_exit(void) { - vsock_core_exit(); unregister_virtio_driver(&virtio_vsock_driver); + vsock_core_exit(); destroy_workqueue(virtio_vsock_workqueue); } diff --git 
a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 602715fc9a757b..e30f53728725d1 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk) void virtio_transport_release(struct vsock_sock *vsk) { + struct virtio_vsock_sock *vvs = vsk->trans; + struct virtio_vsock_pkt *pkt, *tmp; struct sock *sk = &vsk->sk; bool remove_sock = true; lock_sock(sk); if (sk->sk_type == SOCK_STREAM) remove_sock = virtio_transport_close(vsk); + + list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) { + list_del(&pkt->list); + virtio_transport_free_pkt(pkt); + } release_sock(sk); if (remove_sock) @@ -864,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk, if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) vsk->peer_shutdown |= SEND_SHUTDOWN; if (vsk->peer_shutdown == SHUTDOWN_MASK && - vsock_stream_has_data(vsk) <= 0) + vsock_stream_has_data(vsk) <= 0) { + sock_set_flag(sk, SOCK_DONE); sk->sk_state = TCP_CLOSING; + } if (le32_to_cpu(pkt->hdr.flags)) sk->sk_state_change(sk); break; diff --git a/net/wireless/core.c b/net/wireless/core.c index a88551f3bc43f2..2a46ec3cb72c15 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -498,7 +498,7 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, &rdev->rfkill_ops, rdev); if (!rdev->rfkill) { - kfree(rdev); + wiphy_free(&rdev->wiphy); return NULL; } @@ -1335,8 +1335,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, } break; case NETDEV_PRE_UP: - if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) && + !(wdev->iftype == NL80211_IFTYPE_AP_VLAN && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP && + wdev->use_4addr)) return notifier_from_errno(-EOPNOTSUPP); + if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 048e004ed0ee80..8e2f03ab4cc9f7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3191,8 +3191,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - if (!rdev->ops->add_virtual_intf || - !(rdev->wiphy.interface_modes & (1 << type))) + if (!rdev->ops->add_virtual_intf) return -EOPNOTSUPP; if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN || @@ -3211,6 +3210,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return err; } + if (!(rdev->wiphy.interface_modes & (1 << type)) && + !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr && + rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)) + return -EOPNOTSUPP; + err = nl80211_parse_mon_options(rdev, type, info, &params); if (err < 0) return err; @@ -4607,8 +4611,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); - if (!hdr) + if (!hdr) { + cfg80211_sinfo_release_content(sinfo); return -1; + } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || @@ -15441,6 +15447,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev, wdev->chandef = *chandef; wdev->preset_chandef = *chandef; + + if (wdev->iftype == NL80211_IFTYPE_STATION && + !WARN_ON(!wdev->current_bss)) + wdev->current_bss->pub.channel = chandef->chan; + nl80211_ch_switch_notify(rdev, dev, chandef,
GFP_KERNEL, NL80211_CMD_CH_SWITCH_NOTIFY, 0); } diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 82723ef44db3e2..555ee2aca6c016 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x) return NULL; } -static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) +static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb, + unsigned short family) { struct xfrmi_net *xfrmn; - int ifindex; struct xfrm_if *xi; + int ifindex = 0; if (!secpath_exists(skb) || !skb->dev) return NULL; + switch (family) { + case AF_INET6: + ifindex = inet6_sdif(skb); + break; + case AF_INET: + ifindex = inet_sdif(skb); + break; + } + if (!ifindex) + ifindex = skb->dev->ifindex; + xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); - ifindex = skb->dev->ifindex; for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { if (ifindex == xi->dev->ifindex && diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index bf5d59270f79de..ce1b262ce9646a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2339,7 +2339,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, ifcb = xfrm_if_get_cb(); if (ifcb) { - xi = ifcb->decode_session(skb); + xi = ifcb->decode_session(skb, family); if (xi) { if_id = xi->p.if_id; net = xi->net; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 3f729cd512aff5..11e09eb138d60c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2386,7 +2386,7 @@ void xfrm_state_fini(struct net *net) flush_work(&net->xfrm.state_hash_work); flush_work(&xfrm_state_gc_work); - xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); + xfrm_state_flush(net, 0, false, true); WARN_ON(!list_empty(&net->xfrm.state_all)); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7e4904b930041f..2122f89f615550 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) ret = verify_policy_dir(p->dir); if (ret) return ret; - if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir)) + if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) return -EINVAL; return 0; @@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) return -EINVAL; } - switch (ut[i].id.proto) { - case IPPROTO_AH: - case IPPROTO_ESP: - case IPPROTO_COMP: -#if IS_ENABLED(CONFIG_IPV6) - case IPPROTO_ROUTING: - case IPPROTO_DSTOPTS: -#endif - case IPSEC_PROTO_ANY: - break; - default: + if (!xfrm_id_proto_valid(ut[i].id.proto)) return -EINVAL; - } - } return 0; diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index 34414c6efad639..a2c9e7f98e0673 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl @@ -46,7 +46,7 @@ $x = "[0-9a-f]"; # hex character $xs = "[0-9a-f ]"; # hex character or space $funcre = qr/^$x* <(.*)>:$/; - if ($arch eq 'aarch64') { + if ($arch =~ '^(aarch|arm)64$') { #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! 
$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; } elsif ($arch eq 'arm') { diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh index 083c526073ef9a..8b980fb2270a3b 100755 --- a/scripts/gcc-goto.sh +++ b/scripts/gcc-goto.sh @@ -3,7 +3,7 @@ # Test for gcc 'asm goto' support # Copyright (C) 2010, Jason Baron -cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" +cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null int main(void) { #if defined(__arm__) || defined(__aarch64__) diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index 552d5efd7cb752..17f06079a71231 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h @@ -150,8 +150,12 @@ void print_gimple_expr(FILE *, gimple, int, int); void dump_gimple_stmt(pretty_printer *, gimple, int, int); #endif +#ifndef __unused #define __unused __attribute__((__unused__)) +#endif +#ifndef __visible #define __visible __attribute__((visibility("default"))) +#endif #define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) #define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index e09fe4d7307cd0..40e3a098f6fb5b 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry) return 0; } -static void aafs_evict_inode(struct inode *inode) +static void aafs_i_callback(struct rcu_head *head) { - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); + struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static void aafs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, aafs_i_callback); } static const struct super_operations aafs_super_ops = { .statfs = simple_statfs, - .evict_inode = aafs_evict_inode, + .destroy_inode = aafs_destroy_inode, .show_path = aafs_show_path, }; diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index ab64c6b5db5aca..28c098fb6208b4 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -214,7 +214,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p) return labels_profile(aa_get_newest_label(&p->label)); } -#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)]) +static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile, + unsigned char class) +{ + if (class <= AA_CLASS_LAST) + return profile->policy.start[class]; + else + return aa_dfa_match_len(profile->policy.dfa, + profile->policy.start[0], &class, 1); +} + static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile, u16 AF) { unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 21cb384d712a28..088ea2ac857065 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -276,7 +276,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name) char *tag = NULL; size_t size = unpack_u16_chunk(e, &tag); /* if a name is specified it must match. 
otherwise skip tag */ - if (name && (!size || strcmp(name, tag))) + if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag))) goto fail; } else if (name) { /* if a name is specified and there is no name tag fail */ diff --git a/security/inode.c b/security/inode.c index 8dd9ca8848e436..829f15672e01f0 100644 --- a/security/inode.c +++ b/security/inode.c @@ -26,17 +26,22 @@ static struct vfsmount *mount; static int mount_count; -static void securityfs_evict_inode(struct inode *inode) +static void securityfs_i_callback(struct rcu_head *head) { - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); + struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static void securityfs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, securityfs_i_callback); } static const struct super_operations securityfs_super_operations = { .statfs = simple_statfs, - .evict_inode = securityfs_evict_inode, + .destroy_inode = securityfs_destroy_inode, }; static int fill_super(struct super_block *sb, void *data, int silent) diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 8a3905bb02c745..6a314fb0d480e6 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -89,6 +89,9 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) tfm = &hmac_tfm; algo = evm_hmac; } else { + if (hash_algo >= HASH_ALGO__LAST) + return ERR_PTR(-EINVAL); + tfm = &evm_tfm[hash_algo]; algo = hash_algo_name[hash_algo]; } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 8c9499867c918b..93babb60b05ae3 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1059,10 +1059,10 @@ enum { }; static const char *const mask_tokens[] = { - "MAY_EXEC", - "MAY_WRITE", - "MAY_READ", - "MAY_APPEND" + "^MAY_EXEC", + "^MAY_WRITE", + "^MAY_READ", + "^MAY_APPEND" }; #define __ima_hook_stringify(str) (#str), @@ -1122,6 +1122,7 @@ int ima_policy_show(struct seq_file *m, void *v) struct ima_rule_entry *entry = v; int i; char tbuf[64] = {0,}; + int offset = 0; rcu_read_lock(); @@ -1145,15 +1146,17 @@ int ima_policy_show(struct seq_file *m, void *v) if (entry->flags & IMA_FUNC) policy_func_show(m, entry->func); - if (entry->flags & IMA_MASK) { + if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) { + if (entry->flags & IMA_MASK) + offset = 1; if (entry->mask & MAY_EXEC) - seq_printf(m, pt(Opt_mask), mt(mask_exec)); + seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset); if (entry->mask & MAY_WRITE) - seq_printf(m, pt(Opt_mask), mt(mask_write)); + seq_printf(m, pt(Opt_mask), mt(mask_write) + offset); if (entry->mask & MAY_READ) - seq_printf(m, pt(Opt_mask), mt(mask_read)); + seq_printf(m, pt(Opt_mask), mt(mask_read) + offset); if (entry->mask & MAY_APPEND) - seq_printf(m, pt(Opt_mask), mt(mask_append)); + seq_printf(m, pt(Opt_mask), mt(mask_append) + offset); seq_puts(m, " "); } diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index 186e727b737b99..6fd9954e1c085f 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c @@ -288,11 +288,8 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep, int rc; struct netlbl_lsm_secattr secattr; struct sk_security_struct *sksec = ep->base.sk->sk_security; - struct sockaddr *addr; struct sockaddr_in addr4; -#if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 addr6; -#endif if 
(ep->base.sk->sk_family != PF_INET && ep->base.sk->sk_family != PF_INET6) @@ -310,16 +307,15 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep, if (ip_hdr(skb)->version == 4) { addr4.sin_family = AF_INET; addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; - addr = (struct sockaddr *)&addr4; -#if IS_ENABLED(CONFIG_IPV6) - } else { + rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr); + } else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) { addr6.sin6_family = AF_INET6; addr6.sin6_addr = ipv6_hdr(skb)->saddr; - addr = (struct sockaddr *)&addr6; -#endif + rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr); + } else { + rc = -EAFNOSUPPORT; } - rc = netlbl_conn_setattr(ep->base.sk, addr, &secattr); if (rc == 0) sksec->nlbl_state = NLBL_LABELED; diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index b55cb96d1fed4d..f59e13c1d84a8c 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1900,20 +1900,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client, int result; struct snd_seq_client *sender = NULL; struct snd_seq_client_port *sport = NULL; - struct snd_seq_subscribers *p; result = -EINVAL; if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL) goto __end; if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL) goto __end; - p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest); - if (p) { - result = 0; - *subs = p->info; - } else - result = -ENOENT; - + result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest, + subs); __end: if (sport) snd_seq_port_unlock(sport); diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 24d90abfc64dfe..16289aefb443a0 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c @@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, list_del_init(list); grp->exclusive = 0; write_unlock_irq(&grp->list_lock); - up_write(&grp->list_mutex); if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); + up_write(&grp->list_mutex); } /* connect two ports */ @@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, /* get matched subscriber */ -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, - struct snd_seq_addr *dest_addr) +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, + struct snd_seq_addr *dest_addr, + struct snd_seq_port_subscribe *subs) { - struct snd_seq_subscribers *s, *found = NULL; + struct snd_seq_subscribers *s; + int err = -ENOENT; down_read(&src_grp->list_mutex); list_for_each_entry(s, &src_grp->list_head, src_list) { if (addr_match(dest_addr, &s->info.dest)) { - found = s; + *subs = s->info; + err = 0; break; } } up_read(&src_grp->list_mutex); - return found; + return err; } /* diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h index 26bd71f36c41d1..06003b36652ef3 100644 --- a/sound/core/seq/seq_ports.h +++ b/sound/core/seq/seq_ports.h @@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port, struct snd_seq_port_subscribe *info); /* get matched subscriber */ -struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, - struct snd_seq_addr *dest_addr); +int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, + struct snd_seq_addr *dest_addr, + struct snd_seq_port_subscribe *subs); #endif diff --git 
a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c index 73e7a5e527fc02..483a8771d5020a 100644 --- a/sound/firewire/motu/motu-stream.c +++ b/sound/firewire/motu/motu-stream.c @@ -345,7 +345,7 @@ static void destroy_stream(struct snd_motu *motu, } amdtp_stream_destroy(stream); - fw_iso_resources_free(resources); + fw_iso_resources_destroy(resources); } int snd_motu_stream_init_duplex(struct snd_motu *motu) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 5f82a375725af4..4ecaf69569dcbf 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -170,9 +170,6 @@ static int detect_quirks(struct snd_oxfw *oxfw) oxfw->midi_input_ports = 0; oxfw->midi_output_ports = 0; - /* Output stream exists but no data channels are useful. */ - oxfw->has_output = false; - return snd_oxfw_scs1x_add(oxfw); } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9bc8a7cb40ea62..308ce76149ccac 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -378,6 +378,7 @@ enum { #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) +#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", @@ -1795,8 +1796,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, else chip->bdl_pos_adj = bdl_pos_adj[dev]; - /* Workaround for a communication error on CFL (bko#199007) */ - if (IS_CFL(pci)) + /* Workaround for a communication error on CFL (bko#199007) and CNL */ + if (IS_CFL(pci) || IS_CNL(pci)) chip->polling_mode = 1; err = azx_bus_init(chip, model[dev], &pci_hda_io_ops); @@ -1883,9 +1884,6 @@ static int azx_first_init(struct azx *chip) chip->msi = 0; } - if (azx_acquire_irq(chip, 0) < 0) - return -EBUSY; - pci_set_master(pci); synchronize_irq(bus->irq); @@ -2000,6 +1998,9 @@ static int azx_first_init(struct azx *chip) return -ENODEV; } + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + strcpy(card->driver, "HDA-Intel"); strlcpy(card->shortname, driver_short_names[chip->driver_type], sizeof(card->shortname)); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index cb587dce67a90c..35931a18418f30 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1548,9 +1548,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, ret = !repoll || !eld->monitor_present || eld->eld_valid; jack = snd_hda_jack_tbl_get(codec, pin_nid); - if (jack) + if (jack) { jack->block_report = !ret; - + jack->pin_sense = (eld->monitor_present && eld->eld_valid) ? 
+ AC_PINSENSE_PRESENCE : 0; + } mutex_unlock(&per_pin->lock); return ret; } @@ -1660,6 +1662,11 @@ static void hdmi_repoll_eld(struct work_struct *work) container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); struct hda_codec *codec = per_pin->codec; struct hdmi_spec *spec = codec->spec; + struct hda_jack_tbl *jack; + + jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid); + if (jack) + jack->jack_dirty = 1; if (per_pin->repoll_count++ > 6) per_pin->repoll_count = 0; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 75a0be2aa9c243..e154506a66cb66 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -477,12 +477,45 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) set_eapd(codec, *p, on); } +static int find_ext_mic_pin(struct hda_codec *codec); + +static void alc_headset_mic_no_shutup(struct hda_codec *codec) +{ + const struct hda_pincfg *pin; + int mic_pin = find_ext_mic_pin(codec); + int i; + + /* don't shut up pins when unloading the driver; otherwise it breaks + * the default pin setup at the next load of the driver + */ + if (codec->bus->shutdown) + return; + + snd_array_for_each(&codec->init_pins, i, pin) { + /* use read here for syncing after issuing each verb */ + if (pin->nid != mic_pin) + snd_hda_codec_read(codec, pin->nid, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0); + } + + codec->pins_shutup = 1; +} + static void alc_shutup_pins(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; - if (!spec->no_shutup_pins) - snd_hda_shutup_pins(codec); + switch (codec->core.vendor_id) { + case 0x10ec0286: + case 0x10ec0288: + case 0x10ec0298: + alc_headset_mic_no_shutup(codec); + break; + default: + if (!spec->no_shutup_pins) + snd_hda_shutup_pins(codec); + break; + } } /* generic shutup callback; @@ -803,11 +836,10 @@ static int alc_init(struct hda_codec *codec) if (spec->init_hook) spec->init_hook(codec); + snd_hda_gen_init(codec); alc_fix_pll(codec); alc_auto_init_amp(codec, spec->init_amp); - snd_hda_gen_init(codec); - snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT); return 0; @@ -2924,27 +2956,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec) return alc_parse_auto_config(codec, alc269_ignore, ssids); } -static int find_ext_mic_pin(struct hda_codec *codec); - -static void alc286_shutup(struct hda_codec *codec) -{ - const struct hda_pincfg *pin; - int i; - int mic_pin = find_ext_mic_pin(codec); - /* don't shut up pins when unloading the driver; otherwise it breaks - * the default pin setup at the next load of the driver - */ - if (codec->bus->shutdown) - return; - snd_array_for_each(&codec->init_pins, i, pin) { - /* use read here for syncing after issuing each verb */ - if (pin->nid != mic_pin) - snd_hda_codec_read(codec, pin->nid, 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, 0); - } - codec->pins_shutup = 1; -} - static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) { alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? 
(1 << 11) : 0); @@ -4071,18 +4082,19 @@ static struct coef_fw alc225_pre_hsmode[] = { static void alc_headset_mode_unplugged(struct hda_codec *codec) { static struct coef_fw coef0255[] = { + WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ {} }; - static struct coef_fw coef0255_1[] = { - WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ - {} - }; static struct coef_fw coef0256[] = { WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ + WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ + WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ + WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */ + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ {} }; static struct coef_fw coef0233[] = { @@ -4145,13 +4157,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0255: - alc_process_coef_fw(codec, coef0255_1); alc_process_coef_fw(codec, coef0255); break; case 0x10ec0236: case 0x10ec0256: alc_process_coef_fw(codec, coef0256); - alc_process_coef_fw(codec, coef0255); break; case 0x10ec0234: case 0x10ec0274: @@ -4204,6 +4214,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ {} }; + static struct coef_fw coef0256[] = { + UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/ + WRITE_COEFEX(0x57, 0x03, 0x09a3), + WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ + {} + }; static struct coef_fw coef0233[] = { UPDATE_COEF(0x35, 0, 1<<14), WRITE_COEF(0x06, 0x2100), @@ -4251,14 +4267,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, }; switch (codec->core.vendor_id) { - case 0x10ec0236: case 0x10ec0255: - case 0x10ec0256: alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0255); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x45, 0xc489); + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); + alc_process_coef_fw(codec, coef0256); + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); + break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: @@ -4340,6 +4361,14 @@ static void alc_headset_mode_default(struct hda_codec *codec) WRITE_COEF(0x49, 0x0049), {} }; + static struct coef_fw coef0256[] = { + WRITE_COEF(0x45, 0xc489), + WRITE_COEFEX(0x57, 0x03, 0x0da3), + WRITE_COEF(0x49, 0x0049), + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ + WRITE_COEF(0x06, 0x6100), + {} + }; static struct coef_fw coef0233[] = { WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x32, 0x4ea3), @@ -4390,11 +4419,16 @@ static void alc_headset_mode_default(struct hda_codec *codec) alc_process_coef_fw(codec, alc225_pre_hsmode); alc_process_coef_fw(codec, coef0225); break; - case 0x10ec0236: case 0x10ec0255: - case 0x10ec0256: alc_process_coef_fw(codec, coef0255); break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x1b, 0x0e4b); + alc_write_coef_idx(codec, 0x45, 0xc089); + msleep(50); + alc_process_coef_fw(codec, coef0256); + break; case 
0x10ec0234: case 0x10ec0274: case 0x10ec0294: @@ -4438,8 +4472,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) }; static struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ - WRITE_COEF(0x1b, 0x0c6b), - WRITE_COEFEX(0x57, 0x03, 0x8ea6), + WRITE_COEF(0x1b, 0x0e6b), {} }; static struct coef_fw coef0233[] = { @@ -4557,8 +4590,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) }; static struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ - WRITE_COEF(0x1b, 0x0c6b), - WRITE_COEFEX(0x57, 0x03, 0x8ea6), + WRITE_COEF(0x1b, 0x0e6b), {} }; static struct coef_fw coef0233[] = { @@ -4690,13 +4722,37 @@ static void alc_determine_headset_type(struct hda_codec *codec) }; switch (codec->core.vendor_id) { - case 0x10ec0236: case 0x10ec0255: + alc_process_coef_fw(codec, coef0255); + msleep(300); + val = alc_read_coef_idx(codec, 0x46); + is_ctia = (val & 0x0070) == 0x0070; + break; + case 0x10ec0236: case 0x10ec0256: + alc_write_coef_idx(codec, 0x1b, 0x0e4b); + alc_write_coef_idx(codec, 0x06, 0x6104); + alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); + + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + msleep(80); + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + alc_process_coef_fw(codec, coef0255); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; + + alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3); + alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); + + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(80); + snd_hda_codec_write(codec, 0x21, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); break; case 0x10ec0234: case 0x10ec0274: @@ -6841,6 +6897,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), @@ -6883,7 +6943,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), - SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), @@ -7543,7 +7603,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; - codec->power_save_node = 
1; + codec->power_save_node = 0; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -7608,7 +7668,6 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0286: case 0x10ec0288: spec->codec_variant = ALC269_TYPE_ALC286; - spec->shutup = alc286_shutup; break; case 0x10ec0298: spec->codec_variant = ALC269_TYPE_ALC298; diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c index ebb9e0cf83647b..28a4ac36c4f856 100644 --- a/sound/soc/codecs/cs42xx8.c +++ b/sound/soc/codecs/cs42xx8.c @@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev) msleep(5); regcache_cache_only(cs42xx8->regmap, false); + regcache_mark_dirty(cs42xx8->regmap); ret = regcache_sync(cs42xx8->regmap); if (ret) { diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index b61d518f4feff1..63487240b61e40 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1828,6 +1828,17 @@ static int hdmi_codec_probe(struct snd_soc_component *component) /* Imp: Store the card pointer in hda_codec */ hdmi->card = dapm->card->snd_card; + /* + * Setup a device_link between card device and HDMI codec device. + * The card device is the consumer and the HDMI codec device is + * the supplier. With this setting, we can make sure that the audio + * domain in display power will be always turned on before operating + * on the HDMI audio codec registers. + * Let's use the flag DL_FLAG_AUTOREMOVE_CONSUMER. This can make + * sure the device link is freed when the machine driver is removed. + */ + device_link_add(component->card->dev, &hdev->dev, DL_FLAG_RPM_ACTIVE | + DL_FLAG_AUTOREMOVE_CONSUMER); /* * hdac_device core already sets the state to active and calls * get_noresume. So enable runtime and set the device to suspend. 
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index d5f73c83728176..7994e8ddc7d217 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -439,8 +439,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream, if (!ret) { ret = snd_pcm_hw_constraint_eld(substream->runtime, hcp->eld); - if (ret) + if (ret) { + mutex_lock(&hcp->current_stream_lock); + hcp->current_stream = NULL; + mutex_unlock(&hcp->current_stream_lock); return ret; + } } /* Select chmap supported */ hdmi_codec_eld_chmap(hcp); diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index c97f21836c66a2..f06ae43650a3ae 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = { &max98090_right_rcv_mixer_controls[0], ARRAY_SIZE(max98090_right_rcv_mixer_controls)), - SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER, - M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux), + SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0, + &max98090_linmod_mux), - SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL, - M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux), + SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0, + &max98090_mixhplsel_mux), - SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL, - M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux), + SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0, + &max98090_mixhprsel_mux), SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE, M98090_HPLEN_SHIFT, 0, NULL, 0), diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c index bd51f3655ee3e5..06abcd01765049 100644 --- a/sound/soc/codecs/rt5677-spi.c +++ b/sound/soc/codecs/rt5677-spi.c @@ -58,13 +58,15 @@ static DEFINE_MUTEX(spi_mutex); * RT5677_SPI_READ/WRITE_32: Transfer 4 bytes * RT5677_SPI_READ/WRITE_BURST: Transfer any multiples of 8 bytes * - * For example, reading 260 bytes at 0x60030002 uses the following commands: - * 0x60030002 RT5677_SPI_READ_16 2 bytes + * Note: + * 16 Bit writes and reads are restricted to the address range + * 0x18020000 ~ 0x18021000 + * + * For example, reading 256 bytes at 0x60030004 uses the following commands: * 0x60030004 RT5677_SPI_READ_32 4 bytes * 0x60030008 RT5677_SPI_READ_BURST 240 bytes * 0x600300F8 RT5677_SPI_READ_BURST 8 bytes * 0x60030100 RT5677_SPI_READ_32 4 bytes - * 0x60030104 RT5677_SPI_READ_16 2 bytes * * Input: * @read: true for read commands; false for write commands @@ -79,15 +81,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len) { u8 cmd; - if (align == 2 || align == 6 || remain == 2) { - cmd = RT5677_SPI_READ_16; - *len = 2; - } else if (align == 4 || remain <= 6) { + if (align == 4 || remain <= 4) { cmd = RT5677_SPI_READ_32; *len = 4; } else { cmd = RT5677_SPI_READ_BURST; - *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN); + *len = (((remain - 1) >> 3) + 1) << 3; + *len = min_t(u32, *len, RT5677_SPI_BURST_LEN); } return read ? cmd : cmd + 1; } @@ -108,7 +108,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen) } } -/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */ +/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. 
*/ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len) { u32 offset; @@ -124,7 +124,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len) if (!g_spi) return -ENODEV; - if ((addr & 1) || (len & 1)) { + if ((addr & 3) || (len & 3)) { dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len); return -EACCES; } @@ -159,13 +159,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len) } EXPORT_SYMBOL_GPL(rt5677_spi_read); -/* Write DSP address space using SPI. addr has to be 2-byte aligned. - * If len is not 2-byte aligned, an extra byte of zero is written at the end +/* Write DSP address space using SPI. addr has to be 4-byte aligned. + * If len is not 4-byte aligned, then extra zeros are written at the end * as padding. */ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len) { - u32 offset, len_with_pad = len; + u32 offset; int status = 0; struct spi_transfer t; struct spi_message m; @@ -178,22 +178,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len) if (!g_spi) return -ENODEV; - if (addr & 1) { + if (addr & 3) { dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len); return -EACCES; } - if (len & 1) - len_with_pad = len + 1; - memset(&t, 0, sizeof(t)); t.tx_buf = buf; t.speed_hz = RT5677_SPI_FREQ; spi_message_init_with_transfers(&m, &t, 1); - for (offset = 0; offset < len_with_pad;) { + for (offset = 0; offset < len;) { spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7, - len_with_pad - offset, &t.len); + len - offset, &t.len); /* Construct SPI message header */ buf[0] = spi_cmd; diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index f70db8412c7ccb..160b2764b2ad89 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -43,6 +43,7 @@ #define MCASP_MAX_AFIFO_DEPTH 64 +#ifdef CONFIG_PM static u32 context_regs[] = { DAVINCI_MCASP_TXFMCTL_REG, DAVINCI_MCASP_RXFMCTL_REG, @@ -65,6 +66,7 @@ struct davinci_mcasp_context { u32 *xrsr_regs; /* for serializer configuration */ bool pm_state; }; +#endif struct davinci_mcasp_ruledata { struct davinci_mcasp *mcasp; diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index 2e75b5bc5f1daa..f721cd4e3f9721 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig @@ -173,16 +173,17 @@ config SND_MPC52xx_SOC_EFIKA endif # SND_POWERPC_SOC +config SND_SOC_IMX_PCM_FIQ + tristate + default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC) + select FIQ + if SND_IMX_SOC config SND_SOC_IMX_SSI tristate select SND_SOC_FSL_UTILS -config SND_SOC_IMX_PCM_FIQ - tristate - select FIQ - comment "SoC Audio support for Freescale i.MX boards:" config SND_MXC_SOC_WM1133_EV1 diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c index 191426a6d9adff..30a3d68b5c0335 100644 --- a/sound/soc/fsl/eukrea-tlv320.c +++ b/sound/soc/fsl/eukrea-tlv320.c @@ -118,13 +118,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "fsl,mux-int-port node missing or invalid.\n"); - return ret; + goto err; } ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port); if (ret) { dev_err(&pdev->dev, "fsl,mux-ext-port node missing or invalid.\n"); - return ret; + goto err; } /* diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 528e8b10842297..09e03b884a829c 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair) return -EINVAL; } - if ((outrate > 
8000 && outrate < 30000) && - (outrate/inrate > 24 || inrate/outrate > 8)) { + if ((outrate >= 8000 && outrate <= 30000) && + (outrate > 24 * inrate || inrate > 8 * outrate)) { pair_err("exceed supported ratio range [1/24, 8] for \ inrate/outrate: %d/%d\n", inrate, outrate); return -EINVAL; diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index 38fd32ab443c31..ff96db91f818aa 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -251,7 +251,7 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, break; case ESAI_HCKT_EXTAL: ecr |= ESAI_ECR_ETI; - /* fall through */ + break; case ESAI_HCKR_EXTAL: ecr |= ESAI_ECR_ERI; break; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 4163f2cfc06fc7..bfc5b21d0c3f99 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -268,12 +268,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, case SND_SOC_DAIFMT_CBS_CFS: val_cr2 |= FSL_SAI_CR2_BCD_MSTR; val_cr4 |= FSL_SAI_CR4_FSD_MSTR; + sai->is_slave_mode = false; break; case SND_SOC_DAIFMT_CBM_CFM: sai->is_slave_mode = true; break; case SND_SOC_DAIFMT_CBS_CFM: val_cr2 |= FSL_SAI_CR2_BCD_MSTR; + sai->is_slave_mode = false; break; case SND_SOC_DAIFMT_CBM_CFS: val_cr4 |= FSL_SAI_CR4_FSD_MSTR; diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c index 7f0fa4b5222318..cca33ab7020a45 100644 --- a/sound/soc/fsl/fsl_utils.c +++ b/sound/soc/fsl/fsl_utils.c @@ -71,6 +71,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np, iprop = of_get_property(dma_np, "cell-index", NULL); if (!iprop) { of_node_put(dma_np); + of_node_put(dma_channel_np); return -EINVAL; } *dma_id = be32_to_cpup(iprop); diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c index 38f6ab74709d06..07491a0f8fb8bb 100644 --- a/sound/soc/intel/boards/kbl_da7219_max98357a.c +++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c @@ -188,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd) jack = &ctx->kabylake_headset; - snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA); + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND); diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index aa28510d23adae..bbcb0d4d83aecf 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -720,6 +720,15 @@ static int line6_init_cap_control(struct usb_line6 *line6) return 0; } +static void line6_startup_work(struct work_struct *work) +{ + struct usb_line6 *line6 = + container_of(work, struct usb_line6, startup_work.work); + + if (line6->startup) + line6->startup(line6); +} + /* Probe USB device. 
*/ @@ -755,6 +764,7 @@ int line6_probe(struct usb_interface *interface, line6->properties = properties; line6->usbdev = usbdev; line6->ifcdev = &interface->dev; + INIT_DELAYED_WORK(&line6->startup_work, line6_startup_work); strcpy(card->id, properties->id); strcpy(card->driver, driver_name); @@ -825,6 +835,8 @@ void line6_disconnect(struct usb_interface *interface) if (WARN_ON(usbdev != line6->usbdev)) return; + cancel_delayed_work(&line6->startup_work); + if (line6->urb_listen != NULL) line6_stop_listen(line6); diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h index 61425597eb61ce..650d909c9c4ff8 100644 --- a/sound/usb/line6/driver.h +++ b/sound/usb/line6/driver.h @@ -178,11 +178,15 @@ struct usb_line6 { fifo; } messages; + /* Work for delayed PCM startup */ + struct delayed_work startup_work; + /* If MIDI is supported, buffer_message contains the pre-processed data; * otherwise the data is only in urb_listen (buffer_incoming). */ void (*process_message)(struct usb_line6 *); void (*disconnect)(struct usb_line6 *line6); + void (*startup)(struct usb_line6 *line6); }; extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c index 19bee725de00dd..7e39083f8f76ac 100644 --- a/sound/usb/line6/toneport.c +++ b/sound/usb/line6/toneport.c @@ -54,9 +54,6 @@ struct usb_line6_toneport { /* Firmware version (x 100) */ u8 firmware_version; - /* Timer for delayed PCM startup */ - struct timer_list timer; - /* Device type */ enum line6_device_type type; @@ -241,11 +238,8 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol, return 1; } -static void toneport_start_pcm(struct timer_list *t) +static void toneport_startup(struct usb_line6 *line6) { - struct usb_line6_toneport *toneport = from_timer(toneport, t, timer); - struct usb_line6 *line6 = &toneport->line6; - line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true); } @@ -393,7 +387,8 @@ static int toneport_setup(struct usb_line6_toneport *toneport) if (toneport_has_led(toneport)) toneport_update_led(toneport); - mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ); + schedule_delayed_work(&toneport->line6.startup_work, + msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000)); return 0; } @@ -405,8 +400,6 @@ static void line6_toneport_disconnect(struct usb_line6 *line6) struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)line6; - del_timer_sync(&toneport->timer); - if (toneport_has_led(toneport)) toneport_remove_leds(toneport); } @@ -422,9 +415,9 @@ static int toneport_init(struct usb_line6 *line6, struct usb_line6_toneport *toneport = (struct usb_line6_toneport *) line6; toneport->type = id->driver_info; - timer_setup(&toneport->timer, toneport_start_pcm, 0); line6->disconnect = line6_toneport_disconnect; + line6->startup = toneport_startup; /* initialize PCM subsystem: */ err = line6_init_pcm(line6, &toneport_pcm_properties); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index e7d441d0e839fa..5a10b1b7f6b959 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2679,6 +2679,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval); if (! 
kctl) { usb_audio_err(state->chip, "cannot malloc kcontrol\n"); + for (i = 0; i < desc->bNrInPins; i++) + kfree(namelist[i]); kfree(namelist); kfree(cval); return -ENOMEM; diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h index dc90c0c2fae352..fee7983a90b4f4 100644 --- a/tools/arch/x86/include/asm/rmwcc.h +++ b/tools/arch/x86/include/asm/rmwcc.h @@ -2,7 +2,7 @@ #ifndef _TOOLS_LINUX_ASM_X86_RMWcc #define _TOOLS_LINUX_ASM_X86_RMWcc -#ifdef CC_HAVE_ASM_GOTO +#ifdef CONFIG_CC_HAS_ASM_GOTO #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ @@ -20,7 +20,7 @@ cc_label: \ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) -#else /* !CC_HAVE_ASM_GOTO */ +#else /* !CONFIG_CC_HAS_ASM_GOTO */ #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ @@ -37,6 +37,6 @@ do { \ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) -#endif /* CC_HAVE_ASM_GOTO */ +#endif /* CONFIG_CC_HAS_ASM_GOTO */ #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */ diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index 67167e44b72662..8248b8dd89d4b9 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -1,5 +1,5 @@ *.d -bpftool +/bpftool bpftool*.8 bpf-helpers.* FEATURE-DUMP.bpftool diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 195ba486640f9b..ba7ee74ee53305 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -575,8 +575,12 @@ class TracepointProvider(Provider): def update_fields(self, fields_filter): """Refresh fields, applying fields_filter""" self.fields = [field for field in self._get_available_fields() - if self.is_field_wanted(fields_filter, field) or - ARCH.tracepoint_is_child(field)] + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.tracepoint_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) @staticmethod def _get_online_cpus(): @@ -735,8 +739,12 @@ class DebugfsProvider(Provider): def update_fields(self, fields_filter): """Refresh fields, applying fields_filter""" self._fields = [field for field in self._get_available_fields() - if self.is_field_wanted(fields_filter, field) or - ARCH.debugfs_is_child(field)] + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.debugfs_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) @property def fields(self): diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt index 0811d860fe7500..c057ba52364e0f 100644 --- a/tools/kvm/kvm_stat/kvm_stat.txt +++ b/tools/kvm/kvm_stat/kvm_stat.txt @@ -34,6 +34,8 @@ INTERACTIVE COMMANDS *c*:: clear filter *f*:: filter by regular expression + :: *Note*: Child events pull in their parents, and parents' stats summarize + all child events, not just the filtered ones *g*:: filter by guest name/PID diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 7a0014794bff72..dd0b68d1f4be09 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -53,6 +53,8 @@ # define __NR_bpf 349 # elif defined(__s390__) # define __NR_bpf 351 +# elif defined(__arc__) +# define __NR_bpf 280 # else # error __NR_bpf not defined. libbpf does not support your arch. 
# endif diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 6f38164b261812..c3145ab3bdcaca 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -26,6 +26,7 @@ #include #include #include +#include struct bpf_create_map_attr { const char *name; diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 53f8be0f4a1f76..88158239622bce 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,11 +7,12 @@ ARCH := x86 endif # always use the host compiler +HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) -AR = ar ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index ef152daccc3331..ecf5fc77f50b56 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -28,6 +28,8 @@ #include #include +#define FAKE_JUMP_OFFSET -1 + struct alternative { struct list_head list; struct instruction *insn; @@ -501,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->type != INSN_JUMP_UNCONDITIONAL) continue; - if (insn->ignore) + if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) continue; rela = find_rela_by_dest_range(insn->sec, insn->offset, @@ -670,10 +672,10 @@ static int handle_group_alt(struct objtool_file *file, clear_insn_state(&fake_jump->state); fake_jump->sec = special_alt->new_sec; - fake_jump->offset = -1; + fake_jump->offset = FAKE_JUMP_OFFSET; fake_jump->type = INSN_JUMP_UNCONDITIONAL; fake_jump->jump_dest = list_next_entry(last_orig_insn, list); - fake_jump->ignore = true; + fake_jump->func = orig_insn->func; } if (!special_alt->new_len) { @@ -837,7 +839,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn, struct symbol *pfunc = insn->func->pfunc; unsigned int prev_offset = 0; - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) { if (rela == next_table) break; @@ -927,6 +929,7 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + struct section *rodata_sec; unsigned long table_offset; /* @@ -954,10 +957,13 @@ static struct rela *find_switch_table(struct objtool_file *file, /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (!text_rela || text_rela->sym != file->rodata->sym) + if (!text_rela || text_rela->sym->type != STT_SECTION || + !text_rela->sym->sec->rodata) continue; table_offset = text_rela->addend; + rodata_sec = text_rela->sym->sec; + if (text_rela->type == R_X86_64_PC32) table_offset += 4; @@ -965,10 +971,10 @@ static struct rela *find_switch_table(struct objtool_file *file, * Make sure the .rodata address isn't associated with a * symbol. gcc jump tables are anonymous data. 
*/ - if (find_symbol_containing(file->rodata, table_offset)) + if (find_symbol_containing(rodata_sec, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, table_offset); + rodata_rela = find_rela_by_dest(rodata_sec, table_offset); if (rodata_rela) { /* * Use of RIP-relative switch jumps is quite rare, and @@ -1053,7 +1059,7 @@ static int add_switch_table_alts(struct objtool_file *file) struct symbol *func; int ret; - if (!file->rodata || !file->rodata->rela) + if (!file->rodata) return 0; for_each_sec(file, sec) { @@ -1199,10 +1205,33 @@ static int read_retpoline_hints(struct objtool_file *file) return 0; } +static void mark_rodata(struct objtool_file *file) +{ + struct section *sec; + bool found = false; + + /* + * This searches for the .rodata section or multiple .rodata.func_name + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8 + * rodata sections are ignored as they don't contain jump tables. + */ + for_each_sec(file, sec) { + if (!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) { + sec->rodata = true; + found = true; + } + } + + file->rodata = found; +} + static int decode_sections(struct objtool_file *file) { int ret; + mark_rodata(file); + ret = decode_instructions(file); if (ret) return ret; @@ -1805,7 +1834,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, return 1; } - func = insn->func ? insn->func->pfunc : NULL; + if (insn->func) + func = insn->func->pfunc; if (func && insn->ignore) { WARN_FUNC("BUG: why am I validating an ignored function?", @@ -2173,7 +2203,6 @@ int check(const char *_objname, bool orc) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); - file.rodata = find_section_by_name(file.elf, ".rodata"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 95700a2bcb7c1e..e6e8a655b5563e 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -60,8 +60,8 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *rodata, *whitelist; - bool ignore_unreachables, c_file, hints; + struct section *whitelist; + bool ignore_unreachables, c_file, hints, rodata; }; int check(const char *objname, bool orc); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index b75d004f6482d8..abed594a9653b0 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -390,6 +390,7 @@ static int read_relas(struct elf *elf) rela->offset = rela->rela.r_offset; symndx = GELF_R_SYM(rela->rela.r_info); rela->sym = find_symbol_by_index(elf, symndx); + rela->rela_sec = sec; if (!rela->sym) { WARN("can't find rela entry symbol %d for %s", symndx, sec->name); diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index de5cd2ddded987..bc97ed86b9cd8e 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -48,7 +48,7 @@ struct section { char *name; int idx; unsigned int len; - bool changed, text; + bool changed, text, rodata; }; struct symbol { @@ -68,6 +68,7 @@ struct rela { struct list_head list; struct hlist_node hash; GElf_Rela rela; + struct section *rela_sec; struct symbol *sym; unsigned int type; unsigned long offset; diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c index 0b205400731444..a19690a17291eb 100644 --- a/tools/perf/arch/s390/util/machine.c 
+++ b/tools/perf/arch/s390/util/machine.c @@ -5,16 +5,19 @@ #include "util.h" #include "machine.h" #include "api/fs/fs.h" +#include "debug.h" int arch__fix_module_text_start(u64 *start, const char *name) { + u64 m_start = *start; char path[PATH_MAX]; snprintf(path, PATH_MAX, "module/%.*s/sections/.text", (int)strlen(name) - 2, name + 1); - - if (sysfs__read_ull(path, (unsigned long long *)start) < 0) - return -1; + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { + pr_debug2("Using module %s start:%#lx\n", path, m_start); + *start = m_start; + } return 0; } diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 44195514b19e65..fa56fde6e8d803 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -38,6 +38,10 @@ #include #include +#ifndef RUSAGE_THREAD +# define RUSAGE_THREAD 1 +#endif + /* * Regular printout to the terminal, supressed if -q is specified: */ diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c index abd38abf1d918a..24f2a87cf91ddf 100644 --- a/tools/perf/util/data-convert-bt.c +++ b/tools/perf/util/data-convert-bt.c @@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string) if (i > 0) strncpy(buffer, string, i); } - strncat(buffer + p, numstr, 4); + memcpy(buffer + p, numstr, 4); p += 3; } } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 0bc3e6e93c31fc..4357141c7c924b 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -58,6 +58,7 @@ enum intel_pt_pkt_state { INTEL_PT_STATE_NO_IP, INTEL_PT_STATE_ERR_RESYNC, INTEL_PT_STATE_IN_SYNC, + INTEL_PT_STATE_TNT_CONT, INTEL_PT_STATE_TNT, INTEL_PT_STATE_TIP, INTEL_PT_STATE_TIP_PGD, @@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state) case INTEL_PT_STATE_NO_IP: case INTEL_PT_STATE_ERR_RESYNC: case INTEL_PT_STATE_IN_SYNC: - case INTEL_PT_STATE_TNT: + case INTEL_PT_STATE_TNT_CONT: return true; + case INTEL_PT_STATE_TNT: case INTEL_PT_STATE_TIP: case INTEL_PT_STATE_TIP_PGD: case INTEL_PT_STATE_FUP: @@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder) timestamp = decoder->timestamp + decoder->timestamp_insn_cnt; masked_timestamp = timestamp & decoder->period_mask; if (decoder->continuous_period) { - if (masked_timestamp != decoder->last_masked_timestamp) + if (masked_timestamp > decoder->last_masked_timestamp) return 1; } else { timestamp += 1; masked_timestamp = timestamp & decoder->period_mask; - if (masked_timestamp != decoder->last_masked_timestamp) { + if (masked_timestamp > decoder->last_masked_timestamp) { decoder->last_masked_timestamp = masked_timestamp; decoder->continuous_period = true; } } + + if (masked_timestamp < decoder->last_masked_timestamp) + return decoder->period_ticks; + return decoder->period_ticks - (timestamp - masked_timestamp); } @@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder) case INTEL_PT_PERIOD_TICKS: timestamp = decoder->timestamp + decoder->timestamp_insn_cnt; masked_timestamp = timestamp & decoder->period_mask; - decoder->last_masked_timestamp = masked_timestamp; + if (masked_timestamp > decoder->last_masked_timestamp) + decoder->last_masked_timestamp = masked_timestamp; + else + decoder->last_masked_timestamp += decoder->period_ticks; break; case INTEL_PT_PERIOD_NONE: case INTEL_PT_PERIOD_MTC: @@ -1249,7 +1258,9 @@ static int 
intel_pt_walk_tnt(struct intel_pt_decoder *decoder) return -ENOENT; } decoder->tnt.count -= 1; - if (!decoder->tnt.count) + if (decoder->tnt.count) + decoder->pkt_state = INTEL_PT_STATE_TNT_CONT; + else decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; decoder->tnt.payload <<= 1; decoder->state.from_ip = decoder->ip; @@ -1280,7 +1291,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder) if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) { decoder->tnt.count -= 1; - if (!decoder->tnt.count) + if (decoder->tnt.count) + decoder->pkt_state = INTEL_PT_STATE_TNT_CONT; + else decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; if (decoder->tnt.payload & BIT63) { decoder->tnt.payload <<= 1; @@ -1300,8 +1313,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder) return 0; } decoder->ip += intel_pt_insn.length; - if (!decoder->tnt.count) + if (!decoder->tnt.count) { + decoder->sample_timestamp = decoder->timestamp; + decoder->sample_insn_cnt = decoder->timestamp_insn_cnt; return -EAGAIN; + } decoder->tnt.payload <<= 1; continue; } @@ -2349,6 +2365,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_walk_trace(decoder); break; case INTEL_PT_STATE_TNT: + case INTEL_PT_STATE_TNT_CONT: err = intel_pt_walk_tnt(decoder); if (err == -EAGAIN) err = intel_pt_walk_trace(decoder); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 2048d393ece6f2..56007a7e0b4d7f 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -128,7 +128,7 @@ void thread__put(struct thread *thread) } } -struct namespaces *thread__namespaces(const struct thread *thread) +static struct namespaces *__thread__namespaces(const struct thread *thread) { if (list_empty(&thread->namespaces_list)) return NULL; @@ -136,10 +136,21 @@ struct namespaces *thread__namespaces(const struct thread *thread) return list_first_entry(&thread->namespaces_list, struct namespaces, list); } +struct namespaces *thread__namespaces(const struct thread *thread) +{ + struct namespaces *ns; + + down_read((struct rw_semaphore *)&thread->namespaces_lock); + ns = __thread__namespaces(thread); + up_read((struct rw_semaphore *)&thread->namespaces_lock); + + return ns; +} + static int __thread__set_namespaces(struct thread *thread, u64 timestamp, struct namespaces_event *event) { - struct namespaces *new, *curr = thread__namespaces(thread); + struct namespaces *new, *curr = __thread__namespaces(thread); new = namespaces__new(event); if (!new) diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c index 8fcd1c076add0b..cbd55f5f8d5984 100644 --- a/tools/testing/selftests/bpf/test_libbpf_open.c +++ b/tools/testing/selftests/bpf/test_libbpf_open.c @@ -11,6 +11,8 @@ static const char *__doc__ = #include #include +#include "bpf_rlimit.h" + static const struct option long_options[] = { {"help", no_argument, NULL, 'h' }, {"debug", no_argument, NULL, 'D' }, diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 294fc18aba2aa1..9db5a7378f40fd 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -32,7 +32,6 @@ #include #include -#include #ifdef HAVE_GENHDR # include "autoconf.h" @@ -57,7 +56,6 @@ #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" static bool unpriv_disabled = false; -static int skips; struct bpf_test { const char *descr; @@ -12772,11 +12770,6 @@ static void do_test_single(struct bpf_test *test, bool 
unpriv, fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1); - if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) { - printf("SKIP (unsupported program type %d)\n", prog_type); - skips++; - goto close_fds; - } expected_ret = unpriv && test->result_unpriv != UNDEF ? test->result_unpriv : test->result; @@ -12912,7 +12905,7 @@ static void get_unpriv_disabled() static int do_test(bool unpriv, unsigned int from, unsigned int to) { - int i, passes = 0, errors = 0; + int i, passes = 0, errors = 0, skips = 0; for (i = from; i < to; i++) { struct bpf_test *test = &tests[i]; diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index cabe2a3a3b3007..cf156b35367985 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -51,6 +51,10 @@ struct ksym *ksym_search(long key) int start = 0, end = sym_cnt; int result; + /* kallsyms not loaded. return NULL */ + if (sym_cnt <= 0) + return NULL; + while (start < end) { size_t mid = start + (end - start) / 2; diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index be59f9c34ea21d..79053a4f478384 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo char *parent = NULL, *child = NULL; if (cg_read_strstr(root, "cgroup.controllers", "cpu") || - cg_read_strstr(root, "cgroup.subtree_control", "cpu")) { + cg_write(root, "cgroup.subtree_control", "+cpu")) { ret = KSFT_SKIP; goto cleanup; } @@ -376,6 +376,11 @@ int main(int argc, char *argv[]) if (cg_find_unified_root(root, sizeof(root))) ksft_exit_skip("cgroup v2 isn't mounted\n"); + + if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) + if (cg_write(root, "cgroup.subtree_control", "+memory")) + ksft_exit_skip("Failed to set memory controller\n"); + for (i = 0; i < ARRAY_SIZE(tests); i++) { switch (tests[i].fn(root)) { case KSFT_PASS: diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index 28d321ba311b48..c19a97dd02d496 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -26,7 +26,7 @@ */ static int test_memcg_subtree_control(const char *root) { - char *parent, *child, *parent2, *child2; + char *parent, *child, *parent2 = NULL, *child2 = NULL; int ret = KSFT_FAIL; char buf[PAGE_SIZE]; @@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root) parent = cg_name(root, "memcg_test_0"); child = cg_name(root, "memcg_test_0/memcg_test_1"); if (!parent || !child) - goto cleanup; + goto cleanup_free; if (cg_create(parent)) - goto cleanup; + goto cleanup_free; if (cg_write(parent, "cgroup.subtree_control", "+memory")) - goto cleanup; + goto cleanup_parent; if (cg_create(child)) - goto cleanup; + goto cleanup_parent; if (cg_read_strstr(child, "cgroup.controllers", "memory")) - goto cleanup; + goto cleanup_child; /* Create two nested cgroups without enabling memory controller */ parent2 = cg_name(root, "memcg_test_1"); child2 = cg_name(root, "memcg_test_1/memcg_test_1"); if (!parent2 || !child2) - goto cleanup; + goto cleanup_free2; if (cg_create(parent2)) - goto cleanup; + goto cleanup_free2; if (cg_create(child2)) - goto cleanup; + goto 
cleanup_parent2; if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf))) - goto cleanup; + goto cleanup_all; if (!cg_read_strstr(child2, "cgroup.controllers", "memory")) - goto cleanup; + goto cleanup_all; ret = KSFT_PASS; -cleanup: - cg_destroy(child); - cg_destroy(parent); - free(parent); - free(child); - +cleanup_all: cg_destroy(child2); +cleanup_parent2: cg_destroy(parent2); +cleanup_free2: free(parent2); free(child2); +cleanup_child: + cg_destroy(child); +cleanup_parent: + cg_destroy(parent); +cleanup_free: + free(parent); + free(child); return ret; } @@ -1201,6 +1205,10 @@ int main(int argc, char **argv) if (cg_read_strstr(root, "cgroup.controllers", "memory")) ksft_exit_skip("memory controller isn't available\n"); + if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) + if (cg_write(root, "cgroup.subtree_control", "+memory")) + ksft_exit_skip("Failed to set memory controller\n"); + for (i = 0; i < ARRAY_SIZE(tests); i++) { switch (tests[i].fn(root)) { case KSFT_PASS: diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh index d84193bdc307fc..dbd90ca73e44fb 100755 --- a/tools/testing/selftests/net/fib_rule_tests.sh +++ b/tools/testing/selftests/net/fib_rule_tests.sh @@ -55,7 +55,7 @@ setup() $IP link add dummy0 type dummy $IP link set dev dummy0 up - $IP address add 198.51.100.1/24 dev dummy0 + $IP address add 192.51.100.1/24 dev dummy0 $IP -6 address add 2001:db8:1::1/64 dev dummy0 set +e diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 8ec76681605cca..f25f72a75cf3db 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -23,7 +23,11 @@ ip netns add ns0 ip netns add ns1 ip netns add ns2 -ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 +ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1 +if [ $? 
-ne 0 ];then + echo "SKIP: No virtual ethernet pair device support in kernel" + exit $ksft_skip +fi ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 ip -net ns0 link set lo up diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c index 0caca3a06bd26e..54d8d87f36b3ca 100644 --- a/tools/testing/selftests/timers/adjtick.c +++ b/tools/testing/selftests/timers/adjtick.c @@ -136,6 +136,7 @@ int check_tick_adj(long tickval) eppm = get_ppm_drift(); printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm); + fflush(stdout); tx1.modes = 0; adjtimex(&tx1); diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c index 830c462f605d94..dc80728ed19155 100644 --- a/tools/testing/selftests/timers/leapcrash.c +++ b/tools/testing/selftests/timers/leapcrash.c @@ -101,6 +101,7 @@ int main(void) } clear_time_state(); printf("."); + fflush(stdout); } printf("[OK]\n"); return ksft_exit_pass(); diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c index 1867db5d6f5e0f..7916cf5cc6fff1 100644 --- a/tools/testing/selftests/timers/mqueue-lat.c +++ b/tools/testing/selftests/timers/mqueue-lat.c @@ -102,6 +102,7 @@ int main(int argc, char **argv) int ret; printf("Mqueue latency : "); + fflush(stdout); ret = mqueue_lat_test(); if (ret < 0) { diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c index 8adb0bb51d4da4..71b5441c2fd9f6 100644 --- a/tools/testing/selftests/timers/nanosleep.c +++ b/tools/testing/selftests/timers/nanosleep.c @@ -142,6 +142,7 @@ int main(int argc, char **argv) continue; printf("Nanosleep %-31s ", clockstring(clockid)); + fflush(stdout); length = 10; while (length <= (NSEC_PER_SEC * 10)) { diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c index c3c3dc10db17fe..eb3e79ed7b4a9e 100644 --- a/tools/testing/selftests/timers/nsleep-lat.c +++ b/tools/testing/selftests/timers/nsleep-lat.c @@ -155,6 +155,7 @@ int main(int argc, char **argv) continue; printf("nsleep latency %-26s ", clockstring(clockid)); + fflush(stdout); length = 10; while (length <= (NSEC_PER_SEC * 10)) { diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c index dcf73c5dab6e12..b41d8dd0c40c56 100644 --- a/tools/testing/selftests/timers/raw_skew.c +++ b/tools/testing/selftests/timers/raw_skew.c @@ -112,6 +112,7 @@ int main(int argv, char **argc) printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n"); printf("Estimating clock drift: "); + fflush(stdout); sleep(120); get_monotonic_and_raw(&mon, &raw); diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c index 70fed27d8fd3cf..8c4179ee2ca294 100644 --- a/tools/testing/selftests/timers/set-tai.c +++ b/tools/testing/selftests/timers/set-tai.c @@ -55,6 +55,7 @@ int main(int argc, char **argv) printf("tai offset started at %i\n", ret); printf("Checking tai offsets can be properly set: "); + fflush(stdout); for (i = 1; i <= 60; i++) { ret = set_tai(i); ret = get_tai(); diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c index 877fd5532fee03..62bd33eb16f0a5 100644 --- a/tools/testing/selftests/timers/set-tz.c +++ b/tools/testing/selftests/timers/set-tz.c @@ -65,6 +65,7 @@ int main(int argc, char **argv) printf("tz_minuteswest started at %i, dst at %i\n", min, dst); printf("Checking 
tz_minuteswest can be properly set: "); + fflush(stdout); for (i = -15*60; i < 15*60; i += 30) { ret = set_tz(i, dst); ret = get_tz_min(); @@ -76,6 +77,7 @@ int main(int argc, char **argv) printf("[OK]\n"); printf("Checking invalid tz_minuteswest values are caught: "); + fflush(stdout); if (!set_tz(-15*60-1, dst)) { printf("[FAILED] %i didn't return failure!\n", -15*60-1); diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c index 759c9c06f1a034..cf3e48919874b3 100644 --- a/tools/testing/selftests/timers/threadtest.c +++ b/tools/testing/selftests/timers/threadtest.c @@ -163,6 +163,7 @@ int main(int argc, char **argv) strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start)); printf("%s\n", buf); printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime); + fflush(stdout); /* spawn */ for (i = 0; i < thread_count; i++) diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c index d9d3ab93b31a75..5397de708d3c25 100644 --- a/tools/testing/selftests/timers/valid-adjtimex.c +++ b/tools/testing/selftests/timers/valid-adjtimex.c @@ -123,6 +123,7 @@ int validate_freq(void) /* Set the leap second insert flag */ printf("Testing ADJ_FREQ... "); + fflush(stdout); for (i = 0; i < NUM_FREQ_VALID; i++) { tx.modes = ADJ_FREQUENCY; tx.freq = valid_freq[i]; @@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano) int validate_set_offset(void) { printf("Testing ADJ_SETOFFSET... "); + fflush(stdout); /* Test valid values */ if (set_offset(NSEC_PER_SEC - 1, 1)) diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index dc68340a6a960f..2cf3dc49bd0346 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -24,6 +24,8 @@ TEST_GEN_FILES += virtual_address_range TEST_PROGS := run_vmtests +TEST_FILES := test_vmalloc.sh + KSFT_KHDR_INSTALL := 1 include ../lib.mk diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index 5abbe9b3c65213..6880236974b854 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c @@ -25,127 +25,6 @@ #include #include -/* - * stolen from arch/arm/kernel/opcodes.c - * - * condition code lookup table - * index into the table is test code: EQ, NE, ... LT, GT, AL, NV - * - * bit position in short is condition code: NZCV - */ -static const unsigned short cc_map[16] = { - 0xF0F0, /* EQ == Z set */ - 0x0F0F, /* NE */ - 0xCCCC, /* CS == C set */ - 0x3333, /* CC */ - 0xFF00, /* MI == N set */ - 0x00FF, /* PL */ - 0xAAAA, /* VS == V set */ - 0x5555, /* VC */ - 0x0C0C, /* HI == C set && Z clear */ - 0xF3F3, /* LS == C clear || Z set */ - 0xAA55, /* GE == (N==V) */ - 0x55AA, /* LT == (N!=V) */ - 0x0A05, /* GT == (!Z && (N==V)) */ - 0xF5FA, /* LE == (Z || (N!=V)) */ - 0xFFFF, /* AL always */ - 0 /* NV */ -}; - -/* - * Check if a trapped instruction should have been executed or not. - */ -bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) -{ - unsigned long cpsr; - u32 cpsr_cond; - int cond; - - /* Top two bits non-zero? Unconditional. */ - if (kvm_vcpu_get_hsr(vcpu) >> 30) - return true; - - /* Is condition field valid? */ - cond = kvm_vcpu_get_condition(vcpu); - if (cond == 0xE) - return true; - - cpsr = *vcpu_cpsr(vcpu); - - if (cond < 0) { - /* This can happen in Thumb mode: examine IT state. */ - unsigned long it; - - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); - - /* it == 0 => unconditional. 
*/ - if (it == 0) - return true; - - /* The cond for this insn works out as the top 4 bits. */ - cond = (it >> 4); - } - - cpsr_cond = cpsr >> 28; - - if (!((cc_map[cond] >> cpsr_cond) & 1)) - return false; - - return true; -} - -/** - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block - * @vcpu: The VCPU pointer - * - * When exceptions occur while instructions are executed in Thumb IF-THEN - * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have - * to do this little bit of work manually. The fields map like this: - * - * IT[7:0] -> CPSR[26:25],CPSR[15:10] - */ -static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) -{ - unsigned long itbits, cond; - unsigned long cpsr = *vcpu_cpsr(vcpu); - bool is_arm = !(cpsr & PSR_AA32_T_BIT); - - if (is_arm || !(cpsr & PSR_AA32_IT_MASK)) - return; - - cond = (cpsr & 0xe000) >> 13; - itbits = (cpsr & 0x1c00) >> (10 - 2); - itbits |= (cpsr & (0x3 << 25)) >> 25; - - /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ - if ((itbits & 0x7) == 0) - itbits = cond = 0; - else - itbits = (itbits << 1) & 0x1f; - - cpsr &= ~PSR_AA32_IT_MASK; - cpsr |= cond << 13; - cpsr |= (itbits & 0x1c) << (10 - 2); - cpsr |= (itbits & 0x3) << 25; - *vcpu_cpsr(vcpu) = cpsr; -} - -/** - * kvm_skip_instr - skip a trapped instruction and proceed to the next - * @vcpu: The vcpu pointer - */ -void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) -{ - bool is_thumb; - - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); - if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; - else - *vcpu_pc(vcpu) += 4; - kvm_adjust_itstate(vcpu); -} - /* * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 1415e36fed3db8..02bac8abd206fd 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -223,6 +223,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; @@ -949,7 +952,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { - unsigned int i; + unsigned int i, ret; int phys_target = kvm_target_cpu(); if (init->target != phys_target) @@ -984,9 +987,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, vcpu->arch.target = phys_target; /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} + ret = kvm_reset_vcpu(vcpu); + if (ret) { + vcpu->arch.target = -1; + bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); + } + return ret; +} static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c new file mode 100644 index 00000000000000..d31f267961e75a --- /dev/null +++ b/virt/kvm/arm/hyp/aarch32.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hyp portion of the (not much of an) Emulation layer for 32bit guests. + * + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier + * + * based on arch/arm/kvm/emulate.c + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + */ + +#include +#include +#include + +/* + * stolen from arch/arm/kernel/opcodes.c + * + * condition code lookup table + * index into the table is test code: EQ, NE, ... 
LT, GT, AL, NV + * + * bit position in short is condition code: NZCV + */ +static const unsigned short cc_map[16] = { + 0xF0F0, /* EQ == Z set */ + 0x0F0F, /* NE */ + 0xCCCC, /* CS == C set */ + 0x3333, /* CC */ + 0xFF00, /* MI == N set */ + 0x00FF, /* PL */ + 0xAAAA, /* VS == V set */ + 0x5555, /* VC */ + 0x0C0C, /* HI == C set && Z clear */ + 0xF3F3, /* LS == C clear || Z set */ + 0xAA55, /* GE == (N==V) */ + 0x55AA, /* LT == (N!=V) */ + 0x0A05, /* GT == (!Z && (N==V)) */ + 0xF5FA, /* LE == (Z || (N!=V)) */ + 0xFFFF, /* AL always */ + 0 /* NV */ +}; + +/* + * Check if a trapped instruction should have been executed or not. + */ +bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) +{ + unsigned long cpsr; + u32 cpsr_cond; + int cond; + + /* Top two bits non-zero? Unconditional. */ + if (kvm_vcpu_get_hsr(vcpu) >> 30) + return true; + + /* Is condition field valid? */ + cond = kvm_vcpu_get_condition(vcpu); + if (cond == 0xE) + return true; + + cpsr = *vcpu_cpsr(vcpu); + + if (cond < 0) { + /* This can happen in Thumb mode: examine IT state. */ + unsigned long it; + + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); + + /* it == 0 => unconditional. */ + if (it == 0) + return true; + + /* The cond for this insn works out as the top 4 bits. */ + cond = (it >> 4); + } + + cpsr_cond = cpsr >> 28; + + if (!((cc_map[cond] >> cpsr_cond) & 1)) + return false; + + return true; +} + +/** + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block + * @vcpu: The VCPU pointer + * + * When exceptions occur while instructions are executed in Thumb IF-THEN + * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have + * to do this little bit of work manually. The fields map like this: + * + * IT[7:0] -> CPSR[26:25],CPSR[15:10] + */ +static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) +{ + unsigned long itbits, cond; + unsigned long cpsr = *vcpu_cpsr(vcpu); + bool is_arm = !(cpsr & PSR_AA32_T_BIT); + + if (is_arm || !(cpsr & PSR_AA32_IT_MASK)) + return; + + cond = (cpsr & 0xe000) >> 13; + itbits = (cpsr & 0x1c00) >> (10 - 2); + itbits |= (cpsr & (0x3 << 25)) >> 25; + + /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ + if ((itbits & 0x7) == 0) + itbits = cond = 0; + else + itbits = (itbits << 1) & 0x1f; + + cpsr &= ~PSR_AA32_IT_MASK; + cpsr |= cond << 13; + cpsr |= (itbits & 0x1c) << (10 - 2); + cpsr |= (itbits & 0x3) << 25; + *vcpu_cpsr(vcpu) = cpsr; +} + +/** + * kvm_skip_instr - skip a trapped instruction and proceed to the next + * @vcpu: The vcpu pointer + */ +void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ + bool is_thumb; + + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); + if (is_thumb && !is_wide_instr) + *vcpu_pc(vcpu) += 2; + else + *vcpu_pc(vcpu) += 4; + kvm_adjust_itstate(vcpu); +} diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e909d9907b5066..2b36a51afb5764 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2965,8 +2965,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_MULTI_ADDRESS_SPACE: return KVM_ADDRESS_SPACE_NUM; #endif - case KVM_CAP_MAX_VCPU_ID: - return KVM_MAX_VCPU_ID; default: break; }
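
The two hunks above move the KVM_CAP_MAX_VCPU_ID case out of the generic kvm_vm_ioctl_check_extension_generic() switch in virt/kvm/kvm_main.c and into the arm/arm64 kvm_vm_ioctl_check_extension() handler, so userspace still sees the capability reported the same way, just answered by the arch code. Below is a minimal userspace sketch (not part of the patch) of how such a query looks; it assumes /dev/kvm exists, that the host supports VM-scoped KVM_CHECK_EXTENSION, and it abbreviates error handling.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	/* Open the KVM subsystem and create a default-type VM. */
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = kvm < 0 ? -1 : ioctl(kvm, KVM_CREATE_VM, 0);

	if (kvm < 0 || vm < 0) {
		perror("kvm");
		return 1;
	}

	/*
	 * KVM_CHECK_EXTENSION returns a plain integer for these
	 * capabilities; after this change the VM-scoped query for
	 * KVM_CAP_MAX_VCPU_ID is handled by the arm/arm64 arch code
	 * rather than the generic handler.
	 */
	int max_vcpus = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	int max_vcpu_id = ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPU_ID);

	printf("max vcpus: %d, max vcpu id: %d\n", max_vcpus, max_vcpu_id);
	return 0;
}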