diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt index 9f57f0cfa636..8ee87ab2f31c 100644 --- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt @@ -253,6 +253,7 @@ Optional properties: - qcom,mdss-dsi-color-order: Specifies the R, G and B channel ordering. "rgb_swap_rgb" = DSI_RGB_SWAP_RGB (default value) "rgb_swap_rbg" = DSI_RGB_SWAP_RBG + "rgb_swap_bgr" = DSI_RGB_SWAP_BGR "rgb_swap_brg" = DSI_RGB_SWAP_BRG "rgb_swap_grb" = DSI_RGB_SWAP_GRB "rgb_swap_gbr" = DSI_RGB_SWAP_GBR @@ -553,6 +554,7 @@ Optional properties: frequencies in Hz for the given panel. - qcom,dsi-dyn-clk-skip-timing-update: Boolean to specify whether to skip phy timing parameter update during dynamic clock switch. +- qcom,csi-proxy-enable: Boolean to config DSI transmission packet DataTypes to simulate the CSI-2 compatible signal Required properties for sub-nodes: None Optional properties: diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt index 9c12fb871cfe..f4f462f6ab7f 100644 --- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt @@ -80,6 +80,9 @@ Optional properties: identifier. If this is specified, then a QMP message should be sent to enable the GDSC instead of setting SW_COLLAPSE=0. + - qcom,skip-disable-before-sw-enable: Presence denotes a hardware requirement + to leave the GDSC on that has been + enabled by an entity external to HLOS. 
[1]: Documentation/devicetree/bindings/arm/msm/msm_bus.txt diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index 893827588c35..0e210066777a 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -1417,6 +1417,8 @@ Optional properties: - qcom,afe-rxtx-lb: AFE RX to TX loopback. - qcom,ext-mclk-gpio: pinctrl referring to external mclk - qcom,ext-mclk-src: Device tree node referring to external mclk clock +- qcom,pri-spdiftx-gpios: Pinctrl referring to primary spdif output gpios +- qcom,sec-spdiftx-gpios: Pinctrl referring to secondary spdif output gpios - #ext-mclk-1-cfg-cells: Number of cells in ext-mclk-1-cfg-* nodes. Must be 6. - ext-mclk-1 cfg-11p2896: Frequnency table for 11.2896MHz mclk frequnecy. Fields are clock rate, div2x, m, n, d and clock root. diff --git a/arch/arm64/boot/dts/qcom/atoll-gdsc.dtsi b/arch/arm64/boot/dts/qcom/atoll-gdsc.dtsi index 0eba4a21ff28..776b1b552f0c 100644 --- a/arch/arm64/boot/dts/qcom/atoll-gdsc.dtsi +++ b/arch/arm64/boot/dts/qcom/atoll-gdsc.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -134,6 +134,7 @@ qcom,poll-cfg-gdscr; domain-addr = <&gpu_gx_domain_addr>; sw-reset = <&gpu_gx_sw_reset>; + qcom,skip-disable-before-sw-enable; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index ee36196df1d1..1f86ddd6e421 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2013-2015, 2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra8-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra8-audio-overlay.dtsi index 5f847f00e5c8..fae8015e8a9e 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra8-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra8-audio-overlay.dtsi @@ -76,6 +76,22 @@ pinctrl-0 = <&ext_mclk_1_sck_active>; pinctrl-1 = <&ext_mclk_1_sck_sleep>; }; + + pri_spdiftx_gpios: pri_spdiftx_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&pri_spdiftx_active>; + pinctrl-1 = <&pri_spdiftx_sleep>; + qcom,lpi-gpios; + }; + + sec_spdiftx_gpios: sec_spdiftx_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&sec_spdiftx_active>; + pinctrl-1 = <&sec_spdiftx_sleep>; + qcom,lpi-gpios; + }; }; &q6core { @@ -109,6 +125,8 @@ qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; qcom,pri-mi2s-gpios = <&pri_mi2s_gpios>; qcom,sec-mi2s-gpios = <&sec_mi2s_gpios>; + qcom,pri-spdiftx-gpios = <&pri_spdiftx_gpios>; + qcom,sec-spdiftx-gpios = <&sec_spdiftx_gpios>; qcom,audio-routing = "RX_BIAS", "MCLK", "lineout booster", "LINEOUT1", diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra8plus2-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra8plus2-audio-overlay.dtsi index f5c1dad1a2a9..983889f0fbe2 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra8plus2-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra8plus2-audio-overlay.dtsi @@ -97,6 +97,22 @@ pinctrl-0 = <&ext_mclk_1_sck_active>; pinctrl-1 = <&ext_mclk_1_sck_sleep>; }; + + pri_spdiftx_gpios: pri_spdiftx_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&pri_spdiftx_active>; + pinctrl-1 = 
<&pri_spdiftx_sleep>; + qcom,lpi-gpios; + }; + + sec_spdiftx_gpios: sec_spdiftx_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&sec_spdiftx_active>; + pinctrl-1 = <&sec_spdiftx_sleep>; + qcom,lpi-gpios; + }; }; &q6core { @@ -136,6 +152,8 @@ qcom,pri-mi2s-gpios = <&pri_mi2s_gpios>; qcom,sec-mi2s-gpios = <&sec_mi2s_gpios>; qcom,quat-mi2s-gpios = <&quat_mi2s_gpios>; + qcom,pri-spdiftx-gpios = <&pri_spdiftx_gpios>; + qcom,sec-spdiftx-gpios = <&sec_spdiftx_gpios>; qcom,msm-mi2s-master = <1>, <0>, <1>, <0>, <1>, <1>; qcom,audio-routing = "RX_BIAS", "MCLK", diff --git a/arch/arm64/boot/dts/qcom/qcs405-geni-ir-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-geni-ir-overlay.dtsi index 8b202e53646a..0e7efd8cdf11 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-geni-ir-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-geni-ir-overlay.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -26,6 +26,7 @@ <&clock_gcc GCC_GENI_IR_S_CLK>; clock-names = "iface_clk", "serial_clk"; + vdda33-supply = <&pms405_l12>; qcom,geni-ir-wakeup-gpio = <&tlmm 77 IRQ_TYPE_LEVEL_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&ir_in_default>; diff --git a/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi index ef7a3c487e9b..68e3a6db08d4 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -453,6 +453,65 @@ }; }; + pri_spdiftx_pin { + pri_spdiftx_sleep: pri_spdiftx_sleep { + mux { + pins = "gpio6"; + function = "func1"; + }; + + config { + pins = "gpio6"; + drive-strength = <2>; + bias-pull-down; + output-low; + }; + }; + + pri_spdiftx_active: pri_spdiftx_active { + mux { + pins = "gpio6"; + function = "func1"; + }; + + config { + pins = "gpio6"; + drive-strength = <8>; + bias-pull-down; + output-low; + }; + }; + }; + + sec_spdiftx_pin { + sec_spdiftx_sleep: sec_spdiftx_sleep { + mux { + pins = "gpio7"; + function = "func1"; + }; + + config { + pins = "gpio7"; + drive-strength = <2>; + bias-bus-hold; + output-high; + }; + }; + + sec_spdiftx_active: sec_spdiftx_active { + mux { + pins = "gpio7"; + function = "func1"; + }; + + config { + pins = "gpio7"; + drive-strength = <8>; + output-low; + }; + }; + }; + wsa_swr_data_pin { wsa_swr_data_sleep: wsa_swr_data_sleep { mux { diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index d372c75631c1..5f25c8f300ea 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -1571,9 +1571,9 @@ qcom,msm-bus,num-paths = <2>; qcom,msm-bus,vectors-KBps = <98 512 0 0>, <1 781 0 0>, /* No vote */ - <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ - <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ - <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + <98 512 2500 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 25000 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 250000 0>, <1 781 0 40000>; /* 1000Mbps vote */ qcom,bus-vector-names = "0", "10", "100", "1000"; clocks = <&clock_gcc GCC_ETH_AXI_CLK>, <&clock_gcc GCC_ETH_PTP_CLK>, diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi index 64e691e37ddc..9a78e3e5f48b 100644 --- 
a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -1519,6 +1519,7 @@ compatible = "qcom,emac-smmu-embedded"; iommus = <&apps_smmu 0x1c0 0xf>; qcom,iova-mapping = <0x80000000 0x40000000>; + qcom,smmu-geometry; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi b/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi index f6489f20f7f0..f9ceb357c7e0 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi @@ -172,7 +172,7 @@ mhi_event@7 { mhi,num-elements = <2048>; mhi,intmod = <5>; - mhi,msi = <6>; + mhi,msi = <5>; mhi,chan = <101>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -183,7 +183,7 @@ mhi_event@8 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <7>; + mhi,msi = <6>; mhi,chan = <102>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -195,7 +195,7 @@ mhi_event@9 { mhi,num-elements = <1024>; mhi,intmod = <5>; - mhi,msi = <8>; + mhi,msi = <7>; mhi,chan = <103>; mhi,priority = <1>; mhi,brstmode = <2>; @@ -205,7 +205,7 @@ mhi_event@10 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <9>; + mhi,msi = <8>; mhi,chan = <105>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -217,7 +217,7 @@ mhi_event@11 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <10>; + mhi,msi = <9>; mhi,chan = <106>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -229,7 +229,7 @@ mhi_event@12 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <11>; + mhi,msi = <10>; mhi,chan = <107>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -241,7 +241,7 @@ mhi_event@13 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <12>; + mhi,msi = <11>; mhi,chan = <108>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -253,7 +253,7 @@ mhi_event@14 { mhi,num-elements = <1024>; mhi,intmod = <1>; - mhi,msi = <13>; + mhi,msi = <12>; mhi,chan = <109>; mhi,priority = <0>; mhi,brstmode = <2>; @@ -263,7 +263,7 @@ mhi_event@15 { mhi,num-elements = <1024>; mhi,intmod = <0>; - mhi,msi = <14>; + mhi,msi = <13>; mhi,chan = <110>; 
mhi,priority = <0>; mhi,brstmode = <2>; diff --git a/arch/arm64/boot/dts/qcom/trinket-sde.dtsi b/arch/arm64/boot/dts/qcom/trinket-sde.dtsi index 77ffa86c0d72..1d06c5eb9ec7 100644 --- a/arch/arm64/boot/dts/qcom/trinket-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/trinket-sde.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -99,7 +99,7 @@ qcom,sde-cdm-off = <0x7a200>; qcom,sde-cdm-size = <0x224>; - qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>; + qcom,sde-dither-off = <0x30e0 0x30e0>; qcom,sde-dither-version = <0x00010000>; qcom,sde-dither-size = <0x20>; diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c index 26811368e6c3..e0154b549b4f 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_qcom.c @@ -33,7 +33,7 @@ struct firmware_info { }; static const struct firmware_info firmware_table[] = { - {.dev_id = 0x308, .fw_image = "sdx65m/sbl1.mbn", + {.dev_id = 0x308, .fw_image = "sdx65m/xbl.elf", .edl_image = "sdx65m/edl.mbn"}, {.dev_id = 0x307, .fw_image = "sdx60m/sbl1.mbn", .edl_image = "sdx60m/edl.mbn"}, diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index c586aa5d1d82..5b69628f4581 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -1889,7 +1889,8 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, return 0; error_dec_pendpkt: - atomic_dec(&mhi_cntrl->pending_pkts); + if (in_mission_mode) + atomic_dec(&mhi_cntrl->pending_pkts); error_pm_state: if (!mhi_chan->offload_ch) mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 136213917e46..b5631a236f64 100644 --- a/drivers/char/adsprpc.c +++ 
b/drivers/char/adsprpc.c @@ -656,12 +656,20 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size, unsigned long dma_attr) { + int err = 0; struct fastrpc_apps *me = &gfa; if (me->dev == NULL) { pr_err("device adsprpc-mem is not initialized\n"); return -ENODEV; } + VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid allocation size 0x%zx\n", + __func__, size); + return err; + } *vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL, dma_attr); if (IS_ERR_OR_NULL(*vaddr)) { @@ -733,9 +741,11 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) } if (map->flags == ADSP_MMAP_HEAP_ADDR || map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + spin_lock(&me->hlock); map->refs--; if (!map->refs) hlist_del_init(&map->hn); + spin_unlock(&me->hlock); if (map->refs > 0) return; } else { diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c index d79040d382bb..dc585123dcd1 100644 --- a/drivers/clk/qcom/gdsc-regulator.c +++ b/drivers/clk/qcom/gdsc-regulator.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -88,6 +88,7 @@ struct gdsc { int reset_count; int root_clk_idx; u32 gds_timeout; + bool skip_disable_before_enable; }; enum gdscr_status { @@ -166,6 +167,9 @@ static int gdsc_is_enabled(struct regulator_dev *rdev) if (!sc->toggle_logic) return !sc->resets_asserted; + if (sc->skip_disable_before_enable) + return false; + if (sc->parent_regulator) { /* * The parent regulator for the GDSC is required to be on to @@ -258,6 +262,9 @@ static int gdsc_enable(struct regulator_dev *rdev) uint32_t regval, hw_ctrl_regval = 0x0; int i, ret = 0; + if (sc->skip_disable_before_enable) + return 0; + if (sc->parent_regulator) { ret = regulator_set_voltage(sc->parent_regulator, RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX); @@ -423,6 +430,8 @@ static int gdsc_enable(struct regulator_dev *rdev) sc->is_bus_enabled = false; } + sc->skip_disable_before_enable = false; + if (ret && sc->parent_regulator) regulator_set_voltage(sc->parent_regulator, 0, INT_MAX); @@ -994,6 +1003,9 @@ static int gdsc_probe(struct platform_device *pdev) clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH); } + sc->skip_disable_before_enable = of_property_read_bool( + pdev->dev.of_node, "qcom,skip-disable-before-sw-enable"); + reg_config.dev = &pdev->dev; reg_config.init_data = init_data; reg_config.driver_data = sc; diff --git a/drivers/clk/qcom/npucc-atoll.c b/drivers/clk/qcom/npucc-atoll.c index b2be104de7f6..a58d96d0c017 100644 --- a/drivers/clk/qcom/npucc-atoll.c +++ b/drivers/clk/qcom/npucc-atoll.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -357,13 +357,13 @@ static struct clk_rcg2 npu_cc_core_clk_src = { }; static const struct freq_tbl ftbl_npu_dsp_core_clk_src[] = { - F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(600000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), - F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0), + F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(600000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), + F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 2, 0, 0), { } }; diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index b4bf37ed8966..76465644571b 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -593,6 +593,7 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id) cancel_delayed_work(&mdm->mdm2ap_status_check_work); dev_dbg(dev, "status = 1: mdm is now ready\n"); mdm->ready = true; + esoc_clink_evt_notify(ESOC_BOOT_STATE, esoc); mdm_trigger_dbg(mdm); queue_work(mdm->mdm_queue, &mdm->mdm_status_work); if (mdm->get_restart_reason) diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c index 9e5d1aca189b..ecf62cff6b75 100755 --- a/drivers/esoc/esoc-mdm-drv.c +++ b/drivers/esoc/esoc-mdm-drv.c @@ -38,20 +38,21 @@ enum esoc_pon_state { enum { PWR_OFF = 0x1, - PWR_ON, - BOOT, - RUN, - CRASH, - IN_DEBUG, SHUTDOWN, RESET, PEER_CRASH, + IN_DEBUG, + CRASH, + PWR_ON, + BOOT, + RUN, }; struct mdm_drv { unsigned int mode; struct esoc_eng cmd_eng; struct completion pon_done; + struct completion ssr_ready; 
struct completion req_eng_wait; struct esoc_clink *esoc_clink; enum esoc_pon_state pon_state; @@ -152,6 +153,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, "ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n"); mdm_drv->pon_state = PON_FAIL; complete(&mdm_drv->pon_done); + complete(&mdm_drv->ssr_ready); + break; + case ESOC_BOOT_STATE: + if (mdm_drv->mode == PWR_OFF) { + esoc_mdm_log( + "ESOC_BOOT_STATE: Observed status high from modem.\n"); + mdm_drv->mode = BOOT; + } break; case ESOC_RUN_STATE: esoc_mdm_log( @@ -159,12 +168,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, mdm_drv->pon_state = PON_SUCCESS; mdm_drv->mode = RUN, complete(&mdm_drv->pon_done); + complete(&mdm_drv->ssr_ready); break; case ESOC_RETRY_PON_EVT: esoc_mdm_log( "ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n"); mdm_drv->pon_state = PON_RETRY; complete(&mdm_drv->pon_done); + complete(&mdm_drv->ssr_ready); break; case ESOC_UNEXPECTED_RESET: esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n"); @@ -174,19 +185,15 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n"); /* - * Modem can crash while we are waiting for pon_done during - * a subsystem_get(). Setting mode to CRASH will prevent a - * subsequent subsystem_get() from entering poweron ops. Avoid - * this by seting mode to CRASH only if device was up and - * running. + * Ignore all modem errfatals if the status is not up + * or modem in run state. */ - if (mdm_drv->mode == CRASH) + if (mdm_drv->mode <= CRASH) { esoc_mdm_log( - "Modem in crash state already. Ignoring.\n"); - if (mdm_drv->mode != RUN) - esoc_mdm_log("Modem not up. Ignoring.\n"); - if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN) + "Modem in crash state or not booted. 
Ignoring.\n"); return; + } + esoc_mdm_log("Setting crash flag\n"); mdm_drv->mode = CRASH; queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work); break; @@ -205,10 +212,15 @@ static void mdm_ssr_fn(struct work_struct *work) struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work); struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink); - esoc_client_link_mdm_crash(mdm_drv->esoc_clink); + /* Wait for pon to complete. Start SSR only if pon is success */ + wait_for_completion(&mdm_drv->ssr_ready); + if (mdm_drv->pon_state != PON_SUCCESS) { + esoc_mdm_log("Got errfatal but ignoring as boot failed\n"); + return; + } + esoc_client_link_mdm_crash(mdm_drv->esoc_clink); mdm_wait_for_status_low(mdm, false); - esoc_mdm_log("Starting SSR work\n"); /* @@ -372,7 +384,9 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink, esoc_client_link_power_off(esoc_clink, poff_flags); mdm_disable_irqs(mdm); mdm_drv->pon_state = PON_INIT; + mdm_drv->mode = PWR_OFF; reinit_completion(&mdm_drv->pon_done); + reinit_completion(&mdm_drv->ssr_ready); reinit_completion(&mdm_drv->req_eng_wait); } @@ -423,6 +437,7 @@ static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial) break; case BOOT_FAIL_ACTION_NOP: esoc_mdm_log("Leaving the modem in its curent state\n"); + mdm_drv->mode = PWR_OFF; return -EIO; case BOOT_FAIL_ACTION_SHUTDOWN: default: @@ -617,6 +632,7 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv) } esoc_set_drv_data(esoc_clink, mdm_drv); init_completion(&mdm_drv->pon_done); + init_completion(&mdm_drv->ssr_ready); init_completion(&mdm_drv->req_eng_wait); INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn); mdm_drv->esoc_clink = esoc_clink; diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 93e3b96b6dfa..627bdd2b5c29 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010,2015, The Linux Foundation. 
All rights reserved. +/* Copyright (c) 2010,2015,2020 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify @@ -579,6 +579,13 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) return ret ? : le32_to_cpu(scm_ret); } +int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, + size_t mem_sz, phys_addr_t src, size_t src_sz, + phys_addr_t dest, size_t dest_sz) +{ + return -ENODEV; +} + int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) { diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index e9001075f676..52d1d172c1d1 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015,2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -382,6 +382,33 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) return ret ? : res.a1; } +int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, + size_t mem_sz, phys_addr_t src, size_t src_sz, + phys_addr_t dest, size_t dest_sz) +{ + int ret; + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + + desc.args[0] = mem_region; + desc.args[1] = mem_sz; + desc.args[2] = src; + desc.args[3] = src_sz; + desc.args[4] = dest; + desc.args[5] = dest_sz; + desc.args[6] = 0; + + desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, + QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, + QCOM_SCM_VAL, QCOM_SCM_VAL); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, + QCOM_MEM_PROT_ASSIGN_ID, + &desc, &res); + + return ret ? 
: res.a1; +} + int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) { struct qcom_scm_desc desc = {0}; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index bb16510d75ba..a0fb36aa6ad5 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1,7 +1,7 @@ /* * Qualcomm SCM driver * - * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2010,2015,2020 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify @@ -40,6 +40,19 @@ struct qcom_scm { struct reset_controller_dev reset; }; +struct qcom_scm_current_perm_info { + __le32 vmid; + __le32 perm; + __le64 ctx; + __le32 ctx_size; + __le32 unused; +}; + +struct qcom_scm_mem_map_info { + __le64 mem_addr; + __le64 mem_size; +}; + static struct qcom_scm *__scm; static int qcom_scm_clk_enable(void) @@ -348,6 +361,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id) } EXPORT_SYMBOL(qcom_scm_set_remote_state); +/** + * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership + * @mem_addr: mem region whose ownership need to be reassigned + * @mem_sz: size of the region. + * @srcvm: vmid for current set of owners, each set bit in + * flag indicate a unique owner + * @newvm: array having new owners and corresponding permission + * flags + * @dest_cnt: number of owners in next set. + * + * Return negative errno on failure, 0 on success, with @srcvm updated. 
+ */ +int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + unsigned int *srcvm, + struct qcom_scm_vmperm *newvm, int dest_cnt) +{ + struct qcom_scm_current_perm_info *destvm; + struct qcom_scm_mem_map_info *mem_to_map; + phys_addr_t mem_to_map_phys; + phys_addr_t dest_phys; + phys_addr_t ptr_phys; + size_t mem_to_map_sz; + size_t dest_sz; + size_t src_sz; + size_t ptr_sz; + int next_vm; + __le32 *src; + void *ptr; + int ret; + int len; + int i; + + src_sz = hweight_long(*srcvm) * sizeof(*src); + mem_to_map_sz = sizeof(*mem_to_map); + dest_sz = dest_cnt * sizeof(*destvm); + ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + + ALIGN(dest_sz, SZ_64); + + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + /* Fill source vmid detail */ + src = ptr; + len = hweight_long(*srcvm); + for (i = 0; i < len; i++) { + src[i] = cpu_to_le32(ffs(*srcvm) - 1); + *srcvm ^= 1 << (ffs(*srcvm) - 1); + } + + /* Fill details of mem buff to map */ + mem_to_map = ptr + ALIGN(src_sz, SZ_64); + mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); + mem_to_map[0].mem_addr = cpu_to_le64(mem_addr); + mem_to_map[0].mem_size = cpu_to_le64(mem_sz); + + next_vm = 0; + /* Fill details of next vmid detail */ + destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); + dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); + for (i = 0; i < dest_cnt; i++) { + destvm[i].vmid = cpu_to_le32(newvm[i].vmid); + destvm[i].perm = cpu_to_le32(newvm[i].perm); + destvm[i].ctx = 0; + destvm[i].ctx_size = 0; + next_vm |= BIT(newvm[i].vmid); + } + + ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, + ptr_phys, src_sz, dest_phys, dest_sz); + dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys); + if (ret) { + dev_err(__scm->dev, + "Assign memory protection call failed %d.\n", ret); + return -EINVAL; + } + + *srcvm = next_vm; + return 0; +} +EXPORT_SYMBOL(qcom_scm_assign_mem); + 
static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 9bea691f30fb..9b9fe30cada6 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2015,2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -95,5 +95,10 @@ extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, size_t *size); extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, u32 spare); +#define QCOM_MEM_PROT_ASSIGN_ID 0x16 +extern int __qcom_scm_assign_mem(struct device *dev, + phys_addr_t mem_region, size_t mem_sz, + phys_addr_t src, size_t src_sz, + phys_addr_t dest, size_t dest_sz); #endif diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 889ddcd2da96..16e1d722ab10 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -203,9 +203,8 @@ enum sde_enc_rc_states { * @debugfs_root: Debug file system root file node * @enc_lock: Lock around physical encoder create/destroy and access. - * @frame_busy_mask: Bitmask tracking which phys_enc we are still - * busy processing current command. - * Bit0 = phys_encs[0] etc. + * @frame_done_cnt: Atomic counter for tracking which phy_enc is + * done with frame processing. 
* @crtc_frame_event_cb: callback handler for frame event * @crtc_frame_event_cb_data: callback handler private data * @vsync_event_timer: vsync timer @@ -264,7 +263,7 @@ struct sde_encoder_virt { struct dentry *debugfs_root; struct mutex enc_lock; - DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); + atomic_t frame_done_cnt[MAX_PHYS_ENCODERS_PER_VIRTUAL]; void (*crtc_frame_event_cb)(void *, u32 event); struct sde_crtc_frame_event_cb_data crtc_frame_event_cb_data; @@ -2434,6 +2433,16 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc, return -EINVAL; } + /* + * schedule off work item only when there are no + * frames pending + */ + if (sde_crtc_frame_pending(sde_enc->crtc) > 1) { + SDE_DEBUG_ENC(sde_enc, "skip schedule work"); + SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state, + SDE_EVTLOG_FUNC_CASE2); + return 0; + } if (sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) { SDE_ERROR("invalid crtc index :%u\n", sde_enc->crtc->index); @@ -2455,17 +2464,6 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc, return -EINVAL; } - /* - * schedule off work item only when there are no - * frames pending - */ - if (sde_crtc_frame_pending(sde_enc->crtc) > 1) { - SDE_DEBUG_ENC(sde_enc, "skip schedule work"); - SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state, - SDE_EVTLOG_FUNC_CASE2); - return 0; - } - /* schedule delayed off work if autorefresh is disabled */ if (sde_enc->cur_master && sde_enc->cur_master->ops.is_autorefresh_enabled) @@ -2680,17 +2678,10 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc, SDE_EVTLOG_ERROR); mutex_unlock(&sde_enc->rc_lock); return 0; - } - - /* - * if we are in ON but a frame was just kicked off, - * ignore the IDLE event, it's probably a stale timer event - */ - if (sde_enc->frame_busy_mask[0]) { - SDE_ERROR_ENC(sde_enc, - "sw_event:%d, rc:%d frame pending\n", - sw_event, sde_enc->rc_state); + } else if (sde_crtc_frame_pending(sde_enc->crtc) > 1) { + 
SDE_DEBUG_ENC(sde_enc, "skip idle entry"); SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state, + sde_crtc_frame_pending(sde_enc->crtc), SDE_EVTLOG_ERROR); mutex_unlock(&sde_enc->rc_lock); return 0; @@ -3628,6 +3619,8 @@ static void sde_encoder_frame_done_callback( { struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned int i; + bool trigger = true, is_cmd_mode; + enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE; if (!drm_enc || !sde_enc->cur_master) { SDE_ERROR("invalid param: drm_enc %lx, cur_master %lx\n", @@ -3638,30 +3631,41 @@ static void sde_encoder_frame_done_callback( sde_enc->crtc_frame_event_cb_data.connector = sde_enc->cur_master->connector; + is_cmd_mode = sde_enc->disp_info.capabilities & + MSM_DISPLAY_CAP_CMD_MODE; if (event & (SDE_ENCODER_FRAME_EVENT_DONE | SDE_ENCODER_FRAME_EVENT_ERROR - | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) { - - if (!sde_enc->frame_busy_mask[0]) { - /** - * suppress frame_done without waiter, - * likely autorefresh - */ - SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx); - return; - } + | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD) && is_cmd_mode) { + if (ready_phys->connector) + topology = sde_connector_get_topology_name( + ready_phys->connector); /* One of the physical encoders has become idle */ for (i = 0; i < sde_enc->num_phys_encs; i++) { - if (sde_enc->phys_encs[i] == ready_phys) { - clear_bit(i, sde_enc->frame_busy_mask); + if ((sde_enc->phys_encs[i] == ready_phys) || + (event & SDE_ENCODER_FRAME_EVENT_ERROR)) { SDE_EVT32_VERBOSE(DRMID(drm_enc), i, - sde_enc->frame_busy_mask[0]); + atomic_read( + &sde_enc->frame_done_cnt[i])); + if (!atomic_add_unless( + &sde_enc->frame_done_cnt[i], 1, 1)) { + SDE_EVT32(DRMID(drm_enc), event, + ready_phys->intf_idx, + SDE_EVTLOG_ERROR); + SDE_ERROR_ENC(sde_enc, + "intf idx:%d, event:%d\n", + ready_phys->intf_idx, event); + return; + } } + + if (topology != SDE_RM_TOPOLOGY_PPSPLIT && + atomic_read(&sde_enc->frame_done_cnt[i]) != 1) + trigger = false; } - if 
(!sde_enc->frame_busy_mask[0]) { + if (trigger) { sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_FRAME_DONE); @@ -3669,11 +3673,18 @@ static void sde_encoder_frame_done_callback( sde_enc->crtc_frame_event_cb( &sde_enc->crtc_frame_event_cb_data, event); + for (i = 0; i < sde_enc->num_phys_encs; i++) + atomic_set(&sde_enc->frame_done_cnt[i], 0); } } else { - if (sde_enc->crtc_frame_event_cb) + if (sde_enc->crtc_frame_event_cb) { + if (!is_cmd_mode) + sde_encoder_resource_control(drm_enc, + SDE_ENC_RC_EVENT_FRAME_DONE); + sde_enc->crtc_frame_event_cb( &sde_enc->crtc_frame_event_cb_data, event); + } } } @@ -3980,14 +3991,6 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) if (phys->connector) topology = sde_connector_get_topology_name( phys->connector); - /* - * don't wait on ppsplit slaves or skipped encoders because - * they dont receive irqs - */ - if (!(topology == SDE_RM_TOPOLOGY_PPSPLIT && - phys->split_role == ENC_ROLE_SLAVE) && - phys->split_role != ENC_ROLE_SKIP) - set_bit(i, sde_enc->frame_busy_mask); if (!phys->ops.needs_single_flush || !phys->ops.needs_single_flush(phys)) { @@ -5494,6 +5497,8 @@ struct drm_encoder *sde_encoder_init_with_ops( sde_enc->cur_master = NULL; spin_lock_init(&sde_enc->enc_spinlock); mutex_init(&sde_enc->vblank_ctl_lock); + for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) + atomic_set(&sde_enc->frame_done_cnt[i], 0); drm_enc = &sde_enc->base; drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL); drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 34ebe8d2503d..670c405b6605 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -594,11 +594,11 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) pend_ret_fence_cnt = atomic_read(&phys_enc->pending_retire_fence_cnt); /* signal only for master, where there is a pending kickoff */ - if (sde_encoder_phys_vid_is_master(phys_enc)) { - if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, - -1, 0)) - event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE | - SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE; + if (sde_encoder_phys_vid_is_master(phys_enc) && + atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) { + event = SDE_ENCODER_FRAME_EVENT_DONE | + SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE | + SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE; } not_flushed: @@ -616,9 +616,10 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) phys_enc); SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0, - old_cnt, new_cnt, reset_status ? SDE_EVTLOG_ERROR : 0, + old_cnt, atomic_read(&phys_enc->pending_kickoff_cnt), + reset_status ? 
SDE_EVTLOG_ERROR : 0, flush_register, event, - pend_ret_fence_cnt); + atomic_read(&phys_enc->pending_retire_fence_cnt)); /* Signal any waiting atomic commit thread */ wake_up_all(&phys_enc->pending_kickoff_wq); @@ -945,10 +946,11 @@ static void sde_encoder_phys_vid_get_hw_resources( static int _sde_encoder_phys_vid_wait_for_vblank( struct sde_encoder_phys *phys_enc, bool notify) { - struct sde_encoder_wait_info wait_info; + struct sde_encoder_wait_info wait_info = {0}; int ret = 0; - u32 event = 0; - u32 event_helper = 0; + u32 event = SDE_ENCODER_FRAME_EVENT_ERROR | + SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE | + SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE; if (!phys_enc) { pr_err("invalid encoder\n"); @@ -959,40 +961,16 @@ static int _sde_encoder_phys_vid_wait_for_vblank( wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; - if (!sde_encoder_phys_vid_is_master(phys_enc)) { - /* signal done for slave video encoder, unless it is pp-split */ - if (!_sde_encoder_phys_is_ppsplit(phys_enc) && notify) { - event = SDE_ENCODER_FRAME_EVENT_DONE; - goto end; - } - return 0; - } - /* Wait for kickoff to complete */ ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC, &wait_info); - event_helper = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE - | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE; - - if (notify) { - if (ret == -ETIMEDOUT) { - event = SDE_ENCODER_FRAME_EVENT_ERROR; - if (atomic_add_unless( - &phys_enc->pending_retire_fence_cnt, -1, 0)) - event |= event_helper; - } else if (!ret) { - event = SDE_ENCODER_FRAME_EVENT_DONE; - } - } - -end: - SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret, - ret ? 
SDE_EVTLOG_FATAL : 0); - if (phys_enc->parent_ops.handle_frame_done && event) + if (notify && (ret == -ETIMEDOUT) && + atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0) && + phys_enc->parent_ops.handle_frame_done) phys_enc->parent_ops.handle_frame_done( - phys_enc->parent, phys_enc, - event); + phys_enc->parent, phys_enc, event); + return ret; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 2e81e31288b0..e79450e2b941 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1227,7 +1227,7 @@ static int _sde_encoder_phys_wb_wait_for_commit_done( u32 event = 0; u64 wb_time = 0; int rc = 0; - struct sde_encoder_wait_info wait_info; + struct sde_encoder_wait_info wait_info = {0}; /* Return EWOULDBLOCK since we know the wait isn't necessary */ if (phys_enc->enable_state == SDE_ENC_DISABLED) { diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c index 01e5e2c9e516..c346d26aa1fb 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_2x.c +++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -563,6 +563,11 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) HDCP_TRANSPORT_CMD_INVALID }; cdata.context = hdcp->client_data; + if (atomic_read(&hdcp->hdcp_off)) { + pr_debug("invalid state, hdcp off\n"); + return; + } + switch (hdcp->app_data.response.data[0]) { case SKE_SEND_TYPE_ID: sde_hdcp_2x_set_hw_key(hdcp); diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 9ab72644d036..b7a2ae16ebf5 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -3954,6 +3954,19 @@ static int adreno_resume_device(struct kgsl_device *device, return 0; } +u32 adreno_get_ucode_version(const u32 *data) +{ + u32 version; + + version = data[1]; + + if ((version & 0xf) != 0xa) + return version; + + version &= ~0xfff; + return version | ((data[3] & 0xfff000) >> 12); +} + static const struct kgsl_functable adreno_functable = { /* Mandatory functions */ .regread = adreno_regread, diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 5ce10a4df9fd..8946b20c2ff8 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -288,8 +288,8 @@ enum adreno_preempt_states { /** * struct adreno_preemption * @state: The current state of preemption - * @counters: Memory descriptor for the memory where the GPU writes the - * preemption counters on switch + * @scratch: Memory descriptor for the memory where the GPU writes the + * current ctxt record address and preemption counters on switch * @timer: A timer to make sure preemption doesn't stall * @work: A work struct for the preemption worker (for 5XX) * @token_submit: Indicates if a preempt token has been submitted in @@ -301,7 +301,7 @@ enum adreno_preempt_states { */ struct adreno_preemption { atomic_t state; - struct kgsl_memdesc counters; + struct kgsl_memdesc scratch; struct timer_list timer; struct work_struct work; bool token_submit; @@ -1196,6 +1196,7 @@ void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev, void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev, unsigned int offsetwords, unsigned int mask, unsigned int bits); +u32 adreno_get_ucode_version(const u32 *data); #define ADRENO_TARGET(_name, _id) \ diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 671d23691dfa..7ee260585081 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -2151,12 +2151,15 @@ static int a5xx_post_start(struct adreno_device *adreno_dev) *cmds++ = 0xF; } - if (adreno_is_preemption_enabled(adreno_dev)) + if (adreno_is_preemption_enabled(adreno_dev)) { cmds += _preemption_init(adreno_dev, rb, cmds, NULL); + rb->_wptr = rb->_wptr - (42 - (cmds - start)); + ret = adreno_ringbuffer_submit_spin_nosync(rb, NULL, 2000); + } else { + rb->_wptr = rb->_wptr - (42 - (cmds - start)); + ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000); + } - rb->_wptr = rb->_wptr - (42 - (cmds - start)); - - ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000); if (ret) 
adreno_spin_idle_debug(adreno_dev, "hw initialization failed to idle\n"); @@ -2493,7 +2496,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile, memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4); firmware->size = (fw->size - 4) / sizeof(uint32_t); - firmware->version = *(unsigned int *)&fw->data[4]; + firmware->version = adreno_get_ucode_version((u32 *)fw->data); done: release_firmware(fw); diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h index 08f56d6701f2..71e9e69895f0 100644 --- a/drivers/gpu/msm/adreno_a5xx.h +++ b/drivers/gpu/msm/adreno_a5xx.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017,2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017,2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -112,7 +112,7 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev); void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on); -#define A5XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \ +#define A5XX_CP_RB_CNTL_DEFAULT ((1 << 27) | ((ilog2(4) << 8) & 0x1F00) | \ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F)) /* GPMU interrupt multiplexor */ #define FW_INTR_INFO (0) diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index cb7a65f92135..39283db8f100 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017,2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017,2019-2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -575,7 +575,7 @@ static void _preemption_close(struct adreno_device *adreno_dev) unsigned int i; del_timer(&preempt->timer); - kgsl_free_global(device, &preempt->counters); + kgsl_free_global(device, &preempt->scratch); a5xx_preemption_iommu_close(adreno_dev); FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { @@ -611,14 +611,14 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) (unsigned long) adreno_dev); /* Allocate mem for storing preemption counters */ - ret = kgsl_allocate_global(device, &preempt->counters, + ret = kgsl_allocate_global(device, &preempt->scratch, adreno_dev->num_ringbuffers * A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, "preemption_counters"); if (ret) goto err; - addr = preempt->counters.gpuaddr; + addr = preempt->scratch.gpuaddr; /* Allocate mem for storing preemption switch record */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index b894bb92c016..14a9132f747f 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1,4 +1,4 @@ -/* Copyright (c)2017-2019, The Linux Foundation. All rights reserved. +/* Copyright (c)2017-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1202,7 +1202,7 @@ static int a6xx_post_start(struct adreno_device *adreno_dev) rb->_wptr = rb->_wptr - (42 - (cmds - start)); - ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000); + ret = adreno_ringbuffer_submit_spin_nosync(rb, NULL, 2000); if (ret) adreno_spin_idle_debug(adreno_dev, "hw preemption initialization failed to idle\n"); @@ -1348,7 +1348,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile, if (!ret) { memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4); firmware->size = (fw->size - 4) / sizeof(uint32_t); - firmware->version = *(unsigned int *)&fw->data[4]; + firmware->version = adreno_get_ucode_version((u32 *)fw->data); } release_firmware(fw); diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index 8807db6333f4..2fa21283d5d4 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -101,7 +101,7 @@ struct cpu_gpu_lock { /* Size of the performance counter save/restore block (in bytes) */ #define A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE (4 * 1024) -#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \ +#define A6XX_CP_RB_CNTL_DEFAULT ((1 << 27) | ((ilog2(4) << 8) & 0x1F00) | \ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F)) /* diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index d39420b0c9ed..5d551b5483d7 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -304,8 +304,8 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) kgsl_sharedmem_writel(device, &iommu->smmu_info, PREEMPT_SMMU_RECORD(context_idr), contextidr); - kgsl_sharedmem_readq(&device->scratch, &gpuaddr, - SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(next->id)); + kgsl_sharedmem_readq(&preempt->scratch, &gpuaddr, + next->id * sizeof(u64)); /* * Set a keepalive bit before the first preemption register write. 
@@ -526,12 +526,10 @@ unsigned int a6xx_preemption_pre_ibsubmit( rb->perfcounter_save_restore_desc.gpuaddr); if (context) { - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_context *drawctxt = ADRENO_CONTEXT(context); struct adreno_ringbuffer *rb = drawctxt->rb; - uint64_t dest = - SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device, - rb->id); + uint64_t dest = adreno_dev->preempt.scratch.gpuaddr + + sizeof(u64) * rb->id; *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2); cmds += cp_gpuaddr(adreno_dev, cmds, dest); @@ -549,9 +547,8 @@ unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, struct adreno_ringbuffer *rb = adreno_dev->cur_rb; if (rb) { - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - uint64_t dest = SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device, - rb->id); + uint64_t dest = adreno_dev->preempt.scratch.gpuaddr + + sizeof(u64) * rb->id; *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2); cmds += cp_gpuaddr(adreno_dev, cmds, dest); @@ -712,7 +709,7 @@ static void _preemption_close(struct adreno_device *adreno_dev) unsigned int i; del_timer(&preempt->timer); - kgsl_free_global(device, &preempt->counters); + kgsl_free_global(device, &preempt->scratch); a6xx_preemption_iommu_close(adreno_dev); FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { @@ -751,15 +748,19 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev) setup_timer(&preempt->timer, _a6xx_preemption_timer, (unsigned long) adreno_dev); - /* Allocate mem for storing preemption counters */ - ret = kgsl_allocate_global(device, &preempt->counters, - adreno_dev->num_ringbuffers * - A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, - "preemption_counters"); + /* + * Allocate a scratch buffer to keep the below table: + * Offset: What + * 0x0: Context Record address + * 0x10: Preemption Counters + */ + ret = kgsl_allocate_global(device, &preempt->scratch, PAGE_SIZE, 0, 0, + "preemption_scratch"); if (ret) goto err; - addr = 
preempt->counters.gpuaddr; + addr = preempt->scratch.gpuaddr + + KGSL_PRIORITY_MAX_RB_LEVELS * sizeof(u64); /* Allocate mem for storing preemption switch record */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index df8e8acecb4c..aa945a4bd302 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -487,11 +487,12 @@ void adreno_drawctxt_detach(struct kgsl_context *context) drawctxt = ADRENO_CONTEXT(context); rb = drawctxt->rb; + spin_lock(&drawctxt->lock); + spin_lock(&adreno_dev->active_list_lock); list_del_init(&drawctxt->active_node); spin_unlock(&adreno_dev->active_list_lock); - spin_lock(&drawctxt->lock); count = drawctxt_detach_drawobjs(drawctxt, list); spin_unlock(&drawctxt->lock); diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c index 82629c6fcf23..d18aa19ad563 100644 --- a/drivers/gpu/msm/adreno_ioctl.c +++ b/drivers/gpu/msm/adreno_ioctl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2018,2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -168,7 +168,7 @@ static long adreno_ioctl_preemption_counters_query( levels_to_copy = gpudev->num_prio_levels; if (copy_to_user((void __user *) (uintptr_t) read->counters, - adreno_dev->preempt.counters.hostptr, + adreno_dev->preempt.scratch.hostptr, levels_to_copy * size_level)) return -EFAULT; diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h index 2a330b4474aa..543496399044 100644 --- a/drivers/gpu/msm/adreno_pm4types.h +++ b/drivers/gpu/msm/adreno_pm4types.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017,2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -103,6 +103,8 @@ /* A5XX Enable yield in RB only */ #define CP_YIELD_ENABLE 0x1C +#define CP_WHERE_AM_I 0x62 + /* Enable/Disable/Defer A5x global preemption model */ #define CP_PREEMPT_ENABLE_GLOBAL 0x69 diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 38953e2b9e0a..6d689e38ba6e 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -189,7 +189,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb, adreno_ringbuffer_wptr(adreno_dev, rb); } -int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb, +int adreno_ringbuffer_submit_spin_nosync(struct adreno_ringbuffer *rb, struct adreno_submit_time *time, unsigned int timeout) { struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); @@ -198,6 +198,38 @@ int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb, return adreno_spin_idle(adreno_dev, timeout); } +/* + * adreno_ringbuffer_submit_spin() - Submit the cmds and wait until GPU is idle + * @rb: Pointer to ringbuffer + * @time: Pointer to adreno_submit_time + * @timeout: timeout value in ms + * + * Add commands to the ringbuffer and wait until GPU goes to idle. This routine + * inserts a WHERE_AM_I packet to trigger a shadow rptr update. So, use + * adreno_ringbuffer_submit_spin_nosync() if the previous cmd in the RB is a + * CSY packet because CSY followed by WHERE_AM_I is not legal. 
+ */ +int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb, + struct adreno_submit_time *time, unsigned int timeout) +{ + struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int *cmds; + + if (adreno_is_a3xx(adreno_dev)) + return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout); + + cmds = adreno_ringbuffer_allocspace(rb, 3); + if (IS_ERR(cmds)) + return PTR_ERR(cmds); + + *cmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2); + cmds += cp_gpuaddr(adreno_dev, cmds, + SCRATCH_RPTR_GPU_ADDR(device, rb->id)); + + return adreno_ringbuffer_submit_spin_nosync(rb, time, timeout); +} + unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb, unsigned int dwords) { @@ -322,12 +354,13 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + unsigned int priv = KGSL_MEMDESC_RANDOM | KGSL_MEMDESC_PRIVILEGED; int i, r = 0; int status = -ENOMEM; if (!adreno_is_a3xx(adreno_dev)) { status = kgsl_allocate_global(device, &device->scratch, - PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch"); + PAGE_SIZE, 0, priv, "scratch"); if (status != 0) return status; } @@ -549,6 +582,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (gpudev->preemption_post_ibsubmit && adreno_is_preemption_enabled(adreno_dev)) total_sizedwords += 10; + else if (!adreno_is_a3xx(adreno_dev)) + total_sizedwords += 3; /* * a5xx uses 64 bit memory address. 
pm4 commands that involve read/write @@ -760,6 +795,11 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, adreno_is_preemption_enabled(adreno_dev)) ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, ringcmds); + else if (!adreno_is_a3xx(adreno_dev)) { + *ringcmds++ = cp_packet(adreno_dev, CP_WHERE_AM_I, 2); + ringcmds += cp_gpuaddr(adreno_dev, ringcmds, + SCRATCH_RPTR_GPU_ADDR(device, rb->id)); + } /* * If we have more ringbuffer commands than space reserved diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index 9eb0c92213f3..14b4ecc2e192 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -178,6 +178,9 @@ int adreno_ringbuffer_issue_internal_cmds(struct adreno_ringbuffer *rb, void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb, struct adreno_submit_time *time); +int adreno_ringbuffer_submit_spin_nosync(struct adreno_ringbuffer *rb, + struct adreno_submit_time *time, unsigned int timeout); + int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb, struct adreno_submit_time *time, unsigned int timeout); diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 3c877f722f23..6c04695d5201 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -71,13 +71,11 @@ /* * SCRATCH MEMORY: The scratch memory is one page worth of data that * is mapped into the GPU. This allows for some 'shared' data between - * the GPU and CPU. For example, it will be used by the GPU to write - * each updated RPTR for each RB. + * the GPU and CPU. 
* * Used Data: * Offset: Length(bytes): What * 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR - * 0x10: 8 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 CTXT RESTORE ADDR */ /* Shadow global helpers */ @@ -85,13 +83,6 @@ #define SCRATCH_RPTR_GPU_ADDR(dev, id) \ ((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id)) -#define SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id) \ - (SCRATCH_RPTR_OFFSET(KGSL_PRIORITY_MAX_RB_LEVELS) + \ - ((id) * sizeof(uint64_t))) -#define SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(dev, id) \ - ((dev)->scratch.gpuaddr + \ - SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id)) - /* Timestamp window used to detect rollovers (half of integer range) */ #define KGSL_TIMESTAMP_WINDOW 0x80000000 diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index 922dba4ed71e..141e9b4f9cd3 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -3174,16 +3174,19 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr, mgr->ops_hw_init = sde_rotator_r3_init; mgr->min_rot_clk = ROT_MIN_ROT_CLK; - /* - * on platforms where the maxlinewidth is greater than - * default we need to have a max clock rate check to - * ensure we do not cross the max allowed clock for rotator - */ - if (IS_SDE_MAJOR_SAME(mdata->mdss_version, - SDE_MDP_HW_REV_500) || + if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, + SDE_MDP_HW_REV_500) || + IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, + SDE_MDP_HW_REV_620)) + mgr->max_rot_clk = 460000000UL; + else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, + SDE_MDP_HW_REV_520)) + mgr->max_rot_clk = 430000000UL; + else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, + SDE_MDP_HW_REV_530) || IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, - SDE_MDP_HW_REV_620)) - mgr->max_rot_clk = ROT_R3_MAX_ROT_CLK; + SDE_MDP_HW_REV_540)) + mgr->max_rot_clk = 307200000UL; if (!(IS_SDE_MAJOR_SAME(mdata->mdss_version, 
SDE_MDP_HW_REV_500) || diff --git a/drivers/media/rc/msm-geni-ir.c b/drivers/media/rc/msm-geni-ir.c index 8ee451f06f1f..7fae4e04bd30 100644 --- a/drivers/media/rc/msm-geni-ir.c +++ b/drivers/media/rc/msm-geni-ir.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. +/* Copyright (c) 2014, 2018, 2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -360,7 +361,7 @@ struct msm_geni_ir { void __iomem *base; unsigned int gpio_rx; - + struct regulator *vdda33; struct clk *ahb_clk; struct clk *serial_clk; struct reset_control *reset_core; @@ -494,28 +495,6 @@ static void msm_geni_ir_load_firmware(struct msm_geni_ir *ir) } EXPORT_SYMBOL(msm_geni_ir_load_firmware); -/* sets the RX filter table with wakeup commands */ -static void msm_geni_ir_set_rx_filter(struct msm_geni_ir *ir) -{ - u32 i, irq_enable = 0; - - /* set the IRQ enable bit for non-zero RX wakeup commands */ - for (i = 0; i < ir->num_wakeup_codes; i++) { - irq_enable |= ((ir->wakeup_codes[i]) ? 
(1 << i) : 0); - - writel_relaxed(ir->wakeup_codes[i], - ir->base + GENI_IR_RX_FILTER_TABLE(i)); - } - - /* set the filter mask */ - writel_relaxed(ir->wakeup_mask, ir->base + IR_RX_FILTER_VAL_MASK); - - /* set the IRQ enable bits */ - writel_relaxed(irq_enable, ir->base + GENI_IR_IRQ_ENABLE); - /*write memory barrier*/ - wmb(); -} - /* stop GENI IR */ static void msm_geni_ir_stop(struct msm_geni_ir *ir) { @@ -543,76 +522,6 @@ static void msm_geni_ir_stop(struct msm_geni_ir *ir) readl_relaxed(ir->base + IR_GENI_RX_FIFO(i)); } -/* configures geni IR to low power mode */ -static void msm_geni_ir_low_power_mode(struct msm_geni_ir *ir) -{ - u32 clk_cfg; - - /* set the RX filter table for wakeup */ - msm_geni_ir_set_rx_filter(ir); - - /* disable interrupts */ - writel_relaxed(0, ir->base + IR_GENI_IRQ_ENABLE); - synchronize_irq(ir->irq); - - /* stop GENI IR */ - msm_geni_ir_stop(ir); - - /* disable TX path, enable RX path */ - clk_cfg = RX_CLK_DIV_VALUE(RX_CLK_DIV_LP) | RX_SER_CLK_EN; - writel_relaxed(clk_cfg, ir->base + IR_GENI_SER_CLK_CFG); - - /* switch clock mux output from hclk to sclk */ - writel_relaxed(0x1, ir->base + GENI_IR_CLK_MUX); - - /* read back clk_mux register to ensure output clk is active */ - readl_relaxed(ir->base + GENI_IR_CLK_MUX); - - /* select low power mode */ - writel_relaxed(GENI_IR_LOW_POWER_MODE, ir->base + GENI_IR_AHB_MUX_SEL); - - /* enable the RX filter */ - writel_relaxed(0x1, ir->base + GENI_IR_RX_FILTER_EN); - /*write memory barrier*/ - wmb(); -} - -/* configures geni IR to normal mode */ -static void msm_geni_ir_normal_mode(struct msm_geni_ir *ir) -{ - u32 clk_cfg; - - /* ensure RX filter is disabled */ - writel_relaxed(0x0, ir->base + GENI_IR_RX_FILTER_EN); - - /* switch clock mux output from sclk to hclk */ - writel_relaxed(0x0, ir->base + GENI_IR_CLK_MUX); - - /* read back clk_mux register to ensure output clk is active */ - readl_relaxed(ir->base + GENI_IR_CLK_MUX); - - /* select normal mode */ - 
writel_relaxed(GENI_IR_NORMAL_MODE, ir->base + GENI_IR_AHB_MUX_SEL); - - /* stop GENI IR */ - msm_geni_ir_stop(ir); - - /* configure serial clock */ - clk_cfg = RX_CLK_DIV_VALUE(RX_CLK_DIV) | RX_SER_CLK_EN; - writel_relaxed(clk_cfg, ir->base + IR_GENI_SER_CLK_CFG); - - /* set rx polarization to active low */ - writel_relaxed(RX_POL_LOW, ir->base + IR_GENI_GP_OUTPUT_REG); - - /* enable interrupts */ - writel_relaxed(GENI_IR_DEF_IRQ_EN, ir->base + IR_GENI_IRQ_ENABLE); - - /* enable RX */ - writel_relaxed(0, ir->base + IR_GENI_S_CMD0); - /*write memory barrier*/ - wmb(); -} - /* sets the core for the specified protocol */ static int msm_geni_ir_change_protocol(struct rc_dev *dev, u64 *rc_type) { @@ -883,6 +792,13 @@ static int msm_geni_ir_get_res(struct platform_device *pdev, return -ENOMEM; } pr_debug("ir->base: 0x%lx\n", (unsigned long int)ir->base); + + ir->vdda33 = devm_regulator_get(&pdev->dev, "vdda33"); + if (IS_ERR(ir->vdda33)) { + pr_err("unable to get vdda33 supply\n"); + return rc; + } + ir->ahb_clk = clk_get(&pdev->dev, "iface_clk"); ir->serial_clk = clk_get(&pdev->dev, "serial_clk"); if (IS_ERR(ir->ahb_clk)) { @@ -974,6 +890,11 @@ int msm_geni_ir_probe(struct platform_device *pdev) goto rc_register_err; } + rc = regulator_enable(ir->vdda33); + if (rc) { + pr_err("Unable to enable vdda33:%d\n", rc); + return rc; + } #ifdef CONFIG_IR_MSM_GENI_TX ir->misc.minor = MISC_DYNAMIC_MINOR; @@ -1032,11 +953,6 @@ static int msm_geni_ir_suspend(struct device *dev) { struct msm_geni_ir *ir = platform_get_drvdata(to_platform_device(dev)); - if (ir->image_loaded != NULL) { - /* configure low power mode */ - msm_geni_ir_low_power_mode(ir); - clk_disable_unprepare(ir->ahb_clk); - } enable_irq_wake(ir->wakeup_irq); return 0; @@ -1046,27 +962,17 @@ static int msm_geni_ir_resume(struct device *dev) { struct msm_geni_ir *ir = platform_get_drvdata(to_platform_device(dev)); u32 status; - int rc; disable_irq_wake(ir->wakeup_irq); if (ir->image_loaded == NULL) return 0; - rc = 
clk_prepare_enable(ir->ahb_clk); - if (rc) { - pr_err("ahb clk enable failed %d\n", rc); - return rc; - } - /* clear wakeup irq */ status = readl_relaxed(ir->base + GENI_IR_IRQ_STATUS); writel_relaxed(status, ir->base + GENI_IR_IRQ_CLEAR); /*write memory barrier*/ wmb(); - /* configure normal mode */ - msm_geni_ir_normal_mode(ir); - return 0; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c index fbeaf6c0f7e1..7056ab0738fb 100644 --- a/drivers/mtd/devices/msm_qpic_nand.c +++ b/drivers/mtd/devices/msm_qpic_nand.c @@ -26,6 +26,8 @@ #define MAX_DESC 16 #define SMEM_AARM_PARTITION_TABLE 9 #define SMEM_APPS 0 +#define ONE_CODEWORD_SIZE 516 + static bool enable_euclean; static bool enable_perfstats; @@ -1054,8 +1056,9 @@ static int msm_nand_flash_onfi_probe(struct msm_nand_info *info) flash->blksize = onfi_param_page_ptr->number_of_pages_per_block * flash->pagesize; flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page; - flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit - * flash->blksize; + flash->density = onfi_param_page_ptr->number_of_logical_units * + onfi_param_page_ptr->number_of_blocks_per_logical_unit * + flash->blksize; flash->ecc_correctability = onfi_param_page_ptr->number_of_bits_ecc_correctability; @@ -1183,10 +1186,16 @@ static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read, err = -EINVAL; goto out; } - args->page_count = ops->len / (mtd->writesize + mtd->oobsize); + if (ops->len <= ONE_CODEWORD_SIZE) + args->page_count = 1; + else + args->page_count = ops->len / + (mtd->writesize + mtd->oobsize); } else if (ops->mode == MTD_OPS_AUTO_OOB) { - if (ops->datbuf && (ops->len % mtd->writesize) != 0) { + if (ops->datbuf && (ops->len % + ((ops->len <= ONE_CODEWORD_SIZE) ? 
+ ONE_CODEWORD_SIZE : mtd->writesize)) != 0) { /* when ops->datbuf is NULL, ops->len can be ooblen */ pr_err("unsupported data len %d for AUTO mode\n", ops->len); @@ -1199,7 +1208,10 @@ static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read, if ((args->page_count == 0) && (ops->ooblen)) args->page_count = 1; } else if (ops->datbuf) { - args->page_count = ops->len / mtd->writesize; + if (ops->len <= ONE_CODEWORD_SIZE) + args->page_count = 1; + else + args->page_count = ops->len / mtd->writesize; } } @@ -1245,12 +1257,20 @@ static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip, struct msm_nand_rw_params *args, struct msm_nand_rw_reg_data *data) { + /* + * While reading one codeword, CW_PER_PAGE bits of QPIC_NAND_DEV0_CFG0 + * should be set to 0, which implies 1 codeword per page. 'n' below, + * is used to configure cfg0 for reading one full page or one single + * codeword. + */ + int n = (ops->len <= ONE_CODEWORD_SIZE) ? args->cwperpage : 1; + if (args->read) { if (ops->mode != MTD_OPS_RAW) { data->cmd = MSM_NAND_CMD_PAGE_READ_ECC; data->cfg0 = (chip->cfg0 & ~(7U << CW_PER_PAGE)) | - (((args->cwperpage-1) - args->start_sector) + (((args->cwperpage-n) - args->start_sector) << CW_PER_PAGE); data->cfg1 = chip->cfg1; data->ecc_bch_cfg = chip->ecc_bch_cfg; @@ -1258,7 +1278,7 @@ static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip, data->cmd = MSM_NAND_CMD_PAGE_READ_ALL; data->cfg0 = (chip->cfg0_raw & ~(7U << CW_PER_PAGE)) | - (((args->cwperpage-1) - args->start_sector) + (((args->cwperpage-n) - args->start_sector) << CW_PER_PAGE); data->cfg1 = chip->cfg1_raw; data->ecc_bch_cfg = chip->ecc_cfg_raw; @@ -1302,6 +1322,11 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, uint32_t offset, size, last_read; struct sps_command_element *curr_ce, *start_ce; uint32_t *flags_ptr, *num_ce_ptr; + /* + * Variable to configure read_location register parameters + * while reading one codeword or one full page + */ + int n = 
(ops->len <= ONE_CODEWORD_SIZE) ? args->cwperpage : 1; if (curr_cw == args->start_sector) { curr_ce = start_ce = &cmd_list->setup_desc.ce[0]; @@ -1394,10 +1419,15 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, if (ops->mode == MTD_OPS_AUTO_OOB) { if (ops->datbuf) { offset = 0; - size = (curr_cw < (args->cwperpage - 1)) ? 516 : - (512 - ((args->cwperpage - 1) << 2)); - last_read = (curr_cw < (args->cwperpage - 1)) ? 1 : - (ops->oobbuf ? 0 : 1); + if (ops->len <= ONE_CODEWORD_SIZE) { + size = ONE_CODEWORD_SIZE; + last_read = 1; + } else { + size = (curr_cw < (args->cwperpage - 1)) ? 516 : + (512 - ((args->cwperpage - 1) << 2)); + last_read = (curr_cw < (args->cwperpage - 1)) ? + 1 : (ops->oobbuf ? 0 : 1); + } rdata = (offset << 0) | (size << 16) | (last_read << 31); @@ -1413,7 +1443,7 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, curr_ce++; } } - if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) { + if (curr_cw == (args->cwperpage - n) && ops->oobbuf) { offset = 512 - ((args->cwperpage - 1) << 2); size = (args->cwperpage) << 2; if (size > args->oob_len_cmd) @@ -1471,6 +1501,11 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, uint32_t sectordatasize, sectoroobsize; uint32_t sps_flags = 0; int err = 0; + /* + * Variable to configure sectordatasize and sectoroobsize + * while reading one codeword or one full page. + */ + int n = (ops->len <= ONE_CODEWORD_SIZE) ? 
args->cwperpage : 1; if (args->read) data_pipe_handle = info->sps.data_prod.handle; @@ -1479,7 +1514,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, if (ops->mode == MTD_OPS_RAW) { if (ecc_parity_bytes && args->read) { - if (curr_cw == (args->cwperpage - 1)) + if (curr_cw == (args->cwperpage - n)) sps_flags |= SPS_IOVEC_FLAG_INT; /* read only ecc bytes */ @@ -1494,7 +1529,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, sectordatasize = chip->cw_size; if (!args->read) sps_flags = SPS_IOVEC_FLAG_EOT; - if (curr_cw == (args->cwperpage - 1)) + if (curr_cw == (args->cwperpage - n)) sps_flags |= SPS_IOVEC_FLAG_INT; err = sps_transfer_one(data_pipe_handle, @@ -1507,8 +1542,13 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, } } else if (ops->mode == MTD_OPS_AUTO_OOB) { if (ops->datbuf) { - sectordatasize = (curr_cw < (args->cwperpage - 1)) - ? 516 : (512 - ((args->cwperpage - 1) << 2)); + if (ops->len <= ONE_CODEWORD_SIZE) + sectordatasize = ONE_CODEWORD_SIZE; + else + sectordatasize = + (curr_cw < (args->cwperpage - 1)) + ? 
516 : + (512 - ((args->cwperpage - 1) << 2)); if (!args->read) { sps_flags = SPS_IOVEC_FLAG_EOT; @@ -1516,7 +1556,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, ops->oobbuf) sps_flags = 0; } - if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf) + if ((curr_cw == (args->cwperpage - n)) && !ops->oobbuf) sps_flags |= SPS_IOVEC_FLAG_INT; err = sps_transfer_one(data_pipe_handle, @@ -1528,7 +1568,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, args->data_dma_addr_curr += sectordatasize; } - if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) { + if (ops->oobbuf && (curr_cw == (args->cwperpage - n))) { sectoroobsize = args->cwperpage << 2; if (sectoroobsize > args->oob_len_data) sectoroobsize = args->oob_len_data; @@ -1984,7 +2024,7 @@ static int msm_nand_is_erased_page_ps(struct mtd_info *mtd, loff_t from, total_ecc_byte_cnt, DMA_FROM_DEVICE); /* check for bit flips in ecc data */ ecc_temp = ecc; - for (n = rw_params->start_sector; n < cwperpage; n++) { + for (n = rw_params->start_sector; !err && n < cwperpage; n++) { int last_pos = 0, next_pos = 0; int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8); @@ -2624,7 +2664,7 @@ static int msm_nand_is_erased_page(struct mtd_info *mtd, loff_t from, total_ecc_byte_cnt, DMA_FROM_DEVICE); /* check for bit flips in ecc data */ ecc_temp = ecc; - for (n = rw_params->start_sector; n < cwperpage; n++) { + for (n = rw_params->start_sector; !err && n < cwperpage; n++) { int last_pos = 0, next_pos = 0; int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8); @@ -2728,6 +2768,9 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, data.addr0 = (rw_params.page << 16) | rw_params.oob_col; data.addr1 = (rw_params.page >> 16) & 0xff; + if (ops->len <= ONE_CODEWORD_SIZE) + cwperpage = 1; + for (n = rw_params.start_sector; n < cwperpage; n++) { struct sps_command_element *curr_ce, *start_ce; @@ -2805,7 +2848,7 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t 
from, } else if (ops->mode == MTD_OPS_AUTO_OOB) { if (ops->datbuf) submitted_num_desc = cwperpage - - rw_params.start_sector; + rw_params.start_sector; if (ops->oobbuf) submitted_num_desc++; } @@ -2998,7 +3041,10 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, } validate_mtd_params_failed: if (ops->mode != MTD_OPS_RAW) - ops->retlen = mtd->writesize * pages_read; + if (ops->len <= ONE_CODEWORD_SIZE) + ops->retlen = ONE_CODEWORD_SIZE; + else + ops->retlen = mtd->writesize * pages_read; else ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read; ops->oobretlen = ops->ooblen - rw_params.oob_len_data; @@ -3071,8 +3117,11 @@ static int msm_nand_read_partial_page(struct mtd_info *mtd, ops->datbuf = no_copy ? actual_buf : bounce_buf; if (info->nand_chip.caps & MSM_NAND_CAP_PAGE_SCOPE_READ) err = msm_nand_read_pagescope(mtd, aligned_from, ops); - else + else { + if ((len <= ONE_CODEWORD_SIZE) && (offset == 0)) + ops->len = ONE_CODEWORD_SIZE; err = msm_nand_read_oob(mtd, aligned_from, ops); + } if (err == -EUCLEAN) { is_euclean = 1; err = 0; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c index 3ee89fdd8c54..2e658278fa70 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c @@ -103,6 +103,7 @@ EXPORT_SYMBOL(rmnet_descriptor_add_frag); int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc, int start, u8 *nexthdrp, __be16 *fragp) { + u32 frag_size = skb_frag_size(&frag_desc->frag); u8 nexthdr = *nexthdrp; *fragp = 0; @@ -114,11 +115,17 @@ int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc, if (nexthdr == NEXTHDR_NONE) return -EINVAL; - hp = rmnet_frag_data_ptr(frag_desc) + start; + if (start >= frag_size) + return -EINVAL; + hp = rmnet_frag_data_ptr(frag_desc) + start; if (nexthdr == NEXTHDR_FRAGMENT) { __be16 *fp; + if (start + offsetof(struct frag_hdr, frag_off) 
>= + frag_size) + return -EINVAL; + fp = rmnet_frag_data_ptr(frag_desc) + start + offsetof(struct frag_hdr, frag_off); *fragp = *fp; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 8c7109008185..fc89b34b74e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -376,6 +376,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phy = dev->phydev; int rc; + u32 cmd_speed = cmd->base.speed; if (priv->hw->pcs & STMMAC_PCS_RGMII || priv->hw->pcs & STMMAC_PCS_SGMII) { @@ -403,7 +404,12 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, return 0; } - rc = phy_ethtool_ksettings_set(phy, cmd); + /* Half duplex is not supported */ + if (cmd->base.duplex != DUPLEX_FULL || + (cmd_speed == SPEED_1000 && cmd->base.autoneg == AUTONEG_DISABLE)) + rc = -EINVAL; + else + rc = phy_ethtool_ksettings_set(phy, cmd); return rc; } diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 3a15923bdd26..7c6733b026e5 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,6 +59,74 @@ * the buffer is sent/received. 
*/ +static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 0: + addr = 0x00032000; + break; + case 3: + addr = 0x0003200C; + break; + case 4: + addr = 0x00032010; + break; + case 5: + addr = 0x00032014; + break; + case 7: + addr = 0x0003201C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + return addr; +} + +static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 1: + addr = 0x00032034; + break; + case 2: + addr = 0x00032038; + break; + case 5: + addr = 0x00032044; + break; + case 7: + addr = 0x0003204C; + break; + case 8: + addr = 0x00032050; + break; + case 9: + addr = 0x00032054; + break; + case 10: + addr = 0x00032058; + break; + case 11: + addr = 0x0003205C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + + return addr; +} + static inline unsigned int ath10k_set_ring_byte(unsigned int offset, struct ath10k_hw_ce_regs_addr_map *addr_map) @@ -116,19 +185,74 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, ar->hw_ce_regs->sr_wr_index_addr); } +static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar, + u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_srri_addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, 
ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); + + return index; +} + +static inline void +ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); +} + +static inline void +ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - unsigned int addr) + u32 ce_id, + u64 addr) { + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_base_addr, addr); + ar->hw_ce_regs->sr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_src_ring_base_addr_hi) { + ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) +{ + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr_hi, addr_hi); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, @@ -181,19 +305,64 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); } +static inline + u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) & + CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_drri_addr); + struct ath10k_ce *ce = 
ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); + + return index; } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - u32 addr) + u32 ce_id, + u64 addr) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { + ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) { + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + u32 reg_value; + + reg_value = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi); + reg_value &= ~CE_DESC_ADDR_HI_MASK; + reg_value |= addr_hi; ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_base_addr, addr); + ar->hw_ce_regs->dr_base_addr_hi, reg_value); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, @@ -376,8 +545,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = 
write_index; exit: @@ -395,7 +570,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, struct ath10k_ce_ring *src_ring = ce_state->src_ring; struct ce_desc_64 *desc, sdesc; unsigned int nentries_mask = src_ring->nentries_mask; - unsigned int sw_index = src_ring->sw_index; + unsigned int sw_index; unsigned int write_index = src_ring->write_index; u32 ctrl_addr = ce_state->ctrl_addr; __le32 *addr; @@ -409,6 +584,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); + if (ar->hw_params.rri_on_ddr) + sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id); + else + sw_index = src_ring->sw_index; + if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -428,7 +608,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, addr = (__le32 *)&sdesc.addr; - flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; + flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK; addr[0] = __cpu_to_le32(buffer); addr[1] = __cpu_to_le32(flags); if (flags & CE_SEND_FLAG_GATHER) @@ -596,7 +776,7 @@ static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe, return -ENOSPC; desc->addr = __cpu_to_le64(paddr); - desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK); + desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); desc->nbytes = 0; @@ -925,7 +1105,10 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, src_ring->hw_index = read_index; } - read_index = src_ring->hw_index; + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; if (read_index == sw_index) return -EIO; @@ -1198,7 +1381,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); src_ring->write_index &= src_ring->nentries_mask; - 
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_src_ring_base_addr_set(ar, ce_id, src_ring->base_addr_ce_space); ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); @@ -1237,7 +1420,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); dest_ring->write_index &= dest_ring->nentries_mask; - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, dest_ring->base_addr_ce_space); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); @@ -1251,6 +1434,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, return 0; } +static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 nentries) +{ + src_ring->shadow_base_unaligned = kcalloc(nentries, + sizeof(struct ce_desc), + GFP_KERNEL); + if (!src_ring->shadow_base_unaligned) + return -ENOMEM; + + src_ring->shadow_base = (struct ce_desc *) + PTR_ALIGN(src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + return 0; +} + static struct ath10k_ce_ring * ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) @@ -1258,6 +1457,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1294,6 +1494,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ 
-1304,6 +1517,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1339,6 +1553,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ -1477,7 +1704,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); @@ -1487,7 +1714,7 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); } @@ -1505,6 +1732,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc) + @@ -1534,6 +1763,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { 
+ if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc_64) + @@ -1615,6 +1846,8 @@ static const struct ath10k_ce_ops ce_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data, .ce_free_pipe = _ath10k_ce_free_pipe, .ce_send_nolock = _ath10k_ce_send_nolock, + .ce_set_src_ring_base_addr_hi = NULL, + .ce_set_dest_ring_base_addr_hi = NULL, }; static const struct ath10k_ce_ops ce_64_ops = { @@ -1627,6 +1860,8 @@ static const struct ath10k_ce_ops ce_64_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, .ce_free_pipe = _ath10k_ce_free_pipe_64, .ce_send_nolock = _ath10k_ce_send_nolock_64, + .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, + .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, }; static void ath10k_ce_set_ops(struct ath10k *ar, @@ -1702,3 +1937,46 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, return 0; } EXPORT_SYMBOL(ath10k_ce_alloc_pipe); + +void ath10k_ce_alloc_rri(struct ath10k *ar) +{ + int i; + u32 value; + u32 ctrl1_regs; + u32 ce_base_addr; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->vaddr_rri = dma_alloc_coherent(ar->dev, + (CE_COUNT * sizeof(u32)), + &ce->paddr_rri, GFP_KERNEL); + + if (!ce->vaddr_rri) + return; + + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low, + lower_32_bits(ce->paddr_rri)); + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, + (upper_32_bits(ce->paddr_rri) & + CE_DESC_ADDR_HI_MASK)); + + for (i = 0; i < CE_COUNT; i++) { + ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; + ce_base_addr = ath10k_ce_base_address(ar, i); + value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs); + value |= ar->hw_ce_regs->upd->mask; + ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); + } + + memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32)); +} +EXPORT_SYMBOL(ath10k_ce_alloc_rri); + +void ath10k_ce_free_rri(struct ath10k *ar) +{ + struct ath10k_ce 
*ce = ath10k_ce_priv(ar); + + dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), + ce->vaddr_rri, + ce->paddr_rri); +} +EXPORT_SYMBOL(ath10k_ce_free_rri); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index ed96dbfe8894..2282aae9557f 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -38,8 +39,8 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) #define CE_WCN3990_DESC_FLAGS_GATHER BIT(31) -#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0) -#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0) +#define CE_DESC_ADDR_MASK GENMASK_ULL(34, 0) +#define CE_DESC_ADDR_HI_MASK GENMASK(4, 0) /* Following desc flags are used in QCA99X0 */ #define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) @@ -48,6 +49,9 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask #define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb +#define CE_DDR_RRI_MASK GENMASK(15, 0) +#define CE_DDR_DRRI_SHIFT 16 + struct ce_desc { __le32 addr; __le16 nbytes; @@ -100,7 +104,7 @@ struct ath10k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* * Actual start of descriptors. 
@@ -111,7 +115,10 @@ struct ath10k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; + + char *shadow_base_unaligned; + struct ce_desc *shadow_base; /* keep last */ void *per_transfer_context[0]; @@ -153,6 +160,8 @@ struct ath10k_ce { spinlock_t ce_lock; const struct ath10k_bus_ops *bus_ops; struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + u32 *vaddr_rri; + dma_addr_t paddr_rri; }; /*==================Send====================*/ @@ -261,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data); +void ath10k_ce_alloc_rri(struct ath10k *ar); +void ath10k_ce_free_rri(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? */ @@ -320,12 +331,21 @@ struct ath10k_ce_ops { void *per_transfer_context, dma_addr_t buffer, u32 nbytes, u32 transfer_id, u32 flags); + void (*ce_set_src_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); + void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); }; static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) { return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; } +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \ + - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) + #define CE_SRC_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 22fb8e6f21c1..81e9b36c056a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -39,6 +39,7 @@ static unsigned int ath10k_cryptmode_param; static bool uart_print; static bool skip_otp; static bool rawmode; +static bool fw_diag_log; /* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and 
ATH10K_FW_CRASH_DUMP_CE_DATA * by default. @@ -51,6 +52,7 @@ module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); module_param(skip_otp, bool, 0644); module_param(rawmode, bool, 0644); +module_param(fw_diag_log, bool, 0644); module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); MODULE_PARM_DESC(debug_mask, "Debugging mask"); @@ -59,6 +61,7 @@ MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath"); MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); +MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -90,6 +93,41 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, + }, + { + .id = QCA988X_HW_2_0_VERSION, + .dev_id = QCA988X_2_0_DEVICE_ID_UBNT, + .name = "qca988x hw2.0 ubiquiti", + .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, + .fw = { + .dir = QCA988X_HW_2_0_FW_DIR, + .board = QCA988X_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA988X_BOARD_DATA_SZ, + .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, + }, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + 
.rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9887_HW_1_0_VERSION, @@ -120,6 +158,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_2_1_VERSION, @@ -149,6 +190,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_2_1_VERSION, @@ -178,6 +222,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_3_0_VERSION, @@ -207,6 +254,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_3_2_VERSION, @@ -239,6 +289,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -274,6 +327,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -316,6 +372,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit 
= false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -355,6 +414,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -384,6 +446,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -415,6 +480,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -451,6 +519,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -472,6 +543,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, .per_ce_irq = true, + .shadow_reg_support = true, + .rri_on_ddr = true, + .hw_filter_reset_required = false, }, }; @@ -2505,7 +2579,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, * possible to implicitly make it correct by creating a dummy vdev and * then deleting it. 
*/ - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + if (ar->hw_params.hw_filter_reset_required && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_core_reset_rx_filter(ar); if (status) { ath10k_err(ar, @@ -2540,6 +2615,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, if (status) goto err_hif_stop; + status = ath10k_hif_set_target_log_mode(ar, fw_diag_log); + if (status && status != -EOPNOTSUPP) { + ath10k_warn(ar, "set traget log mode faileds: %d\n", status); + goto err_hif_stop; + } + return 0; err_hif_stop: diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 352efdbf6747..04dab735de7f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -496,6 +496,7 @@ struct ath10k_debug { u32 reg_addr; u32 nf_cal_period; void *cal_data; + u8 fw_dbglog_mode; }; enum ath10k_state { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 2ab3c751e6ba..a57f892830c0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1708,7 +1708,9 @@ int ath10k_debug_start(struct ath10k *ar) ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } - if (ar->debug.nf_cal_period) { + if (ar->debug.nf_cal_period && + !test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); @@ -1725,7 +1727,9 @@ void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); - ath10k_debug_cal_data_fetch(ar); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) + ath10k_debug_cal_data_fetch(ar); /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). 
The check for htt_stats_mask is to avoid @@ -2217,15 +2221,18 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); - debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, - &fops_cal_data); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, + &fops_cal_data); + + debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); + } debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ani_enable); - debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, - &fops_nf_cal_period); - if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, ar, &fops_simulate_radar); diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 1a59ea0068c2..ee825a78480d 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -23,6 +23,12 @@ #include "bmi.h" #include "debug.h" +/* Types of fw logging mode */ +enum ath_dbg_mode { + ATH10K_ENABLE_FW_LOG_DIAG, + ATH10K_ENABLE_FW_LOG_CE, +}; + struct ath10k_hif_sg_item { u16 transfer_id; void *transfer_context; /* NULL = tx completion callback not called */ @@ -97,6 +103,7 @@ struct ath10k_hif_ops { int (*get_target_info)(struct ath10k *ar, struct bmi_target_info *target_info); + int (*set_target_log_mode)(struct ath10k *ar, u8 fw_log_mode); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -231,4 +238,12 @@ static inline int ath10k_hif_get_target_info(struct ath10k *ar, return ar->hif.ops->get_target_info(ar, tgt_info); } +static inline int ath10k_hif_set_target_log_mode(struct ath10k *ar, + u8 fw_log_mode) +{ + if (!ar->hif.ops->set_target_log_mode) + return -EOPNOTSUPP; + + return ar->hif.ops->set_target_log_mode(ar, fw_log_mode); 
+} #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 8902720b4e49..31f1a316be7c 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -87,7 +87,8 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, hdr->eid = ep->eid; hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); hdr->flags = 0; - hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; + if (ep->tx_credit_flow_enabled) + hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; spin_lock_bh(&ep->htc->tx_lock); hdr->seq_no = ep->seq_no++; diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 625198dea18b..55743f781d5a 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -268,7 +268,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; } - status = ath10k_htt_h2t_aggr_cfg_msg(htt, + status = htt->tx_ops->htt_h2t_aggr_cfg_msg(htt, htt->max_num_ampdu, htt->max_num_amsdu); if (status) { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 5de693845d31..e9f54b37db6f 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -357,6 +357,13 @@ struct htt_aggr_conf { u8 max_num_amsdu_subframes; } __packed; +struct htt_aggr_conf_v2 { + u8 max_num_ampdu_subframes; + /* amsdu_subframes is limited by 0x1F mask */ + u8 max_num_amsdu_subframes; + u8 reserved; +} __packed; + #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 struct htt_mgmt_tx_desc_qca99x0 { __le32 rate; @@ -1621,6 +1628,7 @@ struct htt_cmd { struct htt_stats_req stats_req; struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; + struct htt_aggr_conf_v2 aggr_conf_v2; struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; struct htt_tx_fetch_resp tx_fetch_resp; @@ -1859,6 +1867,9 @@ struct ath10k_htt_tx_ops { 
struct sk_buff *msdu); int (*htt_alloc_txbuff)(struct ath10k_htt *htt); void (*htt_free_txbuff)(struct ath10k_htt *htt); + int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu); }; struct ath10k_htt_rx_ops { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 62f3bfab405f..ad0692e2de8e 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -981,6 +981,53 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, return 0; } +static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu) +{ + struct ath10k *ar = htt->ar; + struct htt_aggr_conf_v2 *aggr_conf; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len; + int ret; + + /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ + + if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) + return -EINVAL; + + if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) + return -EINVAL; + + len = sizeof(cmd->hdr); + len += sizeof(cmd->aggr_conf_v2); + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; + + aggr_conf = &cmd->aggr_conf_v2; + aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; + aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + aggr_conf->max_num_amsdu_subframes, + aggr_conf->max_num_ampdu_subframes); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + int ath10k_htt_tx_fetch_resp(struct ath10k *ar, __le32 token, __le16 fetch_seq_num, @@ -1359,7 +1406,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, u16 msdu_id, flags1 = 0; u16 freq = 0; dma_addr_t frags_paddr = 0; - u32 txbuf_paddr; + dma_addr_t txbuf_paddr; struct 
htt_msdu_ext_desc_64 *ext_desc = NULL; struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; @@ -1555,6 +1602,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { .htt_tx = ath10k_htt_tx_32, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg, }; static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { @@ -1565,6 +1613,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { .htt_tx = ath10k_htt_tx_64, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2, }; void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 07933da8c84f..c3bb2b947f4c 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -310,18 +310,24 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { .wm_high = &wcn3990_dst_wm_high, }; +static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { + .shift = 19, + .mask = 0x00080000, + .enable = 0x00000000, +}; + struct ath10k_hw_ce_regs wcn3990_ce_regs = { - .sr_base_addr = 0x00000000, + .sr_base_addr_lo = 0x00000000, + .sr_base_addr_hi = 0x00000004, .sr_size_addr = 0x00000008, - .dr_base_addr = 0x0000000c, + .dr_base_addr_lo = 0x0000000c, + .dr_base_addr_hi = 0x00000010, .dr_size_addr = 0x00000014, .misc_ie_addr = 0x00000034, .sr_wr_index_addr = 0x0000003c, .dst_wr_index_addr = 0x00000040, .current_srri_addr = 0x00000044, .current_drri_addr = 0x00000048, - .ddr_addr_for_rri_low = 0x00000004, - .ddr_addr_for_rri_high = 0x00000008, .ce_rri_low = 0x0024C004, .ce_rri_high = 0x0024C008, .host_ie_addr = 0x0000002c, @@ -331,6 +337,7 @@ struct ath10k_hw_ce_regs wcn3990_ce_regs = { .misc_regs = &wcn3990_misc_reg, .wm_srcr = &wcn3990_wm_src_ring, .wm_dstr = &wcn3990_wm_dst_ring, + .upd = 
&wcn3990_ctrl1_upd, }; const struct ath10k_hw_values wcn3990_values = { @@ -458,9 +465,9 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { }; struct ath10k_hw_ce_regs qcax_ce_regs = { - .sr_base_addr = 0x00000000, + .sr_base_addr_lo = 0x00000000, .sr_size_addr = 0x00000004, - .dr_base_addr = 0x00000008, + .dr_base_addr_lo = 0x00000008, .dr_size_addr = 0x0000000c, .ce_cmd_addr = 0x00000018, .misc_ie_addr = 0x00000034, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index cfea37299d2b..862da0b8caaf 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +23,7 @@ #define ATH10K_FW_DIR "ath10k" +#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac) #define QCA988X_2_0_DEVICE_ID (0x003c) #define QCA6164_2_1_DEVICE_ID (0x0041) #define QCA6174_2_1_DEVICE_ID (0x003e) @@ -336,10 +338,18 @@ struct ath10k_hw_ce_dst_src_wm_regs { struct ath10k_hw_ce_regs_addr_map *wm_low; struct ath10k_hw_ce_regs_addr_map *wm_high; }; +struct ath10k_hw_ce_ctrl1_upd { + u32 shift; + u32 mask; + u32 enable; +}; + struct ath10k_hw_ce_regs { - u32 sr_base_addr; + u32 sr_base_addr_lo; + u32 sr_base_addr_hi; u32 sr_size_addr; - u32 dr_base_addr; + u32 dr_base_addr_lo; + u32 dr_base_addr_hi; u32 dr_size_addr; u32 ce_cmd_addr; u32 misc_ie_addr; @@ -358,7 +368,9 @@ struct ath10k_hw_ce_regs { struct ath10k_hw_ce_cmd_halt *cmd_halt; struct ath10k_hw_ce_host_ie *host_ie; struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; - struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; }; + struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + struct ath10k_hw_ce_ctrl1_upd *upd; +}; struct ath10k_hw_values { 
u32 rtc_state_val_on; @@ -574,6 +586,17 @@ struct ath10k_hw_params { /* target supporting per ce IRQ */ bool per_ce_irq; + + /* target supporting shadow register for ce write */ + bool shadow_reg_support; + + /* target supporting retention restore on ddr */ + bool rri_on_ddr; + + /* targets which require hw filter reset during boot up, + * to avoid it sending spurious acks. + */ + bool hw_filter_reset_required; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index cd514c4b22f5..209639477eff 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -58,6 +58,9 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 static const struct pci_device_id ath10k_pci_id_table[] = { + /* PCI-E QCA988X V2 (Ubiquiti branded) */ + { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) }, + { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ @@ -74,6 +77,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { * hacks. ath10k doesn't have them and these devices crash horribly * because of that. 
*/ + { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, @@ -2195,6 +2199,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); switch (ar_pci->pdev->device) { + case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: case QCA9888_2_0_DEVICE_ID: @@ -3427,6 +3432,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); switch (pci_dev->device) { + case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 56cb1831dcdf..664a51831abb 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -95,6 +95,9 @@ static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi) int ret; int i; + if (qmi->msa_fixed_perm) + return 0; + for (i = 0; i < qmi->nr_mem_region; i++) { ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]); if (ret) @@ -113,6 +116,9 @@ static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi) { int i; + if (qmi->msa_fixed_perm) + return; + for (i = 0; i < qmi->nr_mem_region; i++) ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]); } @@ -630,6 +636,51 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) return ret; } +int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct wlfw_ini_resp_msg_v01 resp = {}; + struct ath10k_qmi *qmi = ar_snoc->qmi; + struct wlfw_ini_req_msg_v01 req = {}; + struct qmi_txn txn; + int ret; + + req.enablefwlog_valid = 1; + req.enablefwlog = fw_log_mode; + + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out; + + ret = 
qmi_send_request(&qmi->qmi_hdl, NULL, &txn, + QMI_WLFW_INI_REQ_V01, + WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_ini_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath10k_err(ar, "fail to send fw log reqest: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); + if (ret < 0) + goto out; + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath10k_err(ar, "fw log request rejectedr: %d\n", + resp.resp.error); + ret = -EINVAL; + goto out; + } + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n", + fw_log_mode); + return 0; + +out: + return ret; +} + static int ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi) { @@ -945,6 +996,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size) qmi->msa_mem_size = msa_size; } + if (of_property_read_bool(dev->of_node, "qcom,msa_fixed_perm")) + qmi->msa_fixed_perm = true; + ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n", &qmi->msa_pa, qmi->msa_va); diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h index 1efe1d22fc2f..3a039e6e29df 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.h +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -115,6 +115,7 @@ struct ath10k_qmi { bool fw_ready; char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1]; struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01]; + bool msa_fixed_perm; }; int ath10k_qmi_wlan_enable(struct ath10k *ar, @@ -125,5 +126,6 @@ int ath10k_qmi_wlan_disable(struct ath10k *ar); int ath10k_qmi_register_service_notifier(struct notifier_block *nb); int ath10k_qmi_init(struct ath10k *ar, u32 msa_size); int ath10k_qmi_deinit(struct ath10k *ar); +int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode); #endif /* ATH10K_QMI_H */ diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c index ba79c2e4aed6..7ace965e6115 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c 
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c @@ -23,7 +23,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, pipe_num), @@ -32,7 +32,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_pipedir_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, pipe_dir), @@ -41,7 +41,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, nentries), @@ -50,7 +50,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, nbytes_max), @@ -59,7 +59,7 @@ static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, flags), @@ -72,7 +72,7 @@ static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, service_id), @@ -81,7 +81,7 @@ static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_pipedir_enum_v01), - .array_type 
= NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, pipe_dir), @@ -90,7 +90,7 @@ static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, pipe_num), @@ -103,7 +103,7 @@ static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, id), @@ -112,7 +112,7 @@ static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, offset), @@ -125,7 +125,7 @@ static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01, addr), @@ -138,7 +138,7 @@ static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_memory_region_info_s_v01, region_addr), @@ -147,7 +147,7 @@ static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_memory_region_info_s_v01, size), @@ -156,7 +156,7 @@ static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = 
sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_memory_region_info_s_v01, secure_flag), @@ -169,7 +169,7 @@ static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_cfg_s_v01, offset), @@ -178,7 +178,7 @@ static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_cfg_s_v01, size), @@ -187,7 +187,7 @@ static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_cfg_s_v01, secure_flag), @@ -200,7 +200,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_s_v01, size), @@ -209,7 +209,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_mem_type_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_s_v01, type), @@ -218,7 +218,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_s_v01, mem_cfg_len), @@ -227,7 +227,7 @@ static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01, .elem_size = sizeof(struct wlfw_mem_cfg_s_v01), - 
.array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_s_v01, mem_cfg), @@ -241,7 +241,7 @@ static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, addr), @@ -250,7 +250,7 @@ static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, size), @@ -259,7 +259,7 @@ static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_mem_type_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, type), @@ -272,7 +272,7 @@ static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_rf_chip_info_s_v01, chip_id), @@ -281,7 +281,7 @@ static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_rf_chip_info_s_v01, chip_family), @@ -294,7 +294,7 @@ static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_rf_board_info_s_v01, board_id), @@ -307,7 +307,7 @@ static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - 
.array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_soc_info_s_v01, soc_id), @@ -320,7 +320,7 @@ static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_fw_version_info_s_v01, fw_version), @@ -329,7 +329,7 @@ static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = { .data_type = QMI_STRING, .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1, .elem_size = sizeof(char), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct wlfw_fw_version_info_s_v01, fw_build_timestamp), @@ -342,7 +342,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, fw_ready_enable_valid), @@ -351,7 +351,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, fw_ready_enable), @@ -360,7 +360,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, initiate_cal_download_enable_valid), @@ -369,7 +369,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, initiate_cal_download_enable), @@ -378,7 +378,7 @@ struct qmi_elem_info 
wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, initiate_cal_update_enable_valid), @@ -387,7 +387,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, initiate_cal_update_enable), @@ -396,7 +396,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, msa_ready_enable_valid), @@ -405,7 +405,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, msa_ready_enable), @@ -414,7 +414,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, pin_connect_result_enable_valid), @@ -423,7 +423,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, pin_connect_result_enable), @@ -432,7 +432,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, 
.offset = offsetof(struct wlfw_ind_register_req_msg_v01, client_id_valid), @@ -441,7 +441,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, client_id), @@ -450,7 +450,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, request_mem_enable_valid), @@ -459,7 +459,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, request_mem_enable), @@ -468,7 +468,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, mem_ready_enable_valid), @@ -477,7 +477,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, mem_ready_enable), @@ -486,7 +486,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, fw_init_done_enable_valid), @@ -495,7 +495,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, 
.elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, fw_init_done_enable), @@ -504,7 +504,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, rejuvenate_enable_valid), @@ -513,7 +513,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, rejuvenate_enable), @@ -522,7 +522,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, xo_cal_enable_valid), @@ -531,7 +531,7 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, xo_cal_enable), @@ -544,7 +544,7 @@ struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, resp), @@ -554,7 +554,7 @@ struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, fw_status_valid), @@ -563,7 +563,7 @@ struct qmi_elem_info 
wlfw_ind_register_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ind_register_resp_msg_v01, fw_status), @@ -584,7 +584,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, pwr_pin_result_valid), @@ -593,7 +593,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, pwr_pin_result), @@ -602,7 +602,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, phy_io_pin_result_valid), @@ -611,7 +611,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, phy_io_pin_result), @@ -620,7 +620,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, rf_pin_result_valid), @@ -629,7 +629,7 @@ struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = 
NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, rf_pin_result), @@ -642,7 +642,7 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_driver_mode_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, mode), @@ -651,7 +651,7 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, hw_debug_valid), @@ -660,7 +660,7 @@ struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, hw_debug), @@ -673,7 +673,7 @@ struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01, resp), @@ -687,7 +687,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, host_version_valid), @@ -696,7 +696,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_STRING, .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, .elem_size = sizeof(char), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, host_version), @@ -705,7 +705,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, 
.elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, tgt_cfg_valid), @@ -714,7 +714,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, tgt_cfg_len), @@ -723,7 +723,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_CE_V01, .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, tgt_cfg), @@ -733,7 +733,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, svc_cfg_valid), @@ -742,7 +742,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, svc_cfg_len), @@ -751,7 +751,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_SVC_V01, .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, svc_cfg), @@ -761,7 +761,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg_valid), @@ -770,7 +770,7 @@ struct qmi_elem_info 
wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg_len), @@ -779,7 +779,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01, .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg), @@ -789,7 +789,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg_v2_valid), @@ -798,7 +798,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg_v2_len), @@ -807,7 +807,7 @@ struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2, .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, shadow_reg_v2), @@ -821,7 +821,7 @@ struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01, resp), @@ -839,7 +839,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array 
= NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_cap_resp_msg_v01, resp), @@ -849,7 +849,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cap_resp_msg_v01, chip_info_valid), @@ -858,7 +858,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cap_resp_msg_v01, chip_info), @@ -868,7 +868,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cap_resp_msg_v01, board_info_valid), @@ -877,7 +877,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct wlfw_rf_board_info_s_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cap_resp_msg_v01, board_info), @@ -887,7 +887,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cap_resp_msg_v01, soc_info_valid), @@ -896,7 +896,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct wlfw_soc_info_s_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cap_resp_msg_v01, soc_info), @@ -906,7 +906,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, 
.offset = offsetof(struct wlfw_cap_resp_msg_v01, fw_version_info_valid), @@ -915,7 +915,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct wlfw_fw_version_info_s_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cap_resp_msg_v01, fw_version_info), @@ -925,7 +925,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cap_resp_msg_v01, fw_build_id_valid), @@ -934,7 +934,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRING, .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1, .elem_size = sizeof(char), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cap_resp_msg_v01, fw_build_id), @@ -943,7 +943,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct wlfw_cap_resp_msg_v01, num_macs_valid), @@ -952,7 +952,7 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct wlfw_cap_resp_msg_v01, num_macs), @@ -965,7 +965,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, valid), @@ -974,7 +974,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 
0x10, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, file_id_valid), @@ -983,7 +983,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, file_id), @@ -992,7 +992,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, total_size_valid), @@ -1001,7 +1001,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, total_size), @@ -1010,7 +1010,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, seg_id_valid), @@ -1019,7 +1019,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, seg_id), @@ -1028,7 +1028,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, data_valid), @@ -1037,7 +1037,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = 
sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, data_len), @@ -1046,7 +1046,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, .elem_size = sizeof(u8), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, data), @@ -1055,7 +1055,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, end_valid), @@ -1064,7 +1064,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, end), @@ -1073,7 +1073,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, bdf_type_valid), @@ -1082,7 +1082,7 @@ struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, bdf_type), @@ -1095,7 +1095,7 @@ struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01, resp), @@ -1109,7 +1109,7 @@ struct qmi_elem_info 
wlfw_cal_report_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_cal_report_req_msg_v01, meta_data_len), @@ -1118,7 +1118,7 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = QMI_WLFW_MAX_NUM_CAL_V01, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_cal_report_req_msg_v01, meta_data), @@ -1127,7 +1127,7 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_report_req_msg_v01, xo_cal_data_valid), @@ -1136,7 +1136,7 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_report_req_msg_v01, xo_cal_data), @@ -1149,7 +1149,7 @@ struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_cal_report_resp_msg_v01, resp), @@ -1163,7 +1163,7 @@ struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01, cal_id), @@ -1176,7 +1176,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = 
NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, valid), @@ -1185,7 +1185,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, file_id_valid), @@ -1194,7 +1194,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, file_id), @@ -1203,7 +1203,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, total_size_valid), @@ -1212,7 +1212,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, total_size), @@ -1221,7 +1221,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, seg_id_valid), @@ -1230,7 +1230,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, seg_id), @@ -1239,7 +1239,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = 
QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, data_valid), @@ -1248,7 +1248,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, data_len), @@ -1257,7 +1257,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, .elem_size = sizeof(u8), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, data), @@ -1266,7 +1266,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, end_valid), @@ -1275,7 +1275,7 @@ struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cal_download_req_msg_v01, end), @@ -1288,7 +1288,7 @@ struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_cal_download_resp_msg_v01, resp), @@ -1302,7 +1302,7 @@ struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct 
wlfw_initiate_cal_update_ind_msg_v01, cal_id), @@ -1311,7 +1311,7 @@ struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01, total_size), @@ -1324,7 +1324,7 @@ struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_cal_update_req_msg_v01, cal_id), @@ -1333,7 +1333,7 @@ struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_cal_update_req_msg_v01, seg_id), @@ -1346,7 +1346,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, resp), @@ -1356,7 +1356,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, file_id_valid), @@ -1365,7 +1365,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, file_id), @@ -1374,7 +1374,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, 
.elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, total_size_valid), @@ -1383,7 +1383,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, total_size), @@ -1392,7 +1392,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, seg_id_valid), @@ -1401,7 +1401,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, seg_id), @@ -1410,7 +1410,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, data_valid), @@ -1419,7 +1419,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, data_len), @@ -1428,7 +1428,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, .elem_size = sizeof(u8), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, data), @@ -1437,7 +1437,7 @@ struct qmi_elem_info 
wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, end_valid), @@ -1446,7 +1446,7 @@ struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, end), @@ -1459,7 +1459,7 @@ struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_msa_info_req_msg_v01, msa_addr), @@ -1468,7 +1468,7 @@ struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_msa_info_req_msg_v01, size), @@ -1481,7 +1481,7 @@ struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, resp), @@ -1491,7 +1491,7 @@ struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x03, .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, mem_region_info_len), @@ -1500,7 +1500,7 @@ struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_MEM_REG_V01, .elem_size = sizeof(struct wlfw_memory_region_info_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x03, .offset = offsetof(struct 
wlfw_msa_info_resp_msg_v01, mem_region_info), @@ -1518,7 +1518,7 @@ struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01, resp), @@ -1532,7 +1532,7 @@ struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ini_req_msg_v01, enablefwlog_valid), @@ -1541,7 +1541,7 @@ struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_ini_req_msg_v01, enablefwlog), @@ -1554,7 +1554,7 @@ struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_ini_resp_msg_v01, resp), @@ -1568,7 +1568,7 @@ struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, offset), @@ -1577,7 +1577,7 @@ struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, mem_type), @@ -1586,7 +1586,7 @@ struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x03, 
.offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, data_len), @@ -1599,7 +1599,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, resp), @@ -1609,7 +1609,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, data_valid), @@ -1618,7 +1618,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, data_len), @@ -1627,7 +1627,7 @@ struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, .elem_size = sizeof(u8), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, data), @@ -1640,7 +1640,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, offset), @@ -1649,7 +1649,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, mem_type), @@ -1658,7 +1658,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, 
.elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x03, .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, data_len), @@ -1667,7 +1667,7 @@ struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, .elem_size = sizeof(u8), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x03, .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, data), @@ -1680,7 +1680,7 @@ struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01, resp), @@ -1694,7 +1694,7 @@ struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_vbatt_req_msg_v01, voltage_uv), @@ -1707,7 +1707,7 @@ struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_vbatt_resp_msg_v01, resp), @@ -1721,7 +1721,7 @@ struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, mac_addr_valid), @@ -1730,7 +1730,7 @@ struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01, .elem_size = sizeof(u8), - .array_type = STATIC_ARRAY, + .is_array = STATIC_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, mac_addr), @@ -1743,7 +1743,7 
@@ struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01, resp), @@ -1757,20 +1757,245 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, daemon_support_valid), }, { - .data_type = QMI_UNSIGNED_1_BYTE, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, daemon_support), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, + .elem_size = sizeof(u32), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 
0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = 
sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, {} }; 
@@ -1779,7 +2004,7 @@ struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_host_cap_resp_msg_v01, resp), @@ -1793,7 +2018,7 @@ struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, mem_seg_len), @@ -1802,7 +2027,7 @@ struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct wlfw_mem_seg_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, mem_seg), @@ -1816,7 +2041,7 @@ struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, mem_seg_len), @@ -1825,7 +2050,7 @@ struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01), - .array_type = VAR_LEN_ARRAY, + .is_array = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, mem_seg), @@ -1839,7 +2064,7 @@ struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01, resp), @@ -1861,7 +2086,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = 
sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, cause_for_rejuvenation_valid), @@ -1870,7 +2095,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, cause_for_rejuvenation), @@ -1879,7 +2104,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, requesting_sub_system_valid), @@ -1888,7 +2113,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, requesting_sub_system), @@ -1897,7 +2122,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, line_number_valid), @@ -1906,7 +2131,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, line_number), @@ -1915,7 +2140,7 @@ struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, function_name_valid), @@ -1924,7 +2149,7 @@ struct qmi_elem_info 
wlfw_rejuvenate_ind_msg_v01_ei[] = { .data_type = QMI_STRING, .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1, .elem_size = sizeof(char), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, function_name), @@ -1941,7 +2166,7 @@ struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01, resp), @@ -1955,7 +2180,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, mask_valid), @@ -1964,7 +2189,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, mask), @@ -1977,7 +2202,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, resp), @@ -1987,7 +2212,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, prev_mask_valid), @@ -1996,7 +2221,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - 
.array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, prev_mask), @@ -2005,7 +2230,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, curr_mask_valid), @@ -2014,7 +2239,7 @@ struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, curr_mask), @@ -2027,7 +2252,7 @@ struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_m3_info_req_msg_v01, addr), @@ -2036,7 +2261,7 @@ struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_m3_info_req_msg_v01, size), @@ -2049,7 +2274,7 @@ struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct wlfw_m3_info_resp_msg_v01, resp), @@ -2063,7 +2288,7 @@ struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), - .array_type = NO_ARRAY, + .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01, xo_cal_data), diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h 
b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h index c5e3870b8871..ff668f5d8afd 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h @@ -553,12 +553,38 @@ struct wlfw_mac_addr_resp_msg_v01 { #define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7 extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[]; +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 struct wlfw_host_cap_req_msg_v01 { u8 daemon_support_valid; - u8 daemon_support; -}; - -#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4 + u32 daemon_support; + u8 wake_msi_valid; + u32 wake_msi; + u8 gpios_valid; + u32 gpios_len; + u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; + u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 7bca714ef4b4..f704760d27b0 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -21,6 +21,9 @@ #include #include #include +#include +#include +#include #include "ce.h" #include "debug.h" @@ -30,6 +33,7 @@ #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 +#define ATH10K_SNOC_WAKE_IRQ 2 static char *const ce_name[] = { "WLAN_CE_0", @@ -65,7 +69,7 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, - .dma_mask = DMA_BIT_MASK(37), + .dma_mask = 
DMA_BIT_MASK(35), .msa_size = 0x100000, }; @@ -992,6 +996,7 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); ath10k_snoc_wlan_disable(ar); + ath10k_ce_free_rri(ar); } static int ath10k_snoc_hif_power_up(struct ath10k *ar) @@ -1007,6 +1012,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) return ret; } + ath10k_ce_alloc_rri(ar); + ret = ath10k_snoc_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); @@ -1022,6 +1029,59 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) return ret; } +static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar, + u8 fw_log_mode) +{ + u8 fw_dbg_mode; + + if (fw_log_mode) + fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE; + else + fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG; + + return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode); +} + +#ifdef CONFIG_PM +static int ath10k_snoc_hif_suspend(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n"); + + return ret; +} + +static int ath10k_snoc_hif_resume(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n"); + + return ret; +} +#endif + static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .read32 = ath10k_snoc_read32, .write32 = ath10k_snoc_write32, @@ -1035,6 +1095,11 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .send_complete_check = 
ath10k_snoc_hif_send_complete_check, .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, .get_target_info = ath10k_snoc_hif_get_target_info, + .set_target_log_mode = ath10k_snoc_hif_set_target_log_mode, +#ifdef CONFIG_PM + .suspend = ath10k_snoc_hif_suspend, + .resume = ath10k_snoc_hif_resume, +#endif }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { @@ -1504,6 +1569,88 @@ static int ath10k_hw_power_off(struct ath10k *ar) return ret; } +static int ath10k_smmu_attach(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct dma_iommu_mapping *mapping; + struct platform_device *pdev; + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Initializing SMMU\n"); + + pdev = ar_snoc->dev; + mapping = arm_iommu_create_mapping(&platform_bus_type, + ar_snoc->smmu_iova_start, + ar_snoc->smmu_iova_len); + if (IS_ERR(mapping)) { + ret = PTR_ERR(mapping); + ath10k_err(ar, "create mapping failed, err = %d\n", ret); + goto map_fail; + } + + ret = arm_iommu_attach_device(&pdev->dev, mapping); + if (ret < 0 && ret != -EEXIST) { + ath10k_err(ar, "iommu attach device failed, err = %d\n", ret); + goto attach_fail; + } else if (ret == -EEXIST) { + ret = 0; + } + + ar_snoc->smmu_mapping = mapping; + + return ret; + +attach_fail: + arm_iommu_release_mapping(mapping); +map_fail: + return ret; +} + +static void ath10k_smmu_deinit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + + pdev = ar_snoc->dev; + + if (!ar_snoc->smmu_mapping) + return; + + arm_iommu_detach_device(&pdev->dev); + arm_iommu_release_mapping(ar_snoc->smmu_mapping); + + ar_snoc->smmu_mapping = NULL; +} + +static int ath10k_smmu_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + struct resource *res; + int ret = 0; + + pdev = ar_snoc->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smmu_iova_base"); + if (!res) { + ath10k_err(ar, "SMMU 
iova base not found\n"); + } else { + ar_snoc->smmu_iova_start = res->start; + ar_snoc->smmu_iova_len = resource_size(res); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "SMMU iova start: %pa, len: %zu\n", + &ar_snoc->smmu_iova_start, ar_snoc->smmu_iova_len); + + ret = ath10k_smmu_attach(ar); + if (ret < 0) { + ath10k_err(ar, "SMMU init failed, err = %d, start: %pad, len: %zx\n", + ret, &ar_snoc->smmu_iova_start, + ar_snoc->smmu_iova_len); + } + } + + return ret; +} + static const struct of_device_id ath10k_snoc_dt_match[] = { { .compatible = "qcom,wcn3990-wifi", .data = &drv_priv, @@ -1553,16 +1700,22 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ar->ce_priv = &ar_snoc->ce; msa_size = drv_data->msa_size; + ret = ath10k_smmu_init(ar); + if (ret) { + ath10k_warn(ar, "failed to init SMMU: %d\n", ret); + goto err_core_destroy; + } + ret = ath10k_snoc_resource_init(ar); if (ret) { ath10k_warn(ar, "failed to initialize resource: %d\n", ret); - goto err_core_destroy; + goto err_smmu_deinit; } ret = ath10k_snoc_setup_resource(ar); if (ret) { ath10k_warn(ar, "failed to setup resource: %d\n", ret); - goto err_core_destroy; + goto err_smmu_deinit; } ret = ath10k_snoc_request_irq(ar); if (ret) { @@ -1607,6 +1760,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev) err_release_resource: ath10k_snoc_release_resource(ar); +err_smmu_deinit: + ath10k_smmu_deinit(ar); + err_core_destroy: ath10k_core_destroy(ar); @@ -1620,6 +1776,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev) ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); ath10k_core_unregister(ar); ath10k_hw_power_off(ar); + ath10k_smmu_deinit(ar); ath10k_snoc_free_irq(ar); ath10k_snoc_release_resource(ar); ath10k_qmi_deinit(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index e1d2d6675556..524c2dfa0024 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -84,6 +84,9 @@ struct ath10k_snoc { struct 
ath10k_wcn3990_vreg_info *vreg; struct ath10k_wcn3990_clk_info *clk; struct ath10k_qmi *qmi; + struct dma_iommu_mapping *smmu_mapping; + dma_addr_t smmu_iova_start; + size_t smmu_iova_len; }; static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 78996a9b0057..dc1061f10c97 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1423,17 +1423,50 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, return skb; } +static void +ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks) +{ + struct host_memory_chunk *chunk; + struct wmi_tlv *tlv; + int i; + __le16 tlv_len, tlv_tag; + + tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK); + tlv_len = __cpu_to_le16(sizeof(*chunk)); + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + tlv = host_mem_chunks; + tlv->tag = tlv_tag; + tlv->len = tlv_len; + chunk = (void *)tlv->value; + + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr, + ar->wmi.mem_chunks[i].req_id); + + host_mem_chunks += sizeof(*tlv); + host_mem_chunks += sizeof(*chunk); + } +} + static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) { struct sk_buff *skb; struct wmi_tlv *tlv; struct wmi_tlv_init_cmd *cmd; struct wmi_tlv_resource_config *cfg; - struct wmi_host_mem_chunks *chunks; + void *chunks; size_t len, chunks_len; void *ptr; - chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk); + chunks_len = ar->wmi.num_mem_chunks * + (sizeof(struct host_memory_chunk) + sizeof(*tlv)); len = (sizeof(*tlv) + sizeof(*cmd)) 
+ (sizeof(*tlv) + sizeof(*cfg)) + (sizeof(*tlv) + chunks_len); @@ -1527,7 +1560,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_ocb_schedules = __cpu_to_le32(0); cfg->host_capab = __cpu_to_le32(0); - ath10k_wmi_put_host_mem_chunks(ar, chunks); + ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); return skb; diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 64b5efb9d531..e0569f25982a 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -3700,7 +3700,7 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev, of_property_read_u32_array(pdev->dev.of_node, "qcom,bw-scale", (u32 *)dev->bw_scale, size / sizeof(u32)); - dev->bw_gen_max = size / sizeof(u32); + dev->bw_gen_max = size / sizeof(*dev->bw_scale); } else { PCIE_DBG(dev, "RC%d: bandwidth scaling is not supported\n", dev->rc_idx); @@ -6394,6 +6394,15 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed, pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus); + if (target_link_speed > pcie_dev->bw_gen_max || + (pcie_dev->target_link_speed && + target_link_speed > pcie_dev->target_link_speed)) { + PCIE_DBG(pcie_dev, + "PCIe: RC%d: invalid target link speed: %d\n", + pcie_dev->rc_idx, target_link_speed); + return -EINVAL; + } + pcie_capability_read_word(root_pci_dev, PCI_EXP_LNKSTA, &link_status); current_link_speed = link_status & PCI_EXP_LNKSTA_CLS; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index da790f26d295..11e5a14b6470 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4641,6 +4641,7 @@ static const struct pci_dev_acs_enabled { /* QCOM QDF2xxx root ports */ { 0x17cb, 0x400, pci_quirk_qcom_rp_acs }, { 0x17cb, 0x401, pci_quirk_qcom_rp_acs }, + { 0x17cb, 0x10c, pci_quirk_qcom_rp_acs }, /* Intel PCH root ports */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, 
pci_quirk_intel_spt_pch_acs }, diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h index d4552f5dd4f4..0006f4ba2567 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h +++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h @@ -406,7 +406,6 @@ struct ep_pcie_dev_t { bool config_mmio_init; bool enumerated; enum ep_pcie_link_status link_status; - bool perst_deast; bool power_on; bool suspending; bool l23_ready; @@ -414,6 +413,8 @@ struct ep_pcie_dev_t { struct ep_pcie_msi_config msi_cfg; bool no_notify; bool client_ready; + atomic_t ep_pcie_dev_wake; + atomic_t perst_deast; struct ep_pcie_register_event *event_reg; struct work_struct handle_perst_work; diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c index 09b1ade55292..864c44ad2ff9 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -1804,7 +1804,7 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) ret = EP_PCIE_ERROR; goto link_fail; } else { - dev->perst_deast = true; + atomic_set(&dev->perst_deast, 1); if (opt & EP_PCIE_OPT_AST_WAKE) { /* deassert PCIe WAKE# */ EP_PCIE_DBG(dev, @@ -1967,11 +1967,19 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) int ep_pcie_core_disable_endpoint(void) { int rc = 0; + u32 val = 0; + unsigned long irqsave_flags; struct ep_pcie_dev_t *dev = &ep_pcie_dev; EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); mutex_lock(&dev->setup_mtx); + if (atomic_read(&dev->perst_deast)) { + EP_PCIE_DBG(dev, + "PCIe V%d: PERST is de-asserted, exiting disable\n", + dev->rev); + goto out; + } if (!dev->power_on) { EP_PCIE_DBG(dev, @@ -1988,9 +1996,25 @@ int ep_pcie_core_disable_endpoint(void) dev->rev); } + val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS); + EP_PCIE_DBG(dev, "PCIe V%d: LTSSM_STATE during disable:0x%x\n", + dev->rev, (val >> 0xC) & 0x3f); ep_pcie_pipe_clk_deinit(dev); ep_pcie_clk_deinit(dev); 
ep_pcie_vreg_deinit(dev); + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + if (atomic_read(&dev->ep_pcie_dev_wake) && + !atomic_read(&dev->perst_deast)) { + EP_PCIE_DBG(dev, "PCIe V%d: Released wakelock\n", dev->rev); + atomic_set(&dev->ep_pcie_dev_wake, 0); + pm_relax(&dev->pdev->dev); + } else { + EP_PCIE_DBG(dev, "PCIe V%d: Bail, Perst-assert:%d wake:%d\n", + dev->rev, atomic_read(&dev->perst_deast), + atomic_read(&dev->ep_pcie_dev_wake)); + } + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); out: mutex_unlock(&dev->setup_mtx); return rc; @@ -2247,12 +2271,25 @@ static void handle_d3cold_func(struct work_struct *work) { struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, handle_d3cold_work); + unsigned long irqsave_flags; EP_PCIE_DBG(dev, "PCIe V%d: shutdown PCIe link due to PERST assertion before BME is set\n", dev->rev); ep_pcie_core_disable_endpoint(); dev->no_notify = false; + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + if (atomic_read(&dev->ep_pcie_dev_wake) && + !atomic_read(&dev->perst_deast)) { + atomic_set(&dev->ep_pcie_dev_wake, 0); + pm_relax(&dev->pdev->dev); + EP_PCIE_DBG(dev, "PCIe V%d: Released wakelock\n", dev->rev); + } else { + EP_PCIE_DBG(dev, "PCIe V%d: Bail, Perst-assert:%d wake:%d\n", + dev->rev, atomic_read(&dev->perst_deast), + atomic_read(&dev->ep_pcie_dev_wake)); + } + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); } static void handle_bme_func(struct work_struct *work) @@ -2290,14 +2327,24 @@ static irqreturn_t ep_pcie_handle_perst_irq(int irq, void *data) } if (perst) { - dev->perst_deast = true; + atomic_set(&dev->perst_deast, 1); dev->perst_deast_counter++; + /* + * Hold a wakelock to avoid missing BME and other + * interrupts if apps goes into suspend before BME is set. 
+ */ + if (!atomic_read(&dev->ep_pcie_dev_wake)) { + pm_stay_awake(&dev->pdev->dev); + atomic_set(&dev->ep_pcie_dev_wake, 1); + EP_PCIE_DBG(dev, "PCIe V%d: Acquired wakelock\n", + dev->rev); + } EP_PCIE_DBG(dev, "PCIe V%d: No. %ld PERST deassertion\n", dev->rev, dev->perst_deast_counter); ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_RST_DEAST); } else { - dev->perst_deast = false; + atomic_set(&dev->perst_deast, 0); dev->perst_ast_counter++; EP_PCIE_DBG(dev, "PCIe V%d: No. %ld PERST assertion\n", @@ -2548,13 +2595,15 @@ int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) * based on the next expected level of the gpio */ if (gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num) == 1) - dev->perst_deast = true; + atomic_set(&dev->perst_deast, 1); /* register handler for PERST interrupt */ perst_irq = gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num); ret = devm_request_irq(pdev, perst_irq, ep_pcie_handle_perst_irq, - (dev->perst_deast ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH), + ((atomic_read(&dev->perst_deast) ? + IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH) + | IRQF_EARLY_RESUME), "ep_pcie_perst", dev); if (ret) { EP_PCIE_ERR(dev, @@ -2948,7 +2997,7 @@ static int ep_pcie_core_wakeup_host(enum ep_pcie_event event) if (event == EP_PCIE_EVENT_PM_D3_HOT) ep_pcie_core_issue_inband_pme(); - if (dev->perst_deast && !dev->l23_ready) { + if (atomic_read(&dev->perst_deast) && !dev->l23_ready) { EP_PCIE_ERR(dev, "PCIe V%d: request to assert WAKE# when PERST is de-asserted and D3hot is not received\n", dev->rev); @@ -2959,7 +3008,7 @@ static int ep_pcie_core_wakeup_host(enum ep_pcie_event event) EP_PCIE_DBG(dev, "PCIe V%d: No. %ld to assert PCIe WAKE#; perst is %s de-asserted; D3hot is %s received\n", dev->rev, dev->wake_counter, - dev->perst_deast ? "" : "not", + atomic_read(&dev->perst_deast) ? "" : "not", dev->l23_ready ? "" : "not"); /* * Assert WAKE# GPIO until link is back to L0. 
@@ -3214,6 +3263,14 @@ static int ep_pcie_probe(struct platform_device *pdev) goto irq_failure; } + /* + * Wakelock is needed to avoid missing BME and other + * interrupts if apps goes into suspend before host + * sets them. + */ + device_init_wakeup(&ep_pcie_dev.pdev->dev, true); + atomic_set(&ep_pcie_dev.ep_pcie_dev_wake, 0); + if (ep_pcie_dev.perst_enum && !gpio_get_value(ep_pcie_dev.gpio[EP_PCIE_GPIO_PERST].num)) { EP_PCIE_DBG2(&ep_pcie_dev, diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c index 02253135bbf2..b8d4bc4adece 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -668,7 +668,8 @@ static netdev_tx_t ecm_ipa_start_xmit fail_tx_packet: out: - resource_release(ecm_ipa_ctx); + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) == 0) + resource_release(ecm_ipa_ctx); resource_busy: return status; } @@ -1340,6 +1341,9 @@ static void ecm_ipa_tx_complete_notify netif_wake_queue(ecm_ipa_ctx->net); } + if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) == 0) + resource_release(ecm_ipa_ctx); + out: dev_kfree_skb_any(skb); } diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c index 7090b63f4cc3..67ed1fd76347 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c @@ -540,20 +540,16 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, goto fail; } - if (ntn_ctx->conn.dl.smmu_enabled) { - result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl, - &inp->dl); - if (result) { - IPA_UC_OFFLOAD_ERR("alloc 
failure on TX\n"); - goto fail; - } - result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul, - &inp->ul); - if (result) { - ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); - IPA_UC_OFFLOAD_ERR("alloc failure on RX\n"); - goto fail; - } + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl, &inp->dl); + if (result) { + IPA_UC_OFFLOAD_ERR("alloc failure on TX\n"); + goto fail; + } + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul, &inp->ul); + if (result) { + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); + IPA_UC_OFFLOAD_ERR("alloc failure on RX\n"); + goto fail; } fail: diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 7aa28cad3242..0612e97e3813 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -996,7 +996,8 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, fail_tx_packet: rndis_ipa_xmit_error(skb); out: - resource_release(rndis_ipa_ctx); + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) == 0) + resource_release(rndis_ipa_ctx); resource_busy: RNDIS_IPA_DEBUG ("packet Tx done - %s\n", @@ -1069,6 +1070,10 @@ static void rndis_ipa_tx_complete_notify( RNDIS_IPA_DEBUG("send queue was awaken\n"); } + /*Release resource only when outstanding packets are zero*/ + if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) == 0) + resource_release(rndis_ipa_ctx); + out: dev_kfree_skb_any(skb); } diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c index 8a64c5a6e7b1..71c7c6ffbad7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c +++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c @@ -571,9 +571,10 @@ static int ipa_eth_pm_notifier_event_suspend_prepare( * and reverts the device suspension by aborting the system suspend. 
*/ if (ipa_eth_net_check_active(eth_dev)) { - pr_info("%s: %s is active, preventing suspend for some time", - IPA_ETH_SUBSYS, eth_dev->net_dev->name); - ipa_eth_dev_wakeup_event(eth_dev); + pr_info("%s: %s is active, preventing suspend for %u ms", + IPA_ETH_SUBSYS, eth_dev->net_dev->name, + IPA_ETH_WAKE_TIME_MS); + pm_wakeup_dev_event(eth_dev->dev, IPA_ETH_WAKE_TIME_MS, false); return NOTIFY_BAD; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h index f92da41cd7aa..2ffc31c27da7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h @@ -35,8 +35,12 @@ #define IPA_ETH_IPC_LOGDBG_DEFAULT false #endif +/* Time to remain awake after a suspend abort due to NIC activity */ #define IPA_ETH_WAKE_TIME_MS 500 +/* Time for NIC HW to settle down (ex. receive link interrupt) after a resume */ +#define IPA_ETH_RESUME_SETTLE_MS 2000 + #define IPA_ETH_PFDEV (ipa3_ctx ? 
ipa3_ctx->pdev : NULL) #define IPA_ETH_SUBSYS "ipa_eth" @@ -161,9 +165,31 @@ extern bool ipa_eth_ipc_logdbg; bool ipa_eth_is_ready(void); bool ipa_eth_all_ready(void); -static inline void ipa_eth_dev_wakeup_event(struct ipa_eth_device *eth_dev) +static inline void ipa_eth_dev_assume_active_ms( + struct ipa_eth_device *eth_dev, + unsigned int msec) +{ + eth_dev_priv(eth_dev)->assume_active += + DIV_ROUND_UP(msec, IPA_ETH_WAKE_TIME_MS); + pm_system_wakeup(); +} + +static inline void ipa_eth_dev_assume_active_inc( + struct ipa_eth_device *eth_dev, + unsigned int count) +{ + eth_dev_priv(eth_dev)->assume_active += count; + pm_system_wakeup(); +} + +static inline void ipa_eth_dev_assume_active_dec( + struct ipa_eth_device *eth_dev, + unsigned int count) { - pm_wakeup_dev_event(eth_dev->dev, IPA_ETH_WAKE_TIME_MS, false); + if (eth_dev_priv(eth_dev)->assume_active > count) + eth_dev_priv(eth_dev)->assume_active -= count; + else + eth_dev_priv(eth_dev)->assume_active = 0; } struct ipa_eth_device *ipa_eth_alloc_device( diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_pci.c b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_pci.c index 1095306b009f..80f4a80b7181 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_pci.c +++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_pci.c @@ -374,7 +374,7 @@ static int ipa_eth_pci_suspend_late_handler(struct device *dev) IPA_ETH_SUBSYS, eth_dev->net_dev->name); /* Have PM_SUSPEND_PREPARE give us one wakeup time quanta */ - eth_dev_priv(eth_dev)->assume_active++; + ipa_eth_dev_assume_active_inc(eth_dev, 1); return -EAGAIN; } @@ -428,8 +428,8 @@ static int ipa_eth_pci_resume_handler(struct device *dev) "Device resume delegated to net driver"); rc = eth_dev_pm_ops(eth_dev)->resume(dev); - /* Give some time after a resume for the device to settle */ - eth_dev_priv(eth_dev)->assume_active++; + /* Give some time for device to settle after a resume */ + ipa_eth_dev_assume_active_ms(eth_dev, IPA_ETH_RESUME_SETTLE_MS); } 
if (rc) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 7767a74dcbeb..ee3b3aa0a450 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1521,6 +1521,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info; int aggr_active_bitmap = 0; struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct ipa_ep_cfg_holb holb_cfg; /* In case of DPL, dl is the DPL channel/client */ @@ -1606,6 +1607,15 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, goto unsuspend_dl_and_exit; } + /*enable holb to discard the packets*/ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 && + IPA_CLIENT_IS_CONS(dl_ep->client) && !is_dpl) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = IPA_HOLB_TMR_VAL_4_5; + result = ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg); + } + /* Stop DL channel */ result = ipa3_stop_gsi_channel(dl_clnt_hdl); if (result) { @@ -1635,6 +1645,14 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, start_dl_and_exit: gsi_start_channel(dl_ep->gsi_chan_hdl); ipa3_start_gsi_debug_monitor(dl_clnt_hdl); + /*disable holb to allow packets*/ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 && + IPA_CLIENT_IS_CONS(dl_ep->client) && !is_dpl) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg); + } unsuspend_dl_and_exit: if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { /* Unsuspend the DL EP */ @@ -1691,6 +1709,7 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl) struct ipa3_ep_context *dl_ep = NULL; enum gsi_status gsi_res; struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + struct ipa_ep_cfg_holb holb_cfg; /* In case of DPL, dl is the DPL channel/client */ @@ -1721,6 +1740,15 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl) IPAERR("Error starting DL channel: 
%d\n", gsi_res); ipa3_start_gsi_debug_monitor(dl_clnt_hdl); + /*disable holb to allow packets*/ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 && + IPA_CLIENT_IS_CONS(dl_ep->client) && !is_dpl) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg); + } + /* Start UL channel */ if (!is_dpl) { gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 66212ef8e680..d932b7efeb98 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -71,6 +71,7 @@ #define IPA_HOLB_TMR_DIS 0x0 #define IPA_HOLB_TMR_EN 0x1 #define IPA_HOLB_TMR_VAL 65535 +#define IPA_HOLB_TMR_VAL_4_5 31 /* * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but * IPA users still use sps_iovec size as FIFO element size. diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c index 01bb8f3fe3e7..3b855589f88e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -623,7 +623,9 @@ static long ipa_adpl_ioctl(struct file *filp, switch (cmd) { case IPA_IOC_ODL_GET_AGG_BYTE_LIMIT: odl_pipe_info.agg_byte_limit = - ipa3_odl_ctx->odl_sys_param.ipa_ep_cfg.aggr.aggr_byte_limit; + /*Modem expecting value in bytes. 
so passing 15 = 15*1024*/ + (ipa3_odl_ctx->odl_sys_param.ipa_ep_cfg.aggr.aggr_byte_limit * + 1024); if (copy_to_user((void __user *)arg, &odl_pipe_info, sizeof(odl_pipe_info))) { retval = -EFAULT; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h index 0a23c49e324c..6effa1b2f18a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,7 +13,7 @@ #ifndef _IPA3_ODL_H_ #define _IPA3_ODL_H_ -#define IPA_ODL_AGGR_BYTE_LIMIT (15 * 1024) +#define IPA_ODL_AGGR_BYTE_LIMIT 15 #define IPA_ODL_RX_RING_SIZE 192 #define MAX_QUEUE_TO_ODL 1024 #define CONFIG_SUCCESS 1 diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 4490e0cc2db8..849b8cff87e1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -86,14 +86,16 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip, if (entry->hdr) { hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); - if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE) || + ipa3_check_idr_if_freed(entry->hdr)) { IPAERR_RL("Header entry already deleted\n"); return -EPERM; } } else if (entry->proc_ctx) { hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); if (!hdr_proc_entry || - hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + (hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) || + ipa3_check_idr_if_freed(entry->proc_ctx)) { IPAERR_RL("Proc header entry already deleted\n"); return -EPERM; } @@ -1767,18 +1769,19 @@ int __ipa3_del_rt_rule(u32 rule_hdl) hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); if 
(!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { IPAERR_RL("Header entry already deleted\n"); - return -EINVAL; + entry->hdr = NULL; } } else if (entry->proc_ctx) { hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); if (!hdr_proc_entry || hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { IPAERR_RL("Proc header entry already deleted\n"); - return -EINVAL; + entry->proc_ctx = NULL; } } - if (entry->hdr) + if (entry->hdr && + (!ipa3_check_idr_if_freed(entry->hdr))) __ipa3_release_hdr(entry->hdr->id); else if (entry->proc_ctx && (!ipa3_check_idr_if_freed(entry->proc_ctx))) @@ -1955,7 +1958,6 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) if (!user_only || rule->ipacm_installed) { - list_del(&rule->link); if (rule->hdr) { hdr_entry = ipa3_id_find( rule->rule.hdr_hdl); @@ -1963,8 +1965,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) hdr_entry->cookie != IPA_HDR_COOKIE) { IPAERR_RL( "Header already deleted\n"); - mutex_unlock(&ipa3_ctx->lock); - return -EINVAL; + rule->hdr = NULL; } } else if (rule->proc_ctx) { hdr_proc_entry = @@ -1975,12 +1976,13 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) IPA_PROC_HDR_COOKIE) { IPAERR_RL( "Proc entry already deleted\n"); - mutex_unlock(&ipa3_ctx->lock); - return -EINVAL; + rule->proc_ctx = NULL; } } tbl->rule_cnt--; - if (rule->hdr) + list_del(&rule->link); + if (rule->hdr && + (!ipa3_check_idr_if_freed(rule->hdr))) __ipa3_release_hdr(rule->hdr->id); else if (rule->proc_ctx && (!ipa3_check_idr_if_freed( @@ -2157,20 +2159,8 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule) struct ipa3_hdr_entry *hdr_entry; struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; - if (rtrule->rule.hdr_hdl) { - hdr = ipa3_id_find(rtrule->rule.hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { - IPAERR_RL("rt rule does not point to valid hdr\n"); - goto error; - } - } else if (rtrule->rule.hdr_proc_ctx_hdl) { - proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl); - if 
((proc_ctx == NULL) || - (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { - IPAERR_RL("rt rule does not point to valid proc ctx\n"); - goto error; - } - } + if (__ipa_rt_validate_hndls(&rtrule->rule, &hdr, &proc_ctx)) + goto error; entry = ipa3_id_find(rtrule->rt_rule_hdl); if (entry == NULL) { @@ -2193,14 +2183,16 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule) if (entry->hdr) { hdr_entry = ipa3_id_find(entry->rule.hdr_hdl); - if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE) || + ipa3_check_idr_if_freed(entry->hdr)) { IPAERR_RL("Header entry already deleted\n"); return -EPERM; } } else if (entry->proc_ctx) { hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl); if (!hdr_proc_entry || - hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + (hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) || + ipa3_check_idr_if_freed(entry->proc_ctx)) { IPAERR_RL("Proc header entry already deleted\n"); return -EPERM; } @@ -2208,7 +2200,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule) if (entry->hdr) entry->hdr->ref_cnt--; - if (entry->proc_ctx) + else if (entry->proc_ctx) entry->proc_ctx->ref_cnt--; entry->rule = rtrule->rule; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index d47aedd22d1a..8d2b4faf224d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2617,6 +2617,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, { 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, + [IPA_4_5_MHI][IPA_CLIENT_ODL_DPL_CONS] = { + true, IPA_v4_5_MHI_GROUP_DDR, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_MHI_LOW_LAT_CONS] = { true, IPA_v4_5_MHI_GROUP_PCIE, false, diff --git a/drivers/platform/msm/mhi_dev/mhi.c 
b/drivers/platform/msm/mhi_dev/mhi.c index fb8908b3537d..795511ac794f 100644 --- a/drivers/platform/msm/mhi_dev/mhi.c +++ b/drivers/platform/msm/mhi_dev/mhi.c @@ -2151,25 +2151,34 @@ static void mhi_dev_transfer_completion_cb(void *mreq) { int rc = 0; struct mhi_req *req = mreq; - struct mhi_dev_channel *ch = req->client->channel; + struct mhi_dev_channel *ch; u32 snd_cmpl = req->snd_cmpl; + bool inbound = false; - if (mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type == - MHI_DEV_CH_TYPE_INBOUND_CHANNEL) - ch->pend_wr_count--; + ch = &mhi_ctx->ch[req->chan]; dma_unmap_single(&mhi_ctx->pdev->dev, req->dma, - req->len, DMA_FROM_DEVICE); + req->len, DMA_FROM_DEVICE); + + if (mhi_ctx->ch_ctx_cache[ch->ch_id].ch_type == + MHI_DEV_CH_TYPE_INBOUND_CHANNEL) { + inbound = true; + ch->pend_wr_count--; + } /* - * Channel got stopped or closed with transfers pending + * Channel got closed with transfers pending * Do not trigger callback or send cmpl to host */ if (ch->state == MHI_DEV_CH_CLOSED || ch->state == MHI_DEV_CH_STOPPED) { - mhi_log(MHI_MSG_DBG, - "Ch %d not in started state, %d writes pending\n", + if (inbound) + mhi_log(MHI_MSG_DBG, + "Ch %d closed with %d writes pending\n", ch->ch_id, ch->pend_wr_count + 1); + else + mhi_log(MHI_MSG_DBG, + "Ch %d closed with read pending\n", ch->ch_id); return; } @@ -2413,34 +2422,51 @@ static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi) mhi->cfg.event_rings; mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) * mhi->cfg.channels; - - mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev, - sizeof(struct mhi_dev_cmd_ctx), - &mhi->cmd_ctx_cache_dma_handle, - GFP_KERNEL); + /* + * This func mhi_dev_cache_host_cfg will be called when + * processing mhi device reset as well, do not allocate + * the command, event and channel context caches if they + * were already allocated during device boot, to avoid + * memory leak. 
+ */ if (!mhi->cmd_ctx_cache) { - pr_err("no memory while allocating cmd ctx\n"); - return -ENOMEM; + mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_cmd_ctx), + &mhi->cmd_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->cmd_ctx_cache) { + pr_err("no memory while allocating cmd ctx\n"); + rc = -ENOMEM; + goto exit; + } } memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx)); - mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev, - sizeof(struct mhi_dev_ev_ctx) * - mhi->cfg.event_rings, - &mhi->ev_ctx_cache_dma_handle, - GFP_KERNEL); - if (!mhi->ev_ctx_cache) - return -ENOMEM; + if (!mhi->ev_ctx_cache) { + mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings, + &mhi->ev_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->ev_ctx_cache) { + rc = -ENOMEM; + goto exit; + } + } memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) * mhi->cfg.event_rings); - mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev, - sizeof(struct mhi_dev_ch_ctx) * - mhi->cfg.channels, - &mhi->ch_ctx_cache_dma_handle, - GFP_KERNEL); - if (!mhi->ch_ctx_cache) - return -ENOMEM; + if (!mhi->ch_ctx_cache) { + mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels, + &mhi->ch_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->ch_ctx_cache) { + rc = -ENOMEM; + goto exit; + } + } memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) * mhi->cfg.channels); @@ -2476,6 +2502,20 @@ static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi) return mhi_ring_start(&mhi->ring[0], (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi); + +exit: + if (mhi->cmd_ctx_cache) + dma_free_coherent(&pdev->dev, + sizeof(struct mhi_dev_cmd_ctx), + mhi->cmd_ctx_cache, + mhi->cmd_ctx_cache_dma_handle); + if (mhi->ev_ctx_cache) + dma_free_coherent(&pdev->dev, + sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings, + mhi->ev_ctx_cache, + mhi->ev_ctx_cache_dma_handle); + return rc; } void 
mhi_dev_pm_relax(void) @@ -2726,6 +2766,7 @@ int mhi_dev_open_channel(uint32_t chan_id, ch->active_client = (*handle_client); (*handle_client)->channel = ch; (*handle_client)->event_trigger = mhi_dev_client_cb_reason; + ch->pend_wr_count = 0; if (ch->state == MHI_DEV_CH_UNINT) { ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start]; @@ -2898,6 +2939,14 @@ int mhi_dev_read_channel(struct mhi_req *mreq) mutex_lock(&ch->ch_lock); do { + if (ch->state == MHI_DEV_CH_STOPPED) { + mhi_log(MHI_MSG_VERBOSE, + "channel (%d) already stopped\n", + mreq->chan); + bytes_read = -1; + goto exit; + } + el = &ring->ring_cache[ring->rd_offset]; mhi_log(MHI_MSG_VERBOSE, "evtptr : 0x%llx\n", el->tre.data_buf_ptr); @@ -2919,13 +2968,6 @@ int mhi_dev_read_channel(struct mhi_req *mreq) goto exit; } - if (ch->state == MHI_DEV_CH_STOPPED) { - mhi_log(MHI_MSG_VERBOSE, - "channel (%d) already stopped\n", - mreq->chan); - bytes_read = -1; - goto exit; - } ch->tre_loc = el->tre.data_buf_ptr; ch->tre_size = el->tre.len; @@ -3591,35 +3633,9 @@ static int get_device_tree_data(struct platform_device *pdev) static int mhi_deinit(struct mhi_dev *mhi) { - int i = 0, ring_id = 0; - struct mhi_dev_ring *ring; struct platform_device *pdev = mhi->pdev; - ring_id = mhi->cfg.channels + mhi->cfg.event_rings + 1; - - for (i = 0; i < ring_id; i++) { - ring = &mhi->ring[i]; - if (ring->state == RING_STATE_UINT) - continue; - - dma_free_coherent(mhi->dev, ring->ring_size * - sizeof(union mhi_dev_ring_element_type), - ring->ring_cache, - ring->ring_cache_dma_handle); - if (ring->type == RING_TYPE_ER) { - dma_free_coherent(mhi->dev, ring->ring_size * - sizeof(uint64_t), - ring->evt_rp_cache, - ring->evt_rp_cache_dma_handle); - dma_free_coherent(mhi->dev, - sizeof(uint32_t), - ring->msi_buf, - ring->msi_buf_dma_handle); - } - } - devm_kfree(&pdev->dev, mhi->mmio_backup); - devm_kfree(&pdev->dev, mhi->ring); mhi_dev_sm_exit(mhi); @@ -3639,10 +3655,11 @@ static int mhi_init(struct mhi_dev *mhi) return rc; } - 
mhi->ring = devm_kzalloc(&pdev->dev, - (sizeof(struct mhi_dev_ring) * - (mhi->cfg.channels + mhi->cfg.event_rings + 1)), - GFP_KERNEL); + if (!mhi->ring) + mhi->ring = devm_kzalloc(&pdev->dev, + (sizeof(struct mhi_dev_ring) * + (mhi->cfg.channels + mhi->cfg.event_rings + 1)), + GFP_KERNEL); if (!mhi->ring) return -ENOMEM; diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h index 5d7923a7dc6e..6f5e1fea3cd0 100644 --- a/drivers/platform/msm/mhi_dev/mhi.h +++ b/drivers/platform/msm/mhi_dev/mhi.h @@ -424,7 +424,8 @@ static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring, #define TRACE_DATA_MAX 128 #define MHI_DEV_DATA_MAX 512 -#define MHI_DEV_MMIO_RANGE 0xc80 +#define MHI_DEV_MMIO_RANGE 0xb80 +#define MHI_DEV_MMIO_OFFSET 0x100 struct ring_cache_req { struct completion *done; diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c index 240eff2edf5f..fd43d1152dbf 100644 --- a/drivers/platform/msm/mhi_dev/mhi_mmio.c +++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c @@ -619,7 +619,8 @@ int mhi_dev_restore_mmio(struct mhi_dev *dev) mhi_dev_mmio_mask_interrupts(dev); for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) { - reg_cntl_addr = dev->mmio_base_addr + (i * 4); + reg_cntl_addr = dev->mmio_base_addr + + MHI_DEV_MMIO_OFFSET + (i * 4); reg_cntl_value = dev->mmio_backup[i]; writel_relaxed(reg_cntl_value, reg_cntl_addr); } @@ -640,13 +641,16 @@ EXPORT_SYMBOL(mhi_dev_restore_mmio); int mhi_dev_backup_mmio(struct mhi_dev *dev) { uint32_t i = 0; + void __iomem *reg_cntl_addr; if (WARN_ON(!dev)) return -EINVAL; - for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++) - dev->mmio_backup[i] = - readl_relaxed(dev->mmio_base_addr + (i * 4)); + for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++) { + reg_cntl_addr = (void __iomem *) (dev->mmio_base_addr + + MHI_DEV_MMIO_OFFSET + (i * 4)); + dev->mmio_backup[i] = readl_relaxed(reg_cntl_addr); + } return 0; } diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c 
b/drivers/platform/msm/mhi_dev/mhi_ring.c index 7268ca6ff916..6c2b60d230a1 100644 --- a/drivers/platform/msm/mhi_dev/mhi_ring.c +++ b/drivers/platform/msm/mhi_dev/mhi_ring.c @@ -416,33 +416,43 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx, wr_offset = mhi_dev_ring_addr2ofst(ring, ring->ring_ctx->generic.wp); - ring->ring_cache = dma_alloc_coherent(mhi->dev, - ring->ring_size * - sizeof(union mhi_dev_ring_element_type), - &ring->ring_cache_dma_handle, - GFP_KERNEL); - if (!ring->ring_cache) - return -ENOMEM; + if (!ring->ring_cache) { + ring->ring_cache = dma_alloc_coherent(mhi->dev, + ring->ring_size * + sizeof(union mhi_dev_ring_element_type), + &ring->ring_cache_dma_handle, + GFP_KERNEL); + if (!ring->ring_cache) { + mhi_log(MHI_MSG_ERROR, + "Failed to allocate ring cache\n"); + return -ENOMEM; + } + } if (ring->type == RING_TYPE_ER) { - ring->evt_rp_cache = dma_alloc_coherent(mhi->dev, - sizeof(uint64_t) * ring->ring_size, - &ring->evt_rp_cache_dma_handle, - GFP_KERNEL); if (!ring->evt_rp_cache) { - mhi_log(MHI_MSG_ERROR, - "Failed to allocate evt rp cache\n"); - rc = -ENOMEM; - goto cleanup; + ring->evt_rp_cache = dma_alloc_coherent(mhi->dev, + sizeof(uint64_t) * ring->ring_size, + &ring->evt_rp_cache_dma_handle, + GFP_KERNEL); + if (!ring->evt_rp_cache) { + mhi_log(MHI_MSG_ERROR, + "Failed to allocate evt rp cache\n"); + rc = -ENOMEM; + goto cleanup; + } } - ring->msi_buf = dma_alloc_coherent(mhi->dev, - sizeof(uint32_t), - &ring->msi_buf_dma_handle, - GFP_KERNEL); if (!ring->msi_buf) { - mhi_log(MHI_MSG_ERROR, "Failed to allocate msi buf\n"); - rc = -ENOMEM; - goto cleanup; + ring->msi_buf = dma_alloc_coherent(mhi->dev, + sizeof(uint32_t), + &ring->msi_buf_dma_handle, + GFP_KERNEL); + if (!ring->msi_buf) { + mhi_log(MHI_MSG_ERROR, + "Failed to allocate msi buf\n"); + rc = -ENOMEM; + goto cleanup; + } } } @@ -488,11 +498,13 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx, sizeof(union 
mhi_dev_ring_element_type), ring->ring_cache, ring->ring_cache_dma_handle); + ring->ring_cache = NULL; if (ring->evt_rp_cache) { dma_free_coherent(mhi->dev, sizeof(uint64_t) * ring->ring_size, ring->evt_rp_cache, ring->evt_rp_cache_dma_handle); + ring->evt_rp_cache = NULL; } return rc; } diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c index 62d4fe0bf7de..4d208bb931c1 100644 --- a/drivers/platform/msm/mhi_dev/mhi_sm.c +++ b/drivers/platform/msm/mhi_dev/mhi_sm.c @@ -1102,7 +1102,7 @@ int mhi_dev_sm_init(struct mhi_dev *mhi_dev) /*init debugfs*/ mhi_sm_debugfs_init(); - mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq"); + mhi_sm_ctx->mhi_sm_wq = alloc_workqueue("mhi_sm_wq", WQ_HIGHPRI, 0); if (!mhi_sm_ctx->mhi_sm_wq) { MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n"); res = -ENOMEM; diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c index ac8901689fa9..a51f95b7bc9f 100644 --- a/drivers/platform/msm/mhi_dev/mhi_uci.c +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015,2017-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015,2017-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -33,8 +33,8 @@ #define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2) #define MHI_UCI_IPC_LOG_PAGES (100) -/* Max number of MHI write request structures (used in async writes) */ -#define MHI_UCI_NUM_WR_REQ_DEFAULT 10 +/* Max number of MHI read/write request structs (used in async transfers) */ +#define MHI_UCI_NUM_REQ_DEFAULT 10 #define MAX_NR_TRBS_PER_CHAN 9 #define MHI_QTI_IFACE_ID 4 #define MHI_ADPL_IFACE_ID 5 @@ -50,6 +50,8 @@ #define MHI_UCI_RELEASE_TIMEOUT_MAX 5100 #define MHI_UCI_RELEASE_TIMEOUT_COUNT 30 +#define MHI_UCI_IS_CHAN_DIR_IN(n) ((n % 2) ? 
true : false) + enum uci_dbg_level { UCI_DBG_VERBOSE = 0x0, UCI_DBG_INFO = 0x1, @@ -92,8 +94,7 @@ struct chan_attr { /* Uevent broadcast of channel state */ bool state_bcast; /* Number of write request structs to allocate */ - u32 num_wr_reqs; - + u32 num_reqs; }; static void mhi_uci_generic_client_cb(struct mhi_dev_client_cb_data *cb_data); @@ -330,15 +331,19 @@ struct uci_client { struct mhi_uci_ctxt_t *uci_ctxt; struct mutex in_chan_lock; struct mutex out_chan_lock; - spinlock_t wr_req_lock; + spinlock_t req_lock; unsigned int f_flags; - struct mhi_req *wreqs; - struct list_head wr_req_list; + /* Pointer to dynamically allocated mhi_req structs */ + struct mhi_req *reqs; + /* Pointer to available (free) reqs */ + struct list_head req_list; + /* Pointer to in-use reqs */ + struct list_head in_use_list; struct completion read_done; struct completion at_ctrl_read_done; struct completion *write_done; int (*send)(struct uci_client*, void*, u32); - int (*read)(struct uci_client*, struct mhi_req*, int*); + int (*read)(struct uci_client*, int*); unsigned int tiocm; unsigned int at_ctrl_mask; }; @@ -478,23 +483,75 @@ static int mhi_init_read_chan(struct uci_client *client_handle, return rc; } +static struct mhi_req *mhi_uci_get_req(struct uci_client *uci_handle) +{ + struct mhi_req *req; + unsigned long flags; + + spin_lock_irqsave(&uci_handle->req_lock, flags); + if (list_empty(&uci_handle->req_list)) { + uci_log(UCI_DBG_ERROR, "Request pool empty for chans %d, %d\n", + uci_handle->in_chan, uci_handle->out_chan); + spin_unlock_irqrestore(&uci_handle->req_lock, flags); + return NULL; + } + /* Remove from free list and add to in-use list */ + req = container_of(uci_handle->req_list.next, + struct mhi_req, list); + list_del_init(&req->list); + /* + * If req is marked stale and if it was used for the write channel + * to host, free the previously allocated input buffer before the + * req is re-used + */ + if (req->is_stale && req->buf && MHI_UCI_IS_CHAN_DIR_IN(req->chan)) { 
+ uci_log(UCI_DBG_VERBOSE, "Freeing write buf for chan %d\n", + req->chan); + kfree(req->buf); + } + req->is_stale = false; + uci_log(UCI_DBG_VERBOSE, "Adding req to in-use list\n"); + list_add_tail(&req->list, &uci_handle->in_use_list); + spin_unlock_irqrestore(&uci_handle->req_lock, flags); + + return req; +} + +static void mhi_uci_put_req(struct uci_client *uci_handle, struct mhi_req *req) +{ + unsigned long flags; + + spin_lock_irqsave(&uci_handle->req_lock, flags); + /* Remove from in-use list and add back to free list */ + list_del_init(&req->list); + list_add_tail(&req->list, &uci_handle->req_list); + spin_unlock_irqrestore(&uci_handle->req_lock, flags); +} + static void mhi_uci_write_completion_cb(void *req) { struct mhi_req *ureq = req; - struct uci_client *uci_handle; - unsigned long flags; + struct uci_client *uci_handle = (struct uci_client *)ureq->context; - uci_handle = (struct uci_client *)ureq->context; kfree(ureq->buf); ureq->buf = NULL; - spin_lock_irqsave(&uci_handle->wr_req_lock, flags); - list_add_tail(&ureq->list, &uci_handle->wr_req_list); - spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags); + /* + * If this is a delayed write completion, just clear + * the stale flag and return. The ureq was added to + * the free list when client called release function. 
+ */ + if (ureq->is_stale) { + uci_log(UCI_DBG_VERBOSE, + "Got stale completion for ch %d\n", ureq->chan); + ureq->is_stale = false; + return; + } if (uci_handle->write_done) complete(uci_handle->write_done); + mhi_uci_put_req(uci_handle, ureq); /* Write queue may be waiting for write request structs */ wake_up(&uci_handle->write_wq); } @@ -504,7 +561,19 @@ static void mhi_uci_read_completion_cb(void *req) struct mhi_req *ureq = req; struct uci_client *uci_handle; + if (ureq->is_stale) { + uci_log(UCI_DBG_VERBOSE, + "Got stale completion for ch %d, ignoring\n", + ureq->chan); + return; + } + uci_handle = (struct uci_client *)ureq->context; + + uci_handle->pkt_loc = (void *)ureq->buf; + uci_handle->pkt_size = ureq->transfer_len; + + mhi_uci_put_req(uci_handle, ureq); complete(&uci_handle->read_done); } @@ -514,6 +583,9 @@ static int mhi_uci_send_sync(struct uci_client *uci_handle, struct mhi_req ureq; int ret_val; + uci_log(UCI_DBG_VERBOSE, + "Sync write for ch %d size %d\n", uci_handle->out_chan, size); + ureq.client = uci_handle->out_handle; ureq.buf = data_loc; ureq.len = size; @@ -534,19 +606,12 @@ static int mhi_uci_send_async(struct uci_client *uci_handle, struct mhi_req *ureq; uci_log(UCI_DBG_VERBOSE, - "Got async write for ch %d of size %d\n", + "Async write for ch %d size %d\n", uci_handle->out_chan, size); - spin_lock_irq(&uci_handle->wr_req_lock); - if (list_empty(&uci_handle->wr_req_list)) { - uci_log(UCI_DBG_ERROR, "Write request pool empty\n"); - spin_unlock_irq(&uci_handle->wr_req_lock); + ureq = mhi_uci_get_req(uci_handle); + if (!ureq) return -EBUSY; - } - ureq = container_of(uci_handle->wr_req_list.next, - struct mhi_req, list); - list_del_init(&ureq->list); - spin_unlock_irq(&uci_handle->wr_req_lock); ureq->client = uci_handle->out_handle; ureq->context = uci_handle; @@ -565,9 +630,7 @@ static int mhi_uci_send_async(struct uci_client *uci_handle, error_async_transfer: ureq->buf = NULL; - spin_lock_irq(&uci_handle->wr_req_lock); - 
list_add_tail(&ureq->list, &uci_handle->wr_req_list); - spin_unlock_irq(&uci_handle->wr_req_lock); + mhi_uci_put_req(uci_handle, ureq); return bytes_to_write; } @@ -609,7 +672,7 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc, return -EAGAIN; ret_val = wait_event_interruptible_timeout( uci_handle->write_wq, - !list_empty(&uci_handle->wr_req_list), + !list_empty(&uci_handle->req_list), MHI_UCI_WRITE_REQ_AVAIL_TIMEOUT); if (ret_val > 0) { /* @@ -710,42 +773,62 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait) return mask; } -static int mhi_uci_alloc_write_reqs(struct uci_client *client) +static int mhi_uci_alloc_reqs(struct uci_client *client) { int i; - u32 num_wr_reqs; + u32 num_reqs; + + if (client->reqs) { + uci_log(UCI_DBG_VERBOSE, "Reqs already allocated\n"); + return 0; + } - num_wr_reqs = client->in_chan_attr->num_wr_reqs; - if (!num_wr_reqs) - num_wr_reqs = MHI_UCI_NUM_WR_REQ_DEFAULT; + num_reqs = client->in_chan_attr->num_reqs; + if (!num_reqs) + num_reqs = MHI_UCI_NUM_REQ_DEFAULT; - client->wreqs = kcalloc(num_wr_reqs, + client->reqs = kcalloc(num_reqs, sizeof(struct mhi_req), GFP_KERNEL); - if (!client->wreqs) { - uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n"); + if (!client->reqs) { + uci_log(UCI_DBG_ERROR, "Reqs alloc failed\n"); return -ENOMEM; } - INIT_LIST_HEAD(&client->wr_req_list); - for (i = 0; i < num_wr_reqs; ++i) - list_add_tail(&client->wreqs[i].list, &client->wr_req_list); + INIT_LIST_HEAD(&client->req_list); + INIT_LIST_HEAD(&client->in_use_list); + for (i = 0; i < num_reqs; ++i) + list_add_tail(&client->reqs[i].list, &client->req_list); uci_log(UCI_DBG_INFO, "Allocated %d write reqs for chan %d\n", - num_wr_reqs, client->out_chan); + num_reqs, client->out_chan); return 0; } -static int mhi_uci_read_async(struct uci_client *uci_handle, - struct mhi_req *ureq, int *bytes_avail) +static int mhi_uci_read_async(struct uci_client *uci_handle, int *bytes_avail) { int ret_val = 0; 
unsigned long compl_ret; + struct mhi_req *ureq; + struct mhi_dev_client *client_handle; uci_log(UCI_DBG_ERROR, "Async read for ch %d\n", uci_handle->in_chan); + ureq = mhi_uci_get_req(uci_handle); + if (!ureq) { + uci_log(UCI_DBG_ERROR, + "Out of reqs for chan %d\n", uci_handle->in_chan); + return -EBUSY; + } + + client_handle = uci_handle->in_handle; + ureq->chan = uci_handle->in_chan; + ureq->client = client_handle; + ureq->buf = uci_handle->in_buf_list[0].addr; + ureq->len = uci_handle->in_buf_list[0].buf_size; + ureq->mode = DMA_ASYNC; ureq->client_cb = mhi_uci_read_completion_cb; ureq->snd_cmpl = 1; @@ -754,14 +837,14 @@ static int mhi_uci_read_async(struct uci_client *uci_handle, reinit_completion(&uci_handle->read_done); *bytes_avail = mhi_dev_read_channel(ureq); - uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n", - ureq->len, *bytes_avail); if (*bytes_avail < 0) { uci_log(UCI_DBG_ERROR, "Failed to read channel ret %dlu\n", *bytes_avail); + mhi_uci_put_req(uci_handle, ureq); return -EIO; } - + uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n", + ureq->len, *bytes_avail); if (*bytes_avail > 0) { uci_log(UCI_DBG_VERBOSE, "Waiting for async read completion!\n"); @@ -769,7 +852,6 @@ static int mhi_uci_read_async(struct uci_client *uci_handle, wait_for_completion_interruptible_timeout( &uci_handle->read_done, MHI_UCI_ASYNC_READ_TIMEOUT); - if (compl_ret == -ERESTARTSYS) { uci_log(UCI_DBG_ERROR, "Exit signal caught\n"); return compl_ret; @@ -779,33 +861,41 @@ static int mhi_uci_read_async(struct uci_client *uci_handle, return -EIO; } uci_log(UCI_DBG_VERBOSE, - "wk up Read completed on ch %d\n", ureq->chan); - - uci_handle->pkt_loc = (void *)ureq->buf; - uci_handle->pkt_size = ureq->transfer_len; - + "wk up Read completed on ch %d\n", uci_handle->in_chan); uci_log(UCI_DBG_VERBOSE, "Got pkt of sz 0x%lx at adr %pK, ch %d\n", uci_handle->pkt_size, - ureq->buf, ureq->chan); + uci_handle->pkt_loc, uci_handle->in_chan); } else { 
uci_handle->pkt_loc = NULL; uci_handle->pkt_size = 0; + uci_log(UCI_DBG_VERBOSE, + "No read data available, return req to free liat\n"); + mhi_uci_put_req(uci_handle, ureq); } return ret_val; } -static int mhi_uci_read_sync(struct uci_client *uci_handle, - struct mhi_req *ureq, int *bytes_avail) +static int mhi_uci_read_sync(struct uci_client *uci_handle, int *bytes_avail) { int ret_val = 0; + struct mhi_req ureq; + struct mhi_dev_client *client_handle; - ureq->mode = DMA_SYNC; - *bytes_avail = mhi_dev_read_channel(ureq); + uci_log(UCI_DBG_ERROR, + "Sync read for ch %d\n", uci_handle->in_chan); + + client_handle = uci_handle->in_handle; + ureq.chan = uci_handle->in_chan; + ureq.client = client_handle; + ureq.buf = uci_handle->in_buf_list[0].addr; + ureq.len = uci_handle->in_buf_list[0].buf_size; + ureq.mode = DMA_SYNC; + *bytes_avail = mhi_dev_read_channel(&ureq); uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%lx bytes_read = 0x%x\n", - ureq->len, *bytes_avail); + ureq.len, *bytes_avail); if (*bytes_avail < 0) { uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n", @@ -814,13 +904,13 @@ static int mhi_uci_read_sync(struct uci_client *uci_handle, } if (*bytes_avail > 0) { - uci_handle->pkt_loc = (void *)ureq->buf; - uci_handle->pkt_size = ureq->transfer_len; + uci_handle->pkt_loc = (void *)ureq.buf; + uci_handle->pkt_size = ureq.transfer_len; uci_log(UCI_DBG_VERBOSE, "Got pkt of sz 0x%lx at adr %pK, ch %d\n", uci_handle->pkt_size, - ureq->buf, ureq->chan); + ureq.buf, ureq.chan); } else { uci_handle->pkt_loc = NULL; uci_handle->pkt_size = 0; @@ -833,8 +923,11 @@ static int open_client_mhi_channels(struct uci_client *uci_client) { int rc = 0; - if (!mhi_uci_are_channels_connected(uci_client)) + if (!mhi_uci_are_channels_connected(uci_client)) { + uci_log(UCI_DBG_ERROR, "%s:Channels are not connected\n", + __func__); return -ENODEV; + } uci_log(UCI_DBG_DBG, "Starting channels %d %d.\n", @@ -845,7 +938,7 @@ static int open_client_mhi_channels(struct uci_client 
*uci_client) /* Allocate write requests for async operations */ if (!(uci_client->f_flags & O_SYNC)) { - rc = mhi_uci_alloc_write_reqs(uci_client); + rc = mhi_uci_alloc_reqs(uci_client); if (rc) goto handle_not_rdy_err; uci_client->send = mhi_uci_send_async; @@ -964,11 +1057,20 @@ static int mhi_uci_client_release(struct inode *mhi_inode, struct file *file_handle) { struct uci_client *uci_handle = file_handle->private_data; - int count = 0; + const struct chan_attr *in_chan_attr; + int count = 0, i; + struct mhi_req *ureq; if (!uci_handle) return -EINVAL; + in_chan_attr = uci_handle->in_chan_attr; + if (!in_chan_attr) { + uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n", + uci_handle->in_chan); + return -EINVAL; + } + if (atomic_sub_return(1, &uci_handle->ref_count)) { uci_log(UCI_DBG_DBG, "Client close chan %d, ref count 0x%x\n", iminor(mhi_inode), @@ -995,9 +1097,6 @@ static int mhi_uci_client_release(struct inode *mhi_inode, if (atomic_read(&uci_handle->mhi_chans_open)) { atomic_set(&uci_handle->mhi_chans_open, 0); - - if (!(uci_handle->f_flags & O_SYNC)) - kfree(uci_handle->wreqs); mutex_lock(&uci_handle->out_chan_lock); mhi_dev_close_channel(uci_handle->out_handle); wake_up(&uci_handle->write_wq); @@ -1007,6 +1106,33 @@ static int mhi_uci_client_release(struct inode *mhi_inode, mhi_dev_close_channel(uci_handle->in_handle); wake_up(&uci_handle->read_wq); mutex_unlock(&uci_handle->in_chan_lock); + /* + * Add back reqs in in-use list, if any, to free list. + * Mark the ureq stale to avoid returning stale data + * to client if the transfer completes later. 
+ */ + count = 0; + while (!(list_empty(&uci_handle->in_use_list))) { + ureq = container_of(uci_handle->in_use_list.next, + struct mhi_req, list); + list_del_init(&ureq->list); + ureq->is_stale = true; + uci_log(UCI_DBG_VERBOSE, + "Adding back req for chan %d to free list\n", + ureq->chan); + list_add_tail(&ureq->list, &uci_handle->req_list); + count++; + } + if (count) + uci_log(UCI_DBG_DBG, + "Client %d closed with %d transfers pending\n", + iminor(mhi_inode), count); + } + + for (i = 0; i < (in_chan_attr->nr_trbs); i++) { + kfree(uci_handle->in_buf_list[i].addr); + uci_handle->in_buf_list[i].addr = NULL; + uci_handle->in_buf_list[i].buf_size = 0; } atomic_set(&uci_handle->read_data_ready, 0); @@ -1123,20 +1249,17 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle, int *bytes_avail) { int ret_val = 0; - struct mhi_dev_client *client_handle; - struct mhi_req ureq; - - client_handle = uci_handle->in_handle; - ureq.chan = uci_handle->in_chan; - ureq.client = client_handle; - ureq.buf = uci_handle->in_buf_list[0].addr; - ureq.len = uci_handle->in_buf_list[0].buf_size; do { + if (!mhi_uci_are_channels_connected(uci_handle)) { + uci_log(UCI_DBG_ERROR, + "%s:Channels are not connected\n", __func__); + return -ENODEV; + } + if (!uci_handle->pkt_loc && !atomic_read(&uci_ctxt.mhi_disabled)) { - ret_val = uci_handle->read(uci_handle, &ureq, - bytes_avail); + ret_val = uci_handle->read(uci_handle, bytes_avail); if (ret_val) return ret_val; } @@ -1146,12 +1269,13 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle, uci_log(UCI_DBG_VERBOSE, "No data read_data_ready %d, chan %d\n", atomic_read(&uci_handle->read_data_ready), - ureq.chan); + uci_handle->in_chan); if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) return -EAGAIN; ret_val = wait_event_interruptible(uci_handle->read_wq, - (!mhi_dev_channel_isempty(client_handle))); + (!mhi_dev_channel_isempty( + uci_handle->in_handle))); if (ret_val == -ERESTARTSYS) { uci_log(UCI_DBG_ERROR, "Exit signal 
caught\n"); @@ -1160,27 +1284,16 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle, uci_log(UCI_DBG_VERBOSE, "wk up Got data on ch %d read_data_ready %d\n", - ureq.chan, + uci_handle->in_chan, atomic_read(&uci_handle->read_data_ready)); } else if (*bytes_avail > 0) { /* A valid packet was returned from MHI */ uci_log(UCI_DBG_VERBOSE, "Got packet: avail pkts %d phy_adr %pK, ch %d\n", atomic_read(&uci_handle->read_data_ready), - ureq.buf, - ureq.chan); + uci_handle->pkt_loc, + uci_handle->in_chan); break; - } else { - /* - * MHI did not return a valid packet, but we have one - * which we did not finish returning to user - */ - uci_log(UCI_DBG_CRITICAL, - "chan %d err: avail pkts %d phy_adr %pK", - ureq.chan, - atomic_read(&uci_handle->read_data_ready), - ureq.buf); - return -EIO; } } while (!uci_handle->pkt_loc); @@ -1290,6 +1403,12 @@ static ssize_t mhi_uci_client_write(struct file *file, return -EIO; } + if (!mhi_uci_are_channels_connected(uci_handle)) { + uci_log(UCI_DBG_ERROR, "%s:Channels are not connected\n", + __func__); + return -ENODEV; + } + if (count > TRB_MAX_DATA_SIZE) { uci_log(UCI_DBG_ERROR, "Too big write size: %d, max supported size is %d\n", @@ -1442,7 +1561,7 @@ static int mhi_register_client(struct uci_client *mhi_client, int index) mutex_init(&mhi_client->in_chan_lock); mutex_init(&mhi_client->out_chan_lock); - spin_lock_init(&mhi_client->wr_req_lock); + spin_lock_init(&mhi_client->req_lock); /* Init the completion event for AT ctrl read */ init_completion(&mhi_client->at_ctrl_read_done); @@ -1787,6 +1906,7 @@ static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data) { struct uci_client *client = cb_data->user_data; int rc; + struct mhi_req *ureq; uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n", cb_data->channel, cb_data->ctrl_info); @@ -1813,10 +1933,17 @@ static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data) } destroy_workqueue(uci_ctxt.at_ctrl_wq); 
uci_ctxt.at_ctrl_wq = NULL; - if (!(client->f_flags & O_SYNC)) - kfree(client->wreqs); mhi_dev_close_channel(client->out_handle); mhi_dev_close_channel(client->in_handle); + + /* Add back reqs in in-use list, if any, to free list */ + while (!(list_empty(&client->in_use_list))) { + ureq = container_of(client->in_use_list.next, + struct mhi_req, list); + list_del_init(&ureq->list); + /* Add to in-use list */ + list_add_tail(&ureq->list, &client->req_list); + } } } diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c index c2b2137185b8..be6e45604f8a 100644 --- a/drivers/platform/msm/sps/bam.c +++ b/drivers/platform/msm/sps/bam.c @@ -2174,13 +2174,16 @@ void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option) u32 pipe = pipe_index; u32 desc_fifo_addr; u32 desc_fifo_size; - u32 *desc_fifo; + u32 __iomem *desc_fifo; int i; char desc_info[MAX_MSG_LEN]; + struct sps_bam *dev; if (base == NULL) return; + dev = to_sps_bam_dev(virt_addr); + desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe); desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe, P_DESC_FIFO_SIZE); @@ -2202,7 +2205,14 @@ void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option) "BAM_P_DESC_FIFO_SIZE: 0x%x (%d)\n\n", desc_fifo_addr, desc_fifo_size, desc_fifo_size); - desc_fifo = (u32 *) phys_to_virt(desc_fifo_addr); + if (dev->props.options & SPS_BAM_SMMU_EN) { + struct sps_pipe *pipe_indx = dev->pipes[pipe_index]; + + SPS_DUMP("%s", "SMMU is enabled\n"); + desc_fifo = pipe_indx->map->desc.base; + } else { + desc_fifo = (u32 __iomem *) phys_to_virt(desc_fifo_addr); + } if (option == 100) { SPS_DUMP("%s", diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 1220e123870d..ec3c42b3f220 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1044,10 +1044,11 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi) return 0; } -static void setup_fifo_xfer(struct spi_transfer *xfer, +static int setup_fifo_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas, u16 mode, struct spi_master *spi) { + int ret = 0; u32 m_cmd = 0; u32 m_param = 0; u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG); @@ -1060,7 +1061,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, /* Speed and bits per word can be overridden per transfer */ if (xfer->speed_hz != mas->cur_speed_hz) { - int ret = 0; u32 clk_sel = 0; u32 m_clk_cfg = 0; int idx = 0; @@ -1070,7 +1070,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, if (ret) { dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret); - return; + return ret; } mas->cur_speed_hz = xfer->speed_hz; clk_sel |= (idx & CLK_SEL_MSK); @@ -1151,13 +1151,14 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd, xfer->cs_change, mas->cur_xfer_mode); if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) { - int ret = 0; - ret = geni_se_rx_dma_prep(mas->wrapper_dev, mas->base, xfer->rx_buf, xfer->len, &xfer->rx_dma); - if (ret) + if (ret) { GENI_SE_ERR(mas->ipc, true, mas->dev, "Failed to setup Rx dma %d\n", ret); + xfer->rx_dma = 0; + return ret; + } } if (m_cmd & SPI_TX_ONLY) { if (mas->cur_xfer_mode == FIFO_MODE) { @@ -1169,14 +1170,18 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, ret = geni_se_tx_dma_prep(mas->wrapper_dev, mas->base, (void *)xfer->tx_buf, xfer->len, &xfer->tx_dma); - if (ret) + if (ret) { GENI_SE_ERR(mas->ipc, true, mas->dev, "Failed to setup tx dma %d\n", ret); + xfer->tx_dma = 0; + return ret; + } } } /* Ensure all writes are done before the WM interrupt */ mb(); + return ret; } 
static void handle_fifo_timeout(struct spi_geni_master *mas, @@ -1204,10 +1209,10 @@ static void handle_fifo_timeout(struct spi_geni_master *mas, "Failed to cancel/abort m_cmd\n"); } if (mas->cur_xfer_mode == SE_DMA) { - if (xfer->tx_buf) + if (xfer->tx_buf && xfer->tx_dma) geni_se_tx_dma_unprep(mas->wrapper_dev, xfer->tx_dma, xfer->len); - if (xfer->rx_buf) + if (xfer->rx_buf && xfer->rx_dma) geni_se_rx_dma_unprep(mas->wrapper_dev, xfer->rx_dma, xfer->len); } @@ -1235,7 +1240,14 @@ static int spi_geni_transfer_one(struct spi_master *spi, if (mas->cur_xfer_mode != GSI_DMA) { reinit_completion(&mas->xfer_done); - setup_fifo_xfer(xfer, mas, slv->mode, spi); + ret = setup_fifo_xfer(xfer, mas, slv->mode, spi); + if (ret) { + GENI_SE_ERR(mas->ipc, true, mas->dev, + "setup_fifo_xfer failed: %d\n", ret); + mas->cur_xfer = NULL; + goto err_fifo_geni_transfer_one; + } + if (spi->slave) spi->slave_state = true; mutex_unlock(&mas->spi_ssr.ssr_lock); @@ -1274,7 +1286,13 @@ static int spi_geni_transfer_one(struct spi_master *spi, reinit_completion(&mas->tx_cb); reinit_completion(&mas->rx_cb); - setup_gsi_xfer(xfer, mas, slv, spi); + ret = setup_gsi_xfer(xfer, mas, slv, spi); + if (ret) { + GENI_SE_ERR(mas->ipc, true, mas->dev, + "setup_gsi_xfer failed: %d\n", ret); + mas->cur_xfer = NULL; + goto err_gsi_geni_transfer_one; + } if ((mas->num_xfers >= NUM_SPI_XFER) || (list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))) { diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index 58608651fbff..2b96e4057ab6 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -57,7 +57,7 @@ #define BRIDGE_RX_QUEUE_SIZE 8 #define BRIDGE_RX_BUF_SIZE 2048 #define BRIDGE_TX_QUEUE_SIZE 8 -#define BRIDGE_TX_BUF_SIZE (50 * 1024) +#define BRIDGE_TX_BUF_SIZE 2048 #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ #define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ @@ -963,6 +963,9 @@ static void 
usb_cser_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_cdev *port = func_to_port(f); + /* Reset string id */ + cser_string_defs[0].id = 0; + usb_free_all_descriptors(f); usb_cser_free_req(port->port_usb.notify, port->port_usb.notify_req); } diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 0e0cba202662..ca172be4369b 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -3335,6 +3335,13 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f) drain_workqueue(gsi->d_port.ipa_usb_wq); ipa_usb_deinit_teth_prot((enum ipa_usb_teth_prot)gsi->prot_id); + /* Reset string ids */ + rndis_gsi_string_defs[0].id = 0; + ecm_gsi_string_defs[0].id = 0; + rmnet_gsi_string_defs[0].id = 0; + mbim_gsi_string_defs[0].id = 0; + qdss_gsi_string_defs[0].id = 0; + skip_ipa_dinit: if (gsi->prot_id == USB_PROT_RNDIS_IPA) { gsi->d_port.sm_state = STATE_UNINITIALIZED; diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index afe2569193c7..6e54df08ced5 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -1,8 +1,7 @@ /* * f_qdss.c -- QDSS function Driver * - * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. 
@@ -423,11 +422,13 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f) qdss_data_intf_desc.bInterfaceNumber = iface; qdss->data_iface_id = iface; - id = usb_string_id(c->cdev); - if (id < 0) - return id; - qdss_string_defs[QDSS_DATA_IDX].id = id; - qdss_data_intf_desc.iInterface = id; + if (!qdss_string_defs[QDSS_DATA_IDX].id) { + id = usb_string_id(c->cdev); + if (id < 0) + return id; + qdss_string_defs[QDSS_DATA_IDX].id = id; + qdss_data_intf_desc.iInterface = id; + } if (qdss->debug_inface_enabled) { /* Allocate ctrl I/F */ @@ -438,11 +439,14 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f) } qdss_ctrl_intf_desc.bInterfaceNumber = iface; qdss->ctrl_iface_id = iface; - id = usb_string_id(c->cdev); - if (id < 0) - return id; - qdss_string_defs[QDSS_CTRL_IDX].id = id; - qdss_ctrl_intf_desc.iInterface = id; + + if (!qdss_string_defs[QDSS_CTRL_IDX].id) { + id = usb_string_id(c->cdev); + if (id < 0) + return id; + qdss_string_defs[QDSS_CTRL_IDX].id = id; + qdss_ctrl_intf_desc.iInterface = id; + } } /* for non-accelerated path keep tx fifo size 1k */ @@ -526,6 +530,10 @@ static void qdss_unbind(struct usb_configuration *c, struct usb_function *f) flush_workqueue(qdss->wq); + /* Reset string ids */ + qdss_string_defs[QDSS_DATA_IDX].id = 0; + qdss_string_defs[QDSS_CTRL_IDX].id = 0; + qdss->debug_inface_enabled = 0; clear_eps(f); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 50bbe0c69dad..2c3e7b378901 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1160,11 +1160,8 @@ static int block_operations(struct f2fs_sb_info *sbi) .nr_to_write = LONG_MAX, .for_reclaim = 0, }; - struct blk_plug plug; int err = 0, cnt = 0; - blk_start_plug(&plug); - retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { @@ -1192,7 +1189,7 @@ static int block_operations(struct f2fs_sb_info *sbi) f2fs_unlock_all(sbi); err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); if (err) - goto out; + return err; 
cond_resched(); goto retry_flush_quotas; } @@ -1208,7 +1205,7 @@ static int block_operations(struct f2fs_sb_info *sbi) f2fs_unlock_all(sbi); err = f2fs_sync_inode_meta(sbi); if (err) - goto out; + return err; cond_resched(); goto retry_flush_quotas; } @@ -1224,7 +1221,7 @@ static int block_operations(struct f2fs_sb_info *sbi) if (err) { up_write(&sbi->node_change); f2fs_unlock_all(sbi); - goto out; + return err; } cond_resched(); goto retry_flush_nodes; @@ -1236,8 +1233,6 @@ static int block_operations(struct f2fs_sb_info *sbi) */ __prepare_cp_block(sbi); up_write(&sbi->node_change); -out: - blk_finish_plug(&plug); return err; } diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h index bdbeb2ffd8e9..965b3d95b8f5 100644 --- a/include/linux/ipa_wdi3.h +++ b/include/linux/ipa_wdi3.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -89,6 +89,7 @@ struct ipa_wdi_hdr_info { * @is_meta_data_valid: if meta data is valid * @meta_data: meta data if any * @meta_data_mask: meta data mask + * @is_tx1_used: to indicate whether 2.4g or 5g iface */ struct ipa_wdi_reg_intf_in_params { const char *netdev_name; @@ -97,6 +98,7 @@ struct ipa_wdi_reg_intf_in_params { u8 is_meta_data_valid; u32 meta_data; u32 meta_data_mask; + u8 is_tx1_used; }; /** @@ -189,6 +191,9 @@ struct ipa_wdi_pipe_setup_info_smmu { * @tx_smmu: smmu parameters to connect TX pipe(from IPA to WLAN) * @rx: parameters to connect RX pipe(from WLAN to IPA) * @rx_smmu: smmu parameters to connect RX pipe(from WLAN to IPA) + * @is_tx1_used: to notify extra pipe required/not + * @tx1: parameters to connect TX1 pipe(from IPA to WLAN second pipe) + * @tx1_smmu: smmu parameters to connect TX1 pipe(from IPA to WLAN second pipe) */ struct ipa_wdi_conn_in_params { 
ipa_notify_cb notify; @@ -204,6 +209,11 @@ struct ipa_wdi_conn_in_params { struct ipa_wdi_pipe_setup_info rx; struct ipa_wdi_pipe_setup_info_smmu rx_smmu; } u_rx; + bool is_tx1_used; + union { + struct ipa_wdi_pipe_setup_info tx; + struct ipa_wdi_pipe_setup_info_smmu tx_smmu; + } u_tx1; }; /** @@ -211,10 +221,12 @@ struct ipa_wdi_conn_in_params { * to WLAN driver * @tx_uc_db_pa: physical address of IPA uC doorbell for TX * @rx_uc_db_pa: physical address of IPA uC doorbell for RX + * @tx1_uc_db_pa: physical address of IPA uC doorbell for TX1 */ struct ipa_wdi_conn_out_params { phys_addr_t tx_uc_db_pa; phys_addr_t rx_uc_db_pa; + phys_addr_t tx1_uc_db_pa; }; /** diff --git a/include/linux/msm_mhi_dev.h b/include/linux/msm_mhi_dev.h index f086e848327f..42eb9fa05405 100644 --- a/include/linux/msm_mhi_dev.h +++ b/include/linux/msm_mhi_dev.h @@ -73,6 +73,7 @@ struct mhi_req { struct list_head list; union mhi_dev_ring_element_type *el; void (*client_cb)(void *req); + bool is_stale; }; /* SW channel client list */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2d036930a3cd..b5f3d24ddb25 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -150,6 +150,8 @@ #define PCI_VENDOR_ID_DYNALINK 0x0675 #define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 +#define PCI_VENDOR_ID_UBIQUITI 0x0777 + #define PCI_VENDOR_ID_BERKOM 0x0871 #define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 #define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 428278a44c7d..88efacce5b5c 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2015,2020 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. 
* * This program is free software; you can redistribute it and/or modify @@ -23,6 +23,21 @@ struct qcom_scm_hdcp_req { u32 val; }; +struct qcom_scm_vmperm { + int vmid; + int perm; +}; + +#define QCOM_SCM_VMID_HLOS 0x3 +#define QCOM_SCM_VMID_MSS_MSA 0xF +#define QCOM_SCM_VMID_WLAN 0x18 +#define QCOM_SCM_VMID_WLAN_CE 0x19 +#define QCOM_SCM_PERM_READ 0x4 +#define QCOM_SCM_PERM_WRITE 0x2 +#define QCOM_SCM_PERM_EXEC 0x1 +#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) +#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) + #if IS_ENABLED(CONFIG_QCOM_SCM) extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); @@ -37,6 +52,9 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); +extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + unsigned int *src, struct qcom_scm_vmperm *newvm, + int dest_cnt); extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); extern int qcom_scm_set_remote_state(u32 state, u32 id); diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h index 631fa449c386..d1f644ace36a 100644 --- a/include/uapi/linux/esoc_ctrl.h +++ b/include/uapi/linux/esoc_ctrl.h @@ -44,6 +44,7 @@ enum esoc_evt { ESOC_CMD_ENG_OFF, ESOC_INVALID_STATE, ESOC_RETRY_PON_EVT, + ESOC_BOOT_STATE, }; enum esoc_cmd { diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 5f0fd5ebb94a..3a50f6ca1619 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -137,6 +137,7 @@ struct snd_compr_audio_info { #define SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER 0 #define SNDRV_COMPRESS_RENDER_MODE_STC_MASTER 1 #define SNDRV_COMPRESS_RENDER_MODE_TTP 2 +#define 
SNDRV_COMPRESS_RENDER_MODE_TTP_PASS_THROUGH 3 #define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0 #define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1