
Commit

Merge pull request #2153 from vknecht/aosp/LE.UM.2.3.2.r1.4-stability-fixes

[2.3.2.r1.4] Backport stability fixes from 7.1.r1 branch (v2)
jerpelea committed Jan 7, 2020
2 parents 4a54b7d + 10a3efb commit 861f7a1
Showing 20 changed files with 113 additions and 52 deletions.
14 changes: 7 additions & 7 deletions drivers/char/adsprpc.c
@@ -2541,20 +2541,20 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
if (err)
goto bail;
if (me->channel[fl->cid].spd[session].pdrcount !=
me->channel[fl->cid].spd[session].prevpdrcount) {
if (fastrpc_mmap_remove_ssr(fl))
pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
me->channel[fl->cid].spd[session].prevpdrcount =
me->channel[fl->cid].spd[session].pdrcount;
}
if (!me->channel[fl->cid].spd[session].ispdup) {
VERIFY(err, 0);
if (err) {
err = -ENOTCONN;
goto bail;
}
}
if (me->channel[fl->cid].spd[session].pdrcount !=
me->channel[fl->cid].spd[session].prevpdrcount) {
if (fastrpc_mmap_remove_ssr(fl))
pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
me->channel[fl->cid].spd[session].prevpdrcount =
me->channel[fl->cid].spd[session].pdrcount;
}
bail:
return err;
}
13 changes: 12 additions & 1 deletion drivers/clk/qcom/clk-rcg2.c
@@ -263,6 +263,7 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f_curr;
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
unsigned long recalc_rate;

if (rcg->flags & DFS_ENABLE_RCG)
return rcg->current_freq;
@@ -300,7 +301,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
hid_div &= mask;
}

return clk_rcg2_calc_rate(parent_rate, m, n, mode, hid_div);

recalc_rate = clk_rcg2_calc_rate(parent_rate, m, n, mode, hid_div);

/*
* Check the case when the RCG has been initialized to a non-CXO
* frequency.
*/
if (rcg->enable_safe_config && !rcg->current_freq)
rcg->current_freq = recalc_rate;

return recalc_rate;
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
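Note on the clk-rcg2.c hunk above: the recalculated rate is now also cached into rcg->current_freq the first time recalc runs on an RCG that uses the safe-config scheme, so the driver no longer reports a current frequency of 0 for a clock the bootloader left running. A minimal userspace sketch of that "cache on first recalculation" idea, with illustrative names (rcg_state, calc_rate) rather than the kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for struct clk_rcg2: only the fields the pattern needs. */
struct rcg_state {
    bool enable_safe_config;     /* RCG parks on a safe source when disabled */
    unsigned long current_freq;  /* 0 until the rate has been observed once */
};

/* Hypothetical rate computation; the real driver derives this from M/N/HID dividers. */
static unsigned long calc_rate(unsigned long parent_rate, unsigned int div)
{
    return parent_rate / div;
}

static unsigned long recalc_rate(struct rcg_state *rcg,
                                 unsigned long parent_rate, unsigned int div)
{
    unsigned long rate = calc_rate(parent_rate, div);

    /* Remember the rate the hardware was left at, so "current frequency"
     * is never reported as 0 for a clock that is already running. */
    if (rcg->enable_safe_config && !rcg->current_freq)
        rcg->current_freq = rate;

    return rate;
}

int main(void)
{
    struct rcg_state rcg = { .enable_safe_config = true, .current_freq = 0 };

    printf("recalc -> %lu\n", recalc_rate(&rcg, 19200000UL, 2));
    printf("cached current_freq -> %lu\n", rcg.current_freq);
    return 0;
}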
4 changes: 3 additions & 1 deletion drivers/crypto/msm/qce50.c
@@ -1,7 +1,7 @@
/*
* QTI Crypto Engine driver.
*
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -6105,6 +6105,7 @@ void *qce_open(struct platform_device *pdev, int *rc)
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
arm_iommu_detach_device(pce_dev->pdev);
if (pce_dev->enable_s1_smmu)
qce_iommu_release_iomapping(pce_dev);

@@ -6137,6 +6138,7 @@ int qce_close(void *handle)
kfree(pce_dev->dummyreq_in_buf);
kfree(pce_dev->iovec_vmem);

arm_iommu_detach_device(pce_dev->pdev);
if (pce_dev->enable_s1_smmu)
qce_iommu_release_iomapping(pce_dev);

10 changes: 7 additions & 3 deletions drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -121,7 +121,7 @@ static ssize_t debugfs_state_info_read(struct file *file,
dsi_ctrl->clk_freq.pix_clk_rate,
dsi_ctrl->clk_freq.esc_clk_rate);

/* TODO: make sure that this does not exceed 4K */
len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
@@ -176,8 +175,7 @@ static ssize_t debugfs_reg_dump_read(struct file *file,
return rc;
}


/* TODO: make sure that this does not exceed 4K */
len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
@@ -583,20 +582,23 @@ static int dsi_ctrl_clocks_init(struct platform_device *pdev,
hs_link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
if (IS_ERR(hs_link->byte_clk)) {
rc = PTR_ERR(hs_link->byte_clk);
hs_link->byte_clk = NULL;
pr_err("failed to get byte_clk, rc=%d\n", rc);
goto fail;
}

hs_link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
if (IS_ERR(hs_link->pixel_clk)) {
rc = PTR_ERR(hs_link->pixel_clk);
hs_link->pixel_clk = NULL;
pr_err("failed to get pixel_clk, rc=%d\n", rc);
goto fail;
}

lp_link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
if (IS_ERR(lp_link->esc_clk)) {
rc = PTR_ERR(lp_link->esc_clk);
lp_link->esc_clk = NULL;
pr_err("failed to get esc_clk, rc=%d\n", rc);
goto fail;
}
@@ -610,13 +612,15 @@ static int dsi_ctrl_clocks_init(struct platform_device *pdev,
rcg->byte_clk = devm_clk_get(&pdev->dev, "byte_clk_rcg");
if (IS_ERR(rcg->byte_clk)) {
rc = PTR_ERR(rcg->byte_clk);
rcg->byte_clk = NULL;
pr_err("failed to get byte_clk_rcg, rc=%d\n", rc);
goto fail;
}

rcg->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk_rcg");
if (IS_ERR(rcg->pixel_clk)) {
rc = PTR_ERR(rcg->pixel_clk);
rcg->pixel_clk = NULL;
pr_err("failed to get pixel_clk_rcg, rc=%d\n", rc);
goto fail;
}
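The two debugfs hunks in dsi_ctrl.c replace a "TODO: make sure that this does not exceed 4K" note with an explicit clamp of len to SZ_4K before copy_to_user(), since the backing buffer is a 4 KiB allocation. A hedged userspace sketch of the same clamp-before-copy idea; clamp_copy and the 4096 constant are illustrative stand-ins, not the kernel helpers:

#include <stdio.h>
#include <string.h>

#define BUF_CAP 4096  /* stands in for SZ_4K, the size of the kernel-side buffer */

/* Copy at most 'cap' bytes out of 'src', mirroring min_t(size_t, len, SZ_4K)
 * followed by copy_to_user() in the driver. Returns the number of bytes copied. */
static size_t clamp_copy(char *dst, size_t cap, const char *src, size_t len)
{
    if (len > cap)
        len = cap;          /* never read or expose memory past the backing buffer */
    memcpy(dst, src, len);
    return len;
}

int main(void)
{
    char kernel_buf[BUF_CAP];
    char user_buf[BUF_CAP];
    size_t produced = 5000;  /* a length a formatting bug could over-report */

    memset(kernel_buf, 'x', sizeof(kernel_buf));
    printf("copied %zu bytes\n",
           clamp_copy(user_buf, sizeof(user_buf), kernel_buf, produced));
    return 0;
}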
4 changes: 2 additions & 2 deletions drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1318,7 +1318,7 @@ static ssize_t debugfs_esd_trigger_check(struct file *file,
atomic_read(&display->panel->esd_recovery_pending))
return user_len;

buf = kzalloc(user_len, GFP_KERNEL);
buf = kzalloc(user_len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;

@@ -1372,7 +1372,7 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file,
if (*ppos)
return 0;

buf = kzalloc(len, GFP_KERNEL);
buf = kzalloc(len + 1, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;

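The dsi_display.c hunks grow the debugfs scratch buffers from len to len + 1 bytes so the user-supplied input can be NUL-terminated before being parsed as a string. A small sketch of why the extra byte matters, assuming the buffer is later handed to string-parsing helpers; read_user_input below is purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy 'len' user bytes into a heap buffer and NUL-terminate it, mirroring
 * kzalloc(len + 1, GFP_KERNEL) + copy_from_user() + terminating the string. */
static char *read_user_input(const char *user_data, size_t len)
{
    char *buf = calloc(len + 1, 1);   /* +1 leaves room for the terminator */
    if (!buf)
        return NULL;
    memcpy(buf, user_data, len);      /* user data is not NUL-terminated */
    buf[len] = '\0';                  /* safe only because of the +1 above */
    return buf;
}

int main(void)
{
    const char user_data[2] = { '4', '2' };   /* e.g. echo -n 42 > debugfs_node */
    char *buf = read_user_input(user_data, sizeof(user_data));

    if (buf) {
        printf("parsed value: %ld\n", strtol(buf, NULL, 10));
        free(buf);
    }
    return 0;
}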
6 changes: 4 additions & 2 deletions drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -3149,9 +3149,11 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
pr_err("failed to parse panel gpios, rc=%d\n", rc);

rc = dsi_panel_parse_bl_config(panel, of_node);
if (rc)
if (rc) {
pr_err("failed to parse backlight config, rc=%d\n", rc);

if (rc == -EPROBE_DEFER)
goto error;
}

rc = dsi_panel_parse_misc_features(panel, of_node);
if (rc)
8 changes: 8 additions & 0 deletions drivers/gpu/msm/kgsl_pwrctrl.c
@@ -183,6 +183,12 @@ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level,
pwr->thermal_pwrlevel_floor,
pwr->min_pwrlevel);

/* Ensure that max/min pwrlevels are within thermal max/min limits */
max_pwrlevel = min_t(unsigned int, max_pwrlevel,
pwr->thermal_pwrlevel_floor);
min_pwrlevel = max_t(unsigned int, min_pwrlevel,
pwr->thermal_pwrlevel);

switch (pwrc->type) {
case KGSL_CONSTRAINT_PWRLEVEL: {
switch (pwrc->sub_type) {
@@ -2834,6 +2840,7 @@ _aware(struct kgsl_device *device)
case KGSL_STATE_RESET:
if (!kgsl_gmu_isenabled(device))
break;
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
status = gmu_start(device);
break;
case KGSL_STATE_INIT:
@@ -2892,6 +2899,7 @@ _aware(struct kgsl_device *device)
* to make sure next attempt to wake up
* GMU/GPU is indeed a fresh start.
*/
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
gmu_suspend(device);
gmu->unrecovered = true;
kgsl_pwrctrl_set_state(device, state);
1 change: 0 additions & 1 deletion drivers/iommu/dma-mapping-fast.c
@@ -175,7 +175,6 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
nbits, align);
if (unlikely(bit > mapping->num_4k_pages)) {
/* try wrapping */
mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
bit = bitmap_find_next_zero_area(
mapping->bitmap, mapping->num_4k_pages, 0, nbits,
align);
7 changes: 4 additions & 3 deletions drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1542,6 +1542,9 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));

/* Set the disconnect in progress flag to avoid calling cb.*/
atomic_set(&ep->disconnect_in_progress, 1);

gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error deallocating channel: %d\n", gsi_res);
@@ -1844,9 +1847,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
/* Set disconnect in progress flag so further flow control events are
* not honored.
*/
spin_lock(&ipa3_ctx->disconnect_lock);
ep->disconnect_in_progress = true;
spin_unlock(&ipa3_ctx->disconnect_lock);
atomic_set(&ep->disconnect_in_progress, 1);

/* If flow is disabled at this point, restore the ep state.*/
ep_ctrl.ipa_ep_delay = false;
14 changes: 8 additions & 6 deletions drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2603,11 +2603,8 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
metadata = status.metadata;
ucp = status.ucp;
ep = &ipa3_ctx->ep[src_pipe];
if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
!ep->valid ||
!ep->client_notify)) {
IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
src_pipe, ep->valid, ep->client_notify);
if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
IPAERR("drop pipe=%d\n", src_pipe);
dev_kfree_skb_any(rx_skb);
return;
}
@@ -2629,7 +2626,12 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
metadata, *(u32 *)rx_skb->cb);
IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));

ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
ep->valid && ep->client_notify))
ep->client_notify(ep->priv, IPA_RECEIVE,
(unsigned long)(rx_skb));
else
dev_kfree_skb_any(rx_skb);
}

static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
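Taken together, the ipa_client.c, ipa_dp.c and ipa_i.h hunks turn disconnect_in_progress from a spinlock-protected bool into an atomic_t that is set before a channel is torn down and checked in the LAN receive callback before client_notify is invoked, so a late completion is dropped rather than delivered to a client that is going away. A minimal sketch of that flag-then-check pattern using C11 atomics in place of the kernel's atomic_t; ep_ctx, teardown and rx_callback are illustrative names, not the driver API:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative endpoint context; the real struct ipa3_ep_context has many more fields. */
struct ep_ctx {
    atomic_int disconnect_in_progress;
    void (*client_notify)(const char *event);
};

static void teardown(struct ep_ctx *ep)
{
    /* Publish the flag before releasing the channel so racing callbacks see it. */
    atomic_store(&ep->disconnect_in_progress, 1);
    /* ... channel deallocation would happen here ... */
}

static void rx_callback(struct ep_ctx *ep)
{
    /* Only deliver to the client if no disconnect is in flight. */
    if (!atomic_load(&ep->disconnect_in_progress) && ep->client_notify)
        ep->client_notify("IPA_RECEIVE");
    else
        printf("dropping packet: endpoint disconnecting\n");
}

static void notify(const char *event) { printf("client got %s\n", event); }

int main(void)
{
    struct ep_ctx ep = { .disconnect_in_progress = 0, .client_notify = notify };

    rx_callback(&ep);   /* delivered */
    teardown(&ep);
    rx_callback(&ep);   /* dropped */
    return 0;
}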
8 changes: 7 additions & 1 deletion drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -117,6 +117,7 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
u32 hdr_base_addr;
gfp_t flag = GFP_KERNEL;

mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

@@ -125,9 +126,14 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,

IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

alloc:
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
&mem->phys_base, GFP_KERNEL);
&mem->phys_base, flag);
if (!mem->base) {
if (flag == GFP_KERNEL) {
flag = GFP_ATOMIC;
goto alloc;
}
IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
}
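The ipa_hdr.c hunk above (and the matching ipahal_fltrt.c hunks further down) wrap dma_alloc_coherent() in a retry: the first attempt uses GFP_KERNEL and, if it fails, the allocation is retried once with GFP_ATOMIC before giving up with -ENOMEM. A hedged userspace sketch of that one-shot fallback pattern; try_alloc stands in for dma_alloc_coherent() and the flag values are just labels, not the real gfp_t bits:

#include <stdio.h>
#include <stdlib.h>

enum alloc_flag { FLAG_NORMAL, FLAG_FALLBACK };  /* stand-ins for GFP_KERNEL / GFP_ATOMIC */

/* Pretend allocator that fails in "normal" mode, to exercise the retry path. */
static void *try_alloc(size_t size, enum alloc_flag flag)
{
    if (flag == FLAG_NORMAL)
        return NULL;            /* simulate a GFP_KERNEL failure */
    return malloc(size);
}

static void *alloc_with_fallback(size_t size)
{
    enum alloc_flag flag = FLAG_NORMAL;
    void *base;

retry:
    base = try_alloc(size, flag);
    if (!base) {
        if (flag == FLAG_NORMAL) {   /* retry exactly once with the fallback flag */
            flag = FLAG_FALLBACK;
            goto retry;
        }
        fprintf(stderr, "fail to alloc buffer of size %zu\n", size);
        return NULL;
    }
    return base;
}

int main(void)
{
    void *p = alloc_with_fallback(4096);

    printf("allocation %s\n", p ? "succeeded on fallback" : "failed");
    free(p);
    return 0;
}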
2 changes: 1 addition & 1 deletion drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -770,7 +770,7 @@ struct ipa3_ep_context {
bool keep_ipa_awake;
struct ipa3_wlan_stats wstats;
u32 uc_offload_state;
bool disconnect_in_progress;
atomic_t disconnect_in_progress;
u32 qmi_request_sent;
bool napi_enabled;
u32 eot_in_poll_err;
7 changes: 7 additions & 0 deletions drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1182,10 +1182,17 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
ipa_master_driver_init_complt_ind_msg_v01));
ind.master_driver_init_status.result =
IPA_QMI_RESULT_SUCCESS_V01;

if (unlikely(!ipa3_svc_handle)) {
IPAWANERR("Invalid svc handle.Ignore sending ind.");
return;
}

rc = qmi_send_ind(ipa3_svc_handle, curr_conn,
&ipa3_master_driver_complete_indication_desc,
&ind,
sizeof(ind));

IPAWANDBG("ipa_qmi_service_client good\n");
} else {
IPAWANERR("not send indication (%d)\n",
3 changes: 3 additions & 0 deletions drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -549,6 +549,9 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
return -EFAULT;
}

atomic_set(&ep_ul->disconnect_in_progress, 1);
atomic_set(&ep_dl->disconnect_in_progress, 1);

if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
cmd.size = sizeof(*cmd_data_v4_0);
else
24 changes: 20 additions & 4 deletions drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -3345,6 +3345,7 @@ static int ipa_fltrt_alloc_lcl_bdy(
struct ipahal_fltrt_alloc_imgs_params *params)
{
struct ipahal_fltrt_obj *obj;
gfp_t flag = GFP_KERNEL;

obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];

@@ -3377,10 +3378,15 @@ static int ipa_fltrt_alloc_lcl_bdy(
IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
params->nhash_bdy.size);

alloc1:
params->nhash_bdy.base = dma_alloc_coherent(
ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
&params->nhash_bdy.phys_base, GFP_KERNEL);
&params->nhash_bdy.phys_base, flag);
if (!params->nhash_bdy.base) {
if (flag == GFP_KERNEL) {
flag = GFP_ATOMIC;
goto alloc1;
}
IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
params->nhash_bdy.size);
return -ENOMEM;
@@ -3408,10 +3414,15 @@ static int ipa_fltrt_alloc_lcl_bdy(
IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
params->hash_bdy.size);

alloc2:
params->hash_bdy.base = dma_alloc_coherent(
ipahal_ctx->ipa_pdev, params->hash_bdy.size,
&params->hash_bdy.phys_base, GFP_KERNEL);
&params->hash_bdy.phys_base, flag);
if (!params->hash_bdy.base) {
if (flag == GFP_KERNEL) {
flag = GFP_ATOMIC;
goto alloc2;
}
IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
params->hash_bdy.size);
goto hash_bdy_fail;
@@ -3479,6 +3490,7 @@ int ipahal_fltrt_allocate_hw_tbl_imgs(
int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
{
struct ipahal_fltrt_obj *obj;
gfp_t flag = GFP_KERNEL;

IPAHAL_DBG_LOW("Entry\n");

@@ -3496,10 +3508,14 @@ int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)

/* add word for rule-set terminator */
tbl_mem->size += obj->tbl_width;

alloc:
tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
&tbl_mem->phys_base, GFP_KERNEL);
&tbl_mem->phys_base, flag);
if (!tbl_mem->base) {
if (flag == GFP_KERNEL) {
flag = GFP_ATOMIC;
goto alloc;
}
IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
tbl_mem->size);
return -ENOMEM;
4 changes: 2 additions & 2 deletions drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -2597,8 +2597,8 @@ u32 ipahal_aggr_get_max_pkt_limit(void)
void ipahal_get_aggr_force_close_valmask(int ep_idx,
struct ipahal_reg_valmask *valmask)
{
u32 shft;
u32 bmsk;
u32 shft = 0;
u32 bmsk = 0;

if (!valmask) {
IPAHAL_ERR("Input error\n");
