Coccinelle: kzalloc-simple: Testing

Testing all the fixes: apply the semantic patch tree-wide so that allocations immediately followed by memset(0) collapse into their zeroing variants (kzalloc, kzalloc_node, kmem_cache_zalloc, dma_zalloc_coherent, pci_zalloc_consistent).
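
For reference, every hunk below was generated by a Coccinelle semantic patch. This is a minimal SmPL sketch of the kmalloc/memset rule only — illustrative, not the full kzalloc-simple.cocci from this series, which carries analogous rules for kmalloc_node, kmem_cache_alloc, dma_alloc_coherent, and pci_alloc_consistent:

@depends on patch@
type T, T2;
expression x, E1, E2;
statement S;
@@

- x = (T)kmalloc(E1, E2);
+ x = kzalloc(E1, E2);
  if ((x == NULL) || ...) S
- memset((T2)x, 0, E1);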

Signed-off-by: Himanshu Jha <himanshujha199640@gmail.com>
himanshujha199640 committed Dec 24, 2017
1 parent 11f5333 commit 24c13fe24c21a5997cbdb099d1da9f5e8e23c100
Showing with 229 additions and 377 deletions.
  1. +2 −6 arch/powerpc/kvm/book3s_hv.c
  2. +3 −5 arch/powerpc/platforms/pasemi/dma_lib.c
  3. +4 −5 arch/powerpc/sysdev/fsl_rmu.c
  4. +1 −3 arch/sh/mm/consistent.c
  5. +1 −2 arch/sparc/kernel/iommu.c
  6. +3 −5 drivers/crypto/amcc/crypto4xx_core.c
  7. +3 −4 drivers/crypto/ixp4xx_crypto.c
  8. +3 −4 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
  9. +3 −3 drivers/gpu/drm/drm_pci.c
  10. +1 −2 drivers/gpu/drm/virtio/virtgpu_vq.c
  11. +4 −5 drivers/infiniband/hw/bnxt_re/qplib_res.c
  12. +3 −4 drivers/infiniband/hw/cxgb3/cxio_hal.c
  13. +3 −3 drivers/infiniband/hw/mthca/mthca_memfree.c
  14. +7 −12 drivers/infiniband/hw/ocrdma/ocrdma_hw.c
  15. +2 −4 drivers/infiniband/hw/ocrdma/ocrdma_stats.c
  16. +3 −5 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
  17. +2 −3 drivers/infiniband/hw/qedr/verbs.c
  18. +2 −3 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
  19. +1 −3 drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
  20. +2 −4 drivers/net/ethernet/broadcom/bcm63xx_enet.c
  21. +2 −3 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
  22. +2 −5 drivers/net/ethernet/cavium/liquidio/octeon_device.c
  23. +1 −3 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
  24. +2 −3 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
  25. +2 −3 drivers/net/ethernet/intel/ixgb/ixgb_main.c
  26. +2 −5 drivers/net/ethernet/mediatek/mtk_eth_soc.c
  27. +2 −4 drivers/net/ethernet/mellanox/mlxsw/pci.c
  28. +5 −7 drivers/net/ethernet/qlogic/qed/qed_cxt.c
  29. +1 −2 drivers/net/ethernet/qlogic/qed/qed_l2.c
  30. +3 −7 drivers/net/wan/fsl_ucc_hdlc.c
  31. +1 −3 drivers/net/wireless/ath/ath10k/wmi.c
  32. +2 −4 drivers/net/wireless/ath/wcn36xx/dxe.c
  33. +8 −10 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
  34. +1 −2 drivers/scsi/bfa/bfad.c
  35. +2 −6 drivers/scsi/bfa/bfad_debugfs.c
  36. +25 −35 drivers/scsi/bnx2fc/bnx2fc_hwi.c
  37. +22 −29 drivers/scsi/bnx2fc/bnx2fc_tgt.c
  38. +6 −8 drivers/scsi/bnx2i/bnx2i_hwi.c
  39. +8 −11 drivers/scsi/dpt_i2o.c
  40. +2 −8 drivers/scsi/fnic/fnic_debugfs.c
  41. +2 −7 drivers/scsi/fnic/fnic_trace.c
  42. +2 −4 drivers/scsi/hisi_sas/hisi_sas_main.c
  43. +6 −13 drivers/scsi/megaraid/megaraid_sas_base.c
  44. +2 −3 drivers/scsi/megaraid/megaraid_sas_fusion.c
  45. +3 −3 drivers/scsi/mpt3sas/mpt3sas_base.c
  46. +11 −16 drivers/scsi/mvsas/mv_init.c
  47. +3 −6 drivers/scsi/pmcraid.c
  48. +15 −27 drivers/scsi/qedi/qedi_main.c
  49. +2 −3 drivers/scsi/qla2xxx/qla_attr.c
  50. +3 −6 drivers/scsi/qla2xxx/qla_bsg.c
  51. +1 −4 drivers/scsi/qla2xxx/tcm_qla2xxx.c
  52. +2 −3 drivers/scsi/qla4xxx/ql4_init.c
  53. +9 −12 drivers/scsi/qla4xxx/ql4_mbx.c
  54. +2 −3 drivers/scsi/qla4xxx/ql4_nx.c
  55. +5 −7 drivers/scsi/qla4xxx/ql4_os.c
  56. +2 −4 drivers/scsi/scsi_debug.c
  57. +1 −2 drivers/scsi/snic/snic_trc.c
  58. +3 −4 drivers/usb/host/uhci-hcd.c
  59. +2 −5 drivers/usb/host/xhci-mem.c
  60. +1 −2 drivers/video/fbdev/auo_k190x.c
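
Every hunk below has the same shape. As an illustrative composite (variable names hypothetical, not taken from any one driver), code of the form

	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

becomes

	buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

with kzalloc, kzalloc_node, kmem_cache_zalloc, and pci_zalloc_consistent substituted for the corresponding non-zeroing allocators.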
arch/powerpc/kvm/book3s_hv.c
@@ -4381,15 +4381,11 @@ static int kvm_init_subcore_bitmap(void)
 		if (paca[first_cpu].sibling_subcore_state)
 			continue;
 
-		sibling_subcore_state =
-			kmalloc_node(sizeof(struct sibling_subcore_state),
-				     GFP_KERNEL, node);
+		sibling_subcore_state = kzalloc_node(sizeof(struct sibling_subcore_state),
+						     GFP_KERNEL, node);
 		if (!sibling_subcore_state)
 			return -ENOMEM;
 
-		memset(sibling_subcore_state, 0,
-		       sizeof(struct sibling_subcore_state));
-
 		for (j = 0; j < threads_per_core; j++) {
 			int cpu = first_cpu + j;
 
arch/powerpc/platforms/pasemi/dma_lib.c
@@ -255,15 +255,13 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
 
 	chan->ring_size = ring_size;
 
-	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
-					     ring_size * sizeof(u64),
-					     &chan->ring_dma, GFP_KERNEL);
+	chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+					      ring_size * sizeof(u64),
+					      &chan->ring_dma, GFP_KERNEL);
 
 	if (!chan->ring_virt)
 		return -ENOMEM;
 
-	memset(chan->ring_virt, 0, ring_size * sizeof(u64));
-
 	return 0;
 }
 EXPORT_SYMBOL(pasemi_dma_alloc_ring);
arch/powerpc/sysdev/fsl_rmu.c
@@ -756,15 +756,14 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
 	}
 
 	/* Initialize outbound message descriptor ring */
-	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
-				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-				&rmu->msg_tx_ring.phys, GFP_KERNEL);
+	rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
+						    rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+						    &rmu->msg_tx_ring.phys,
+						    GFP_KERNEL);
 	if (!rmu->msg_tx_ring.virt) {
 		rc = -ENOMEM;
 		goto out_dma;
 	}
-	memset(rmu->msg_tx_ring.virt, 0,
-	       rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
 	rmu->msg_tx_ring.tx_slot = 0;
 
 	/* Point dequeue/enqueue pointers at first entry in ring */
arch/sh/mm/consistent.c
@@ -143,14 +143,12 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
 	if (!memsize)
 		return 0;
 
-	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+	buf = dma_zalloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
 	if (!buf) {
 		pr_warning("%s: unable to allocate memory\n", name);
 		return -ENOMEM;
 	}
 
-	memset(buf, 0, memsize);
-
 	r->flags = IORESOURCE_MEM;
 	r->start = dma_handle;
 	r->end = r->start + memsize - 1;
arch/sparc/kernel/iommu.c
@@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	/* Allocate and initialize the free area map. */
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
 	if (!iommu->tbl.map)
 		return -ENOMEM;
-	memset(iommu->tbl.map, 0, sz);
 
 	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
 			    (tlb_type != hypervisor ? iommu_flushall : NULL),
drivers/crypto/amcc/crypto4xx_core.c
@@ -275,14 +275,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
  */
 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 {
-	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
-				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
-				      &dev->gdr_pa, GFP_ATOMIC);
+	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+				       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+				       &dev->gdr_pa, GFP_ATOMIC);
 	if (!dev->gdr)
 		return -ENOMEM;
 
-	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
 	return 0;
 }
 
drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
 {
 	struct device *dev = &pdev->dev;
 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-	crypt_virt = dma_alloc_coherent(dev,
-					NPE_QLEN * sizeof(struct crypt_ctl),
-					&crypt_phys, GFP_ATOMIC);
+	crypt_virt = dma_zalloc_coherent(dev,
+					 NPE_QLEN * sizeof(struct crypt_ctl),
+					 &crypt_phys, GFP_ATOMIC);
 	if (!crypt_virt)
 		return -ENOMEM;
-	memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
 	return 0;
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -81,12 +81,11 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
 		/* add 8 bytes for the rptr/wptr shadows and
 		 * add them to the end of the ring allocation.
 		 */
-		adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
-							 adev->irq.ih.ring_size + 8,
-							 &adev->irq.ih.rb_dma_addr);
+		adev->irq.ih.ring = pci_zalloc_consistent(adev->pdev,
+							  adev->irq.ih.ring_size + 8,
+							  &adev->irq.ih.rb_dma_addr);
 		if (adev->irq.ih.ring == NULL)
 			return -ENOMEM;
-		memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
 		adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
 		adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
 	}
drivers/gpu/drm/drm_pci.c
@@ -61,15 +61,15 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 		return NULL;
 
 	dmah->size = size;
-	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+	dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size,
+					  &dmah->busaddr,
+					  GFP_KERNEL | __GFP_COMP);
 
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
 		return NULL;
 	}
 
-	memset(dmah->vaddr, 0, size);
-
 	/* XXX - Is virt_to_page() legal for consistent mem? */
 	/* Reserve */
 	for (addr = (unsigned long)dmah->vaddr, sz = size;
drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -96,10 +96,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
-	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
 	if (!vbuf)
 		return ERR_PTR(-ENOMEM);
-	memset(vbuf, 0, VBUFFER_SIZE);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -104,13 +104,12 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 
 	if (!sghead) {
 		for (i = 0; i < pages; i++) {
-			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
-							    pbl->pg_size,
-							    &pbl->pg_map_arr[i],
-							    GFP_KERNEL);
+			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+							     pbl->pg_size,
+							     &pbl->pg_map_arr[i],
+							     GFP_KERNEL);
 			if (!pbl->pg_arr[i])
 				goto fail;
-			memset(pbl->pg_arr[i], 0, pbl->pg_size);
 			pbl->pg_count++;
 		}
 	} else {
drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,13 +291,12 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!wq->sq)
 		goto err3;
 
-	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
-				       depth * sizeof(union t3_wr),
-				       &(wq->dma_addr), GFP_KERNEL);
+	wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+					depth * sizeof(union t3_wr),
+					&(wq->dma_addr), GFP_KERNEL);
 	if (!wq->queue)
 		goto err4;
 
-	memset(wq->queue, 0, depth * sizeof(union t3_wr));
 	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	if (!kernel_domain)
drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,13 +623,13 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	page = dev->db_tab->page + end;
 
 alloc:
-	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
-					  &page->mapping, GFP_KERNEL);
+	page->db_rec = dma_zalloc_coherent(&dev->pdev->dev,
+					   MTHCA_ICM_PAGE_SIZE,
+					   &page->mapping, GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
 
 	ret = mthca_MAP_ICM_page(dev, page->mapping,
 				 mthca_uarc_virt(dev, &dev->driver_uar, i));
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
 	q->len = len;
 	q->entry_size = entry_size;
 	q->size = len * entry_size;
-	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
-				   &q->dma, GFP_KERNEL);
+	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+				    &q->dma, GFP_KERNEL);
 	if (!q->va)
 		return -ENOMEM;
-	memset(q->va, 0, q->size);
 	return 0;
 }
 
@@ -1819,12 +1818,11 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		return -ENOMEM;
 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
 	if (!cq->va) {
 		status = -ENOMEM;
 		goto mem_err;
 	}
-	memset(cq->va, 0, cq->len);
 	page_size = cq->len / hw_pages;
 	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
 			      OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -2212,10 +2210,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->sq.max_cnt = max_wqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->sq.va)
 		return -EINVAL;
-	memset(qp->sq.va, 0, len);
 	qp->sq.len = len;
 	qp->sq.pa = pa;
 	qp->sq.entry_size = dev->attr.wqe_size;
@@ -2263,10 +2260,9 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->rq.max_cnt = max_rqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->rq.va)
 		return -ENOMEM;
-	memset(qp->rq.va, 0, len);
 	qp->rq.pa = pa;
 	qp->rq.len = len;
 	qp->rq.entry_size = dev->attr.rqe_size;
@@ -2320,11 +2316,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 	if (dev->attr.ird == 0)
 		return 0;
 
-	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
-					  &pa, GFP_KERNEL);
+	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+					   GFP_KERNEL);
 	if (!qp->ird_q_va)
 		return -ENOMEM;
-	memset(qp->ird_q_va, 0, ird_q_len);
 	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
 			     pa, ird_page_size);
 	for (; i < ird_q_len / dev->attr.rqe_size; i++) {
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,15 +73,13 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			  sizeof(struct ocrdma_rdma_stats_resp));
 
-	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
-				     &mem->pa, GFP_KERNEL);
+	mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+				      &mem->pa, GFP_KERNEL);
 	if (!mem->va) {
 		pr_err("%s: stats mbox allocation failed\n", __func__);
 		return false;
 	}
 
-	memset(mem->va, 0, mem->size);
-
 	/* Alloc debugfs mem */
 	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
 	if (!mem->debugfs_mem)
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -550,13 +550,12 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
-	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
-					    &ctx->ah_tbl.pa, GFP_KERNEL);
+	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+					     &ctx->ah_tbl.pa, GFP_KERNEL);
 	if (!ctx->ah_tbl.va) {
 		kfree(ctx);
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(ctx->ah_tbl.va, 0, map_len);
 	ctx->ah_tbl.len = map_len;
 
 	memset(&resp, 0, sizeof(resp));
@@ -885,13 +884,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
 		return -ENOMEM;
 
 	for (i = 0; i < mr->num_pbls; i++) {
-		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
 		if (!va) {
 			ocrdma_free_mr_pbl_tbl(dev, mr);
 			status = -ENOMEM;
 			break;
 		}
-		memset(va, 0, dma_len);
 		mr->pbl_table[i].va = va;
 		mr->pbl_table[i].pa = pa;
 	}
drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < pbl_info->num_pbls; i++) {
-		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
-					&pa, flags);
+		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+					 flags);
 		if (!va)
 			goto err;
 
-		memset(va, 0, pbl_info->pbl_size);
 		pbl_table[i].va = va;
 		pbl_table[i].pa = pa;
 	}
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -885,16 +885,15 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "device version %d, driver version %d\n",
 		 dev->dsr_version, PVRDMA_VERSION);
 
-	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
-				      &dev->dsrbase, GFP_KERNEL);
+	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+				       &dev->dsrbase, GFP_KERNEL);
 	if (!dev->dsr) {
 		dev_err(&pdev->dev, "failed to allocate shared region\n");
 		ret = -ENOMEM;
 		goto err_uar_unmap;
 	}
 
 	/* Setup the shared region */
-	memset(dev->dsr, 0, sizeof(*dev->dsr));
 	dev->dsr->driver_version = PVRDMA_VERSION;
 	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
 					      PVRDMA_GOS_BITS_32 :
drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,16 +49,14 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
 	struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
 	struct device *dev = &ctx->dev->plat_dev->dev;
 
-	mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+	mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
 
 	if (!mem->va) {
 		mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
 			     size);
 		return -ENOMEM;
 	}
 
-	memset(mem->va, 0, size);
-
 	mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
 	mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
 		       (unsigned long)mem->dma_addr);
