Skip to content
Permalink
Browse files
scsi: ibmvscsi: Use dma_alloc_noncoherent() instead of get_zeroed_page/dma_map_single()

Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
with dma_alloc_noncoherent/dma_free_noncoherent() helps to reduce
code size and simplify the code, and the hardware can keep DMA
coherent itself.

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
  • Loading branch information
Cai Huoqing authored and intel-lab-lkp committed Oct 12, 2021
1 parent f9473a6 commit ff4f34f9b1f3a0f14ed7472b8f4da0e12dc63d56
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 32 deletions.
@@ -869,8 +869,8 @@ static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
{
struct device *dev = vhost->dev;

dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
free_page((unsigned long)queue->msgs.handle);
dma_free_noncoherent(dev, PAGE_SIZE, queue->msgs.handle,
queue->msg_token, DMA_BIDIRECTIONAL);
queue->msgs.handle = NULL;

ibmvfc_free_event_pool(vhost, queue);
@@ -5663,19 +5663,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
return -ENOMEM;
}

queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
queue->msgs.handle = dma_alloc_noncoherent(dev, PAGE_SIZE, &queue->msg_token,
DMA_BIDIRECTIONAL, GFP_KERNEL);
if (!queue->msgs.handle)
return -ENOMEM;

queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
DMA_BIDIRECTIONAL);

if (dma_mapping_error(dev, queue->msg_token)) {
free_page((unsigned long)queue->msgs.handle);
queue->msgs.handle = NULL;
return -ENOMEM;
}

queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
@@ -151,10 +151,8 @@ static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long)queue->msgs);
dma_free_noncoherent(hostdata->dev, PAGE_SIZE,
queue->msgs, queue->msg_token, DMA_BIDIRECTIONAL);
}

/**
@@ -331,18 +329,12 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);

queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

if (!queue->msgs)
goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);

queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);

if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
queue->msgs = dma_alloc_noncoherent(hostdata->dev,
PAGE_SIZE, &queue->msg_token,
DMA_BIDIRECTIONAL, GFP_KERNEL);
if (!queue->msgs)
goto malloc_failed;

gather_partition_info();
set_adapter_info(hostdata);
@@ -395,11 +387,8 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)queue->msgs);
dma_free_noncoherent(hostdata->dev, PAGE_SIZE, queue->msgs,
queue->msg_token, DMA_BIDIRECTIONAL);
malloc_failed:
return -1;
}

0 comments on commit ff4f34f

Please sign in to comment.