nvme-pci: refactor nvme_unmap_data
commit 9275c20 upstream.

Split out three helpers from nvme_unmap_data that will allow finer-grained
unwinding from nvme_map_data.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Marc Orr <marcorr@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Christoph Hellwig authored and gregkh committed Jan 27, 2021
1 parent 13bcd09 commit 8807226
Showing 1 changed file with 49 additions and 28 deletions.
77 changes: 49 additions & 28 deletions drivers/nvme/host/pci.c
@@ -542,50 +542,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
 	int i;
 
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-			       rq_dma_dir(req));
-		return;
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+		dma_addr = next_dma_addr;
 	}
 
-	WARN_ON_ONCE(!iod->nents);
+}
 
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+	const int last_sg = SGES_PER_PAGE - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
+	int i;
 
+	for (i = 0; i < iod->npages; i++) {
+		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-	if (iod->npages == 0)
-		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-			dma_addr);
+		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+		dma_addr = next_dma_addr;
+	}
 
-	for (i = 0; i < iod->npages; i++) {
-		void *addr = nvme_pci_iod_list(req)[i];
+}
 
-		if (iod->use_sgl) {
-			struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-			next_dma_addr =
-				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-		} else {
-			__le64 *prp_list = addr;
+	if (is_pci_p2pdma_page(sg_page(iod->sg)))
+		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+				    rq_dma_dir(req));
+	else
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-		}
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-		dma_addr = next_dma_addr;
+	if (iod->dma_len) {
+		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+			       rq_dma_dir(req));
+		return;
 	}
 
+	WARN_ON_ONCE(!iod->nents);
+
+	nvme_unmap_sg(dev, req);
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+			      iod->first_dma);
+	else if (iod->use_sgl)
+		nvme_free_sgls(dev, req);
+	else
+		nvme_free_prps(dev, req);
 	mempool_free(iod->sg, dev->iod_mempool);
 }
 
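A minimal user-space analogue of the layout these helpers walk (all names below are illustrative, not from the kernel source): each page-sized block of 64-bit entries stores the address of the next block in its last slot, which is why nvme_free_prps() must read prp_list[last_prp] before handing the block back to dma_pool_free(). The failure path in build_chain() mirrors the finer-grained unwinding the commit message refers to, freeing only the blocks allocated so far.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 512	/* 4096 bytes / sizeof(uint64_t) */
#define LAST_ENTRY (ENTRIES_PER_PAGE - 1)	/* slot holding the next-block link */

/*
 * Free npages blocks, reading each block's forward link before freeing
 * the block it lives in: the same read-before-free ordering used by
 * nvme_free_prps() and nvme_free_sgls() above.
 */
static void free_chain(uint64_t *page, int npages)
{
	for (int i = 0; i < npages; i++) {
		uint64_t *next = (uint64_t *)(uintptr_t)page[LAST_ENTRY];

		free(page);
		page = next;
	}
}

/*
 * Build a chain of npages blocks. On allocation failure, unwind only
 * the blocks built so far instead of tearing everything down.
 */
static uint64_t *build_chain(int npages)
{
	uint64_t *head = NULL, *prev = NULL;

	for (int i = 0; i < npages; i++) {
		uint64_t *page = calloc(ENTRIES_PER_PAGE, sizeof(uint64_t));

		if (!page) {
			free_chain(head, i);	/* partial unwind */
			return NULL;
		}
		if (prev)
			prev[LAST_ENTRY] = (uint64_t)(uintptr_t)page;
		else
			head = page;
		prev = page;
	}
	return head;
}

int main(void)
{
	uint64_t *chain = build_chain(3);

	if (!chain)
		return EXIT_FAILURE;
	free_chain(chain, 3);
	puts("chain built and freed");
	return 0;
}

Any C99 compiler will build this. The kernel versions differ in that the forward links are DMA bus addresses handed out by a dma_pool rather than virtual pointers, so the addresses are converted with le64_to_cpu() and released with dma_pool_free() instead of free().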
