From ec6793f175760de6d919bcbba8e15868f55af1c2 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 27 May 2021 18:16:38 -0700
Subject: [PATCH] nvme-rdma: fix in-capsule data send for chained sgls

[ Upstream commit 12b2aaadb6d5ef77434e8db21f469f46fe2d392e ]

We have only 2 inline sg entries and we allow 4 sg entries for the send
wr sge. Larger sgls will be chained. However, when we build the
in-capsule send wr sge, we iterate without taking into account that the
sgl may be chained and still fit in-capsule (which can happen if the sgl
is bigger than 2, but less than or equal to 4).

Fix in-capsule data mapping to correctly iterate chained sgls.

Fixes: 38e1800275d3 ("nvme-rdma: Avoid preallocating big SGL for data")
Reported-by: Walker, Benjamin
Signed-off-by: Sagi Grimberg
Reviewed-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
Signed-off-by: Sasha Levin
---
 drivers/nvme/host/rdma.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index be905d4fdb47f..ce8b3ce7582be 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1319,16 +1319,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
 		int count)
 {
 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
 	struct ib_sge *sge = &req->sge[1];
+	struct scatterlist *sgl;
 	u32 len = 0;
 	int i;
 
-	for (i = 0; i < count; i++, sgl++, sge++) {
+	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
 		sge->addr = sg_dma_address(sgl);
 		sge->length = sg_dma_len(sgl);
 		sge->lkey = queue->device->pd->local_dma_lkey;
 		len += sge->length;
+		sge++;
 	}
 
 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
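
For context on why the plain "sgl++" walk was wrong: a scatterlist larger
than one allocation is chained, i.e. the last slot of an array is not a data
entry but a link to the next array, so advancing by pointer arithmetic lands
on the chain slot instead of the next data entry. for_each_sg() advances with
sg_next(), which recognizes the chain marker and follows it. Below is a
minimal user-space sketch of that idea; struct fake_sg and fake_sg_next() are
hypothetical stand-ins for illustration only, not the kernel scatterlist API.

/*
 * Minimal user-space model of scatterlist chaining.  struct fake_sg and
 * fake_sg_next() are hypothetical stand-ins, not the kernel API; they only
 * illustrate why a plain "sgl++" walk breaks once the list is chained,
 * while an sg_next()-style walk (what for_each_sg() does) stays correct.
 */
#include <stdio.h>

struct fake_sg {
	unsigned long dma_addr;		/* pretend DMA address of the segment */
	unsigned int dma_len;		/* pretend DMA length of the segment  */
	struct fake_sg *chain;		/* non-NULL: slot is a chain link     */
	int last;			/* non-zero: last entry of the list   */
};

/* Advance like the kernel's sg_next(): follow chain slots, stop at the end. */
static struct fake_sg *fake_sg_next(struct fake_sg *sg)
{
	if (sg->last)
		return NULL;
	sg++;
	if (sg->chain)			/* chain slot: jump to the next array */
		sg = sg->chain;
	return sg;
}

int main(void)
{
	/* Second array: two more data entries, the last one ends the list. */
	struct fake_sg second[2] = {
		{ .dma_addr = 0x3000, .dma_len = 512 },
		{ .dma_addr = 0x4000, .dma_len = 512, .last = 1 },
	};
	/* First array: two data entries plus a chain slot pointing at "second". */
	struct fake_sg first[3] = {
		{ .dma_addr = 0x1000, .dma_len = 512 },
		{ .dma_addr = 0x2000, .dma_len = 512 },
		{ .chain = second },
	};
	struct fake_sg *sgl;
	int count = 4, i;

	/*
	 * Correct walk over all four data entries.  A "sgl++" walk would
	 * instead read first[2] (the chain slot) as the third entry.
	 */
	for (i = 0, sgl = first; i < count; i++, sgl = fake_sg_next(sgl))
		printf("entry %d: addr=0x%lx len=%u\n",
		       i, sgl->dma_addr, sgl->dma_len);
	return 0;
}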