RDMA/hns: Bugfix for incorrect association between dip_idx and dgid
[ Upstream commit eb653ed ]

dip_idx and dgid should have a one-to-one mapping relationship, but when
qp_num wraps back to the starting number, two different dgids may be
incorrectly associated with the same dip_idx.

One solution is to store the qp_nums that are not yet assigned to a dip_idx
in an array. When a dip_idx needs to be allocated for a new dgid, a spare
qp_num is extracted from the array and assigned to the dip_idx.
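
A minimal sketch of this spare-index queue, written as standalone C (idx_ring, idx_ring_push() and idx_ring_pop() are illustrative names, not driver code; the driver keeps the array, head and tail in struct hns_roce_idx_table and updates them inline under dip_list_lock):

#include <stdint.h>

/* Illustrative stand-in for the driver's struct hns_roce_idx_table. */
struct idx_ring {
        uint32_t *spare_idx;    /* array sized to the number of QPs */
        uint32_t head;          /* next spare qp_num to hand out as a dip_idx */
        uint32_t tail;          /* next slot in which to record a qp_num */
        uint32_t num_qps;
};

/* Every call that modifies a QP first records that QP's number at the tail. */
static void idx_ring_push(struct idx_ring *r, uint32_t qp_num)
{
        r->spare_idx[r->tail] = qp_num;
        r->tail = (r->tail == r->num_qps - 1) ? 0 : r->tail + 1;
}

/* When a new dgid needs its own dip_idx, a spare qp_num is taken from the head. */
static uint32_t idx_ring_pop(struct idx_ring *r)
{
        uint32_t idx = r->spare_idx[r->head];

        r->head = (r->head == r->num_qps - 1) ? 0 : r->head + 1;
        return idx;
}

Because every caller pushes its own qp_num before any pop can happen, the pop always finds an entry, and a new dgid receives an index from this pool instead of reusing ibqp->qp_num directly.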

Fixes: f91696f ("RDMA/hns: Support congestion control type selection according to the FW")
Link: https://lore.kernel.org/r/1629884592-23424-4-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Junxian Huang <huangjunxian4@hisilicon.com>
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Junxian Huang authored and gregkh committed Sep 18, 2021
1 parent c0ff8bc commit 93aa16e
Showing 4 changed files with 31 additions and 5 deletions.
9 changes: 8 additions & 1 deletion drivers/infiniband/hw/hns/hns_roce_device.h
@@ -496,6 +496,12 @@ struct hns_roce_bank {
         u32 next; /* Next ID to allocate. */
 };

+struct hns_roce_idx_table {
+        u32 *spare_idx;
+        u32 head;
+        u32 tail;
+};
+
 struct hns_roce_qp_table {
         struct hns_roce_hem_table qp_table;
         struct hns_roce_hem_table irrl_table;
@@ -504,6 +510,7 @@ struct hns_roce_qp_table {
         struct mutex scc_mutex;
         struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
         struct mutex bank_mutex;
+        struct hns_roce_idx_table idx_table;
 };

 struct hns_roce_cq_table {
@@ -1146,7 +1153,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

9 changes: 8 additions & 1 deletion drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4507,12 +4507,18 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 {
         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+        u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
+        u32 *head = &hr_dev->qp_table.idx_table.head;
+        u32 *tail = &hr_dev->qp_table.idx_table.tail;
         struct hns_roce_dip *hr_dip;
         unsigned long flags;
         int ret = 0;

         spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

+        spare_idx[*tail] = ibqp->qp_num;
+        *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
+
         list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
                 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
                         *dip_idx = hr_dip->dip_idx;
@@ -4530,7 +4536,8 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         }

         memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
-        hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+        hr_dip->dip_idx = *dip_idx = spare_idx[*head];
+        *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
         list_add_tail(&hr_dip->node, &hr_dev->dip_list);

 out:
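
For illustration, here is a hypothetical wraparound scenario traced against the new head/tail logic above (a standalone sketch with the dgid lookup reduced to comments; the QP numbers are made up):

#include <assert.h>
#include <stdint.h>

#define NUM_QPS 4       /* illustrative queue depth */

int main(void)
{
        uint32_t spare_idx[NUM_QPS];
        uint32_t head = 0, tail = 0;
        uint32_t dip_a, dip_b;

        /* QP 0 connects to dgid A (no entry yet): queue qp_num 0, then pop it. */
        spare_idx[tail] = 0;
        tail = (tail == NUM_QPS - 1) ? 0 : tail + 1;
        dip_a = spare_idx[head];
        head = (head == NUM_QPS - 1) ? 0 : head + 1;

        /* QP 1 also connects to dgid A (entry exists): its qp_num is only
         * queued as a spare and A keeps dip_a. */
        spare_idx[tail] = 1;
        tail = (tail == NUM_QPS - 1) ? 0 : tail + 1;

        /* qp_num wraps around; a new QP reuses qp_num 0 and connects to dgid B.
         * The old code would have set B's dip_idx to ibqp->qp_num == 0 == dip_a. */
        spare_idx[tail] = 0;
        tail = (tail == NUM_QPS - 1) ? 0 : tail + 1;
        dip_b = spare_idx[head];
        head = (head == NUM_QPS - 1) ? 0 : head + 1;

        assert(dip_a == 0 && dip_b == 1);       /* the two dgids keep distinct dip_idx values */
        return 0;
}
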
8 changes: 6 additions & 2 deletions drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,6 +748,12 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
                 goto err_uar_table_free;
         }

+        ret = hns_roce_init_qp_table(hr_dev);
+        if (ret) {
+                dev_err(dev, "Failed to init qp_table.\n");
+                goto err_uar_table_free;
+        }
+
         hns_roce_init_pd_table(hr_dev);

         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
@@ -757,8 +763,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)

         hns_roce_init_cq_table(hr_dev);

-        hns_roce_init_qp_table(hr_dev);
-
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                 ret = hns_roce_init_srq_table(hr_dev);
                 if (ret) {
10 changes: 9 additions & 1 deletion drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1423,12 +1423,17 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
         return cur + nreq >= hr_wq->wqe_cnt;
 }

-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 {
         struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
         unsigned int reserved_from_bot;
         unsigned int i;

+        qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
+                                        sizeof(u32), GFP_KERNEL);
+        if (!qp_table->idx_table.spare_idx)
+                return -ENOMEM;
+
         mutex_init(&qp_table->scc_mutex);
         mutex_init(&qp_table->bank_mutex);
         xa_init(&hr_dev->qp_table_xa);
@@ -1446,6 +1451,8 @@ void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
                                                HNS_ROCE_QP_BANK_NUM - 1;
                 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
         }
+
+        return 0;
 }

 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
@@ -1454,4 +1461,5 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)

         for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
                 ida_destroy(&hr_dev->qp_table.bank[i].ida);
+        kfree(hr_dev->qp_table.idx_table.spare_idx);
 }
