IB/mlx5: Enable UAR to have DevX UID
A UID field was added to the alloc_uar and dealloc_uar PRM commands to specify
the DevX UID of the UAR. This change enables the firmware to validate that
users access only their own UAR resources.

For kernel-allocated UARs, the UID stays 0, as it does today.

Signed-off-by: Meir Lichtinger <meirl@nvidia.com>
Reviewed-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Meir Lichtinger authored and rleon committed Sep 28, 2021
1 parent 8de1e9b commit d2c8a15
Showing 5 changed files with 66 additions and 33 deletions.
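
For orientation before the per-file changes, here is a minimal caller-side sketch of how the new IB-side wrappers are meant to be used (illustrative only, not part of the patch; the example_* helpers are made-up names): a context created with a DevX UID passes that UID on both allocation and release, while kernel-owned UARs keep passing 0 and behave as before.

#include "mlx5_ib.h"    /* struct mlx5_ib_dev, struct mlx5_ib_ucontext */
#include "cmd.h"        /* mlx5_cmd_uar_alloc(), mlx5_cmd_uar_dealloc() */

/* Illustrative sketch: allocate one UAR on behalf of a user context.
 * context->devx_uid is 0 unless the context was created with
 * MLX5_IB_ALLOC_UCTX_DEVX; a non-zero UID lets firmware check that the
 * UAR really belongs to this DevX context.
 */
static int example_alloc_user_uar(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_ucontext *context,
				  u32 *uarn)
{
	return mlx5_cmd_uar_alloc(dev->mdev, uarn, context->devx_uid);
}

/* Release must pass the same UID that was used for allocation. */
static void example_free_user_uar(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_ucontext *context,
				  u32 uarn)
{
	mlx5_cmd_uar_dealloc(dev->mdev, uarn, context->devx_uid);
}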
26 changes: 26 additions & 0 deletions drivers/infiniband/hw/mlx5/cmd.c
@@ -206,3 +206,29 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
kfree(in);
return err;
}

int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
int err;

MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
MLX5_SET(alloc_uar_in, in, uid, uid);
err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
if (err)
return err;

*uarn = MLX5_GET(alloc_uar_out, out, uar);
return 0;
}

int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
MLX5_SET(dealloc_uar_in, in, uid, uid);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/mlx5/cmd.h
@@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
#endif /* MLX5_IB_CMD_H */
55 changes: 31 additions & 24 deletions drivers/infiniband/hw/mlx5/main.c
@@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte

bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_static_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
context->devx_uid);
if (err)
goto error;

@@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte

error:
for (--i; i >= 0; i--)
if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
context->devx_uid))
mlx5_ib_warn(dev, "failed to free uar %d\n", i);

return err;
@@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
context->devx_uid);
}

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
@@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return -EINVAL;

if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
if (err < 0)
goto out_ctx;
context->devx_uid = err;
}

lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
bfregi = &context->bfregi;
@@ -1903,15 +1913,15 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
goto out_ctx;
goto out_devx;

mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
goto out_ctx;
goto out_devx;
}

bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
@@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
goto out_sys_pages;

uar_done:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
if (err < 0)
goto out_uars;
context->devx_uid = err;
}

err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
context->devx_uid);
if (err)
goto out_devx;
goto out_uars;

INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1972,9 +1975,6 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,

out_mdev:
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
mlx5_ib_devx_destroy(dev, context->devx_uid);

out_uars:
deallocate_uars(dev, context);
@@ -1985,6 +1985,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
out_count:
kfree(bfregi->count);

out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
mlx5_ib_devx_destroy(dev, context->devx_uid);

out_ctx:
return err;
}
@@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
bfregi = &context->bfregi;
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);

if (context->devx_uid)
mlx5_ib_devx_destroy(dev, context->devx_uid);

deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
kfree(bfregi->count);

if (context->devx_uid)
mlx5_ib_devx_destroy(dev, context->devx_uid);
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);

switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
@@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
break;
case MLX5_IB_MMAP_TYPE_UAR_WC:
case MLX5_IB_MMAP_TYPE_UAR_NC:
mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
context->devx_uid);
kfree(mentry);
break;
default:
@@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
bfregi->count[bfreg_dyn_idx]++;
mutex_unlock(&bfregi->lock);

err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
context->devx_uid);
if (err) {
mlx5_ib_warn(dev, "UAR alloc failed\n");
goto free_bfreg;
@@ -2240,7 +2247,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
if (!dyn_uar)
return err;

mlx5_cmd_free_uar(dev->mdev, idx);
mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);

free_bfreg:
mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
@@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return ERR_PTR(-ENOMEM);

dev = to_mdev(c->ibucontext.device);
err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
if (err)
goto end;

@@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return entry;

err_insert:
mlx5_cmd_free_uar(dev->mdev, uar_index);
mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
kfree(entry);
return ERR_PTR(err);
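
To make the reordering in main.c easier to follow, the skeleton below outlines the resulting setup order in mlx5_ib_alloc_ucontext() (a simplified sketch, not the actual function body; the bfreg bookkeeping is omitted and the outline name is made up): the DevX UID is now created before any UAR is allocated so that the UARs carry it, and the error labels unwind in the reverse order.

/* Simplified outline of the new ordering; several steps omitted for brevity. */
static int alloc_ucontext_outline(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_ucontext *context,
				  struct mlx5_ib_alloc_ucontext_req_v2 *req)
{
	int err = 0;

	if (req->flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);	/* 1. DevX UID first */
		if (err < 0)
			return err;
		context->devx_uid = err;
	}

	err = allocate_uars(dev, context);		/* 2. UARs now carry the UID */
	if (err)
		goto out_devx;

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_uars;

	return 0;

out_uars:
	deallocate_uars(dev, context);
out_devx:					/* unwind mirrors the setup order */
	if (req->flags & MLX5_IB_ALLOC_UCTX_DEVX)
		mlx5_ib_devx_destroy(dev, context->devx_uid);
	return err;
}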
14 changes: 7 additions & 7 deletions drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -36,29 +36,29 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
int err;

MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
if (!err)
*uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
if (err)
return err;

*uarn = MLX5_GET(alloc_uar_out, out, uar);
return 0;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
2 changes: 0 additions & 2 deletions include/linux/mlx5/driver.h
@@ -1005,8 +1005,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
