Skip to content
This repository has been archived by the owner on Sep 5, 2023. It is now read-only.

Commit

Permalink
rpma: make rpma_mr_atomic_write() support native atomic write
Browse files Browse the repository at this point in the history
rpma_mr_atomic_write() uses native atomic write if it is supported
by the created QP.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
  • Loading branch information
yangx-jy committed Dec 21, 2022
1 parent 6972dfa commit 5630242
Show file tree
Hide file tree
Showing 4 changed files with 202 additions and 45 deletions.
20 changes: 19 additions & 1 deletion src/mr.c
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020-2022, Intel Corporation */
/* Copyright 2021-2022, Fujitsu */
/* Copyright (c) 2021-2022, Fujitsu Limited */

/*
* mr.c -- librpma memory region-related implementations
Expand Down Expand Up @@ -197,6 +197,24 @@ rpma_mr_atomic_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_o
{
RPMA_DEBUG_TRACE;

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp);
/* check if the created QP supports native atomic write */
if (qpx && qpx->wr_atomic_write) {
ibv_wr_start(qpx);
qpx->wr_id = (uint64_t)op_context;
qpx->wr_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0;
RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {});
ibv_wr_atomic_write(qpx, dst->rkey, dst->raddr + dst_offset, src);
int ret = ibv_wr_complete(qpx);
if (ret) {
RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_wr_complete()");
return RPMA_E_PROVIDER;
}

return 0;
}
#endif
struct ibv_send_wr wr = {0};
struct ibv_sge sge = {0};

Expand Down
54 changes: 54 additions & 0 deletions tests/unit/common/mocks-ibverbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ struct ibv_cq Ibv_rcq;
struct ibv_cq Ibv_srq_rcq;
struct ibv_cq Ibv_cq_unknown;
struct ibv_qp Ibv_qp;
#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
struct ibv_qp_ex Ibv_qp_ex;
#endif
struct ibv_mr Ibv_mr;
struct ibv_srq Ibv_srq;

Expand Down Expand Up @@ -429,3 +432,54 @@ ibv_destroy_srq(struct ibv_srq *srq)

return mock_type(int);
}

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
/*
 * ibv_qp_to_qp_ex -- ibv_qp_to_qp_ex() mock
 */
struct ibv_qp_ex *
ibv_qp_to_qp_ex(struct ibv_qp *qp)
{
	/* verify the QP handed in by the code under test */
	check_expected_ptr(qp);

	/* return whatever extended-QP pointer the test queued up */
	struct ibv_qp_ex *qpx = mock_type(struct ibv_qp_ex *);

	return qpx;
}

/*
 * ibv_wr_start_mock -- ibv_wr_start() mock
 */
void
ibv_wr_start_mock(struct ibv_qp_ex *qp)
{
	/* only verifies the extended QP; no WR state is tracked here */
	check_expected_ptr(qp);
}

/*
 * ibv_wr_atomic_write_mock -- ibv_wr_atomic_write() mock
 *
 * Pops the expected-arguments struct queued by the test (via will_return)
 * and checks every argument, plus the wr_id/wr_flags the caller must have
 * set on the extended QP before issuing the work request.
 */
void
ibv_wr_atomic_write_mock(struct ibv_qp_ex *qp, uint32_t rkey,
	uint64_t remote_addr, const void *atomic_wr)
{
	struct ibv_wr_atomic_write_mock_args *args =
		mock_type(struct ibv_wr_atomic_write_mock_args *);

	/* compare the QP as a pointer, not truncated through int */
	assert_ptr_equal(qp, args->qp);
	assert_int_equal(qp->wr_id, args->wr_id);
	assert_int_equal(qp->wr_flags, args->wr_flags);
	assert_int_equal(rkey, args->rkey);
	assert_int_equal(remote_addr, args->remote_addr);
	/* a native atomic write always transfers exactly 8 bytes */
	assert_memory_equal(atomic_wr, args->atomic_wr, 8);
}

/*
 * ibv_wr_complete_mock -- ibv_wr_complete() mock
 */
int
ibv_wr_complete_mock(struct ibv_qp_ex *qp)
{
	/* pointer flavor, consistent with the other QP mocks in this file */
	check_expected_ptr(qp);

	/* return value queued by the test: 0 or an errno-style error */
	return mock_type(int);
}
#endif
30 changes: 29 additions & 1 deletion tests/unit/common/mocks-ibverbs.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright 2020-2022, Intel Corporation */
/* Copyright 2021-2022, Fujitsu */
/* Copyright (c) 2021-2022, Fujitsu Limited */

/*
* mocks-ibverbs.h -- the ibverbs mocks' header
Expand All @@ -22,6 +22,9 @@ extern struct ibv_cq Ibv_rcq;
extern struct ibv_cq Ibv_srq_rcq;
extern struct ibv_cq Ibv_cq_unknown;
extern struct ibv_qp Ibv_qp;
#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
extern struct ibv_qp_ex Ibv_qp_ex;
#endif
extern struct ibv_mr Ibv_mr;
extern struct ibv_srq Ibv_srq;

Expand All @@ -34,6 +37,9 @@ extern struct ibv_srq Ibv_srq;
#define MOCK_IBV_CQ_UNKNOWN (struct ibv_cq *)&Ibv_cq_unknown
#define MOCK_IBV_PD (struct ibv_pd *)&Ibv_pd
#define MOCK_QP (struct ibv_qp *)&Ibv_qp
#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
#define MOCK_QPX (struct ibv_qp_ex *)&Ibv_qp_ex
#endif
#define MOCK_MR (struct ibv_mr *)&Ibv_mr
#define MOCK_IBV_SRQ (struct ibv_srq *)&Ibv_srq

Expand Down Expand Up @@ -70,6 +76,17 @@ struct ibv_post_srq_recv_mock_args {
int ret;
};

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
/* expected arguments verified by ibv_wr_atomic_write_mock() */
struct ibv_wr_atomic_write_mock_args {
	struct ibv_qp_ex *qp;	/* extended QP the mock expects */
	uint64_t wr_id;		/* expected qp->wr_id */
	uint32_t wr_flags;	/* expected qp->wr_flags */
	uint32_t rkey;		/* expected remote memory key */
	uint64_t remote_addr;	/* expected remote address */
	const void *atomic_wr;	/* expected payload (8 bytes compared) */
};
#endif

#ifdef ON_DEMAND_PAGING_SUPPORTED
int ibv_query_device_ex_mock(struct ibv_context *ibv_ctx,
const struct ibv_query_device_ex_input *input,
Expand Down Expand Up @@ -97,4 +114,15 @@ struct ibv_srq *ibv_create_srq(struct ibv_pd *pd, struct ibv_srq_init_attr *srq_

int ibv_destroy_srq(struct ibv_srq *srq);

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
struct ibv_qp_ex *ibv_qp_to_qp_ex(struct ibv_qp *qp);

void ibv_wr_start_mock(struct ibv_qp_ex *qp);

void ibv_wr_atomic_write_mock(struct ibv_qp_ex *qp, uint32_t rkey, uint64_t remote_addr,
const void *atomic_wr);

int ibv_wr_complete_mock(struct ibv_qp_ex *qp);
#endif

#endif /* MOCKS_IBVERBS_H */
143 changes: 100 additions & 43 deletions tests/unit/mr/mr-atomic_write.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2022, Intel Corporation */
/* Copyright (c) 2022, Fujitsu Limited */

/*
* mr-atomic_write.c -- rpma_mr_atomic_write() unit tests
Expand All @@ -19,29 +20,89 @@

static const char Mock_src[8];

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
static struct ibv_wr_atomic_write_mock_args atomic_write_args;
#endif
static struct ibv_post_send_mock_args args;

/*
* atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER -
* rpma_mr_atomic_write failed with RPMA_E_PROVIDER
* when send_flags == IBV_SEND_INLINE | IBV_SEND_FENCE
* for RPMA_F_COMPLETION_ON_ERROR
* configure_mr_atomic_write -- configure the common mocks for rpma_mr_atomic_write()
*/
static void
atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr)
configure_mr_atomic_write(int flags, int ret)
{
/* configure mock */
#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP);
will_return(ibv_qp_to_qp_ex, MOCK_QPX);
expect_value(ibv_wr_start_mock, qp, MOCK_QPX);
atomic_write_args.qp = MOCK_QPX;
atomic_write_args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
atomic_write_args.wr_flags = (flags == RPMA_F_COMPLETION_ALWAYS) ? IBV_SEND_SIGNALED : 0;
atomic_write_args.rkey = MOCK_RKEY;
atomic_write_args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
atomic_write_args.atomic_wr = Mock_src;
will_return(ibv_wr_atomic_write_mock, &atomic_write_args);
expect_value(ibv_wr_complete_mock, qp, MOCK_QPX);
will_return(ibv_wr_complete_mock, ret);
#else
args.qp = MOCK_QP;
args.opcode = IBV_WR_RDMA_WRITE;
args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE;
if (flags == RPMA_F_COMPLETION_ALWAYS)
args.send_flags |= IBV_SEND_SIGNALED;
args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
args.rkey = MOCK_RKEY;
args.ret = ret;
will_return(ibv_post_send_mock, &args);
#endif
}

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
/*
 * atomic_write__qpx_NULL_success -- rpma_mr_atomic_write()
 * succeeds (falls back to ibv_post_send()) when ibv_qp_to_qp_ex()
 * returns NULL
 */
static void
atomic_write__qpx_NULL_success(void **mrs_ptr)
{
	struct mrs *mrs = (struct mrs *)*mrs_ptr;

	/* configure mocks */
	struct ibv_post_send_mock_args args;
	expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP);
	will_return(ibv_qp_to_qp_ex, NULL);
	args.qp = MOCK_QP;
	args.opcode = IBV_WR_RDMA_WRITE;
	/* RPMA_F_COMPLETION_ALWAYS adds IBV_SEND_SIGNALED */
	args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE | IBV_SEND_SIGNALED;
	args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
	args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
	args.rkey = MOCK_RKEY;
	args.ret = MOCK_OK;
	will_return(ibv_post_send_mock, &args);

	/* run test */
	int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET,
			Mock_src, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT);

	/* verify the results */
	assert_int_equal(ret, MOCK_OK);
}
#endif

/*
* atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER -
* rpma_mr_atomic_write failed with RPMA_E_PROVIDER
* when RPMA_F_COMPLETION_ON_ERROR is specified
*/
static void
atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr)
{
struct mrs *mrs = (struct mrs *)*mrs_ptr;

/* configure mocks */
configure_mr_atomic_write(RPMA_F_COMPLETION_ON_ERROR, MOCK_ERRNO);

/* run test */
int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET,
Mock_src, RPMA_F_COMPLETION_ON_ERROR, MOCK_OP_CONTEXT);
Expand All @@ -53,27 +114,15 @@ atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr)
/*
* atomic_write__COMPL_ON_SUCCESS_failed_E_PROVIDER -
* rpma_mr_atomic_write failed with RPMA_E_PROVIDER
* when send_flags == IBV_SEND_INLINE | IBV_SEND_FENCE
* | IBV_SEND_SIGNALED
* for RPMA_F_COMPLETION_ALWAYS
* when RPMA_F_COMPLETION_ALWAYS is specified
*/
static void
atomic_write__COMPL_ON_SUCCESS_failed_E_PROVIDER(void **mrs_ptr)
{
struct mrs *mrs = (struct mrs *)*mrs_ptr;

/* configure mocks */
struct ibv_post_send_mock_args args;
args.qp = MOCK_QP;
args.opcode = IBV_WR_RDMA_WRITE;
/* RPMA_F_COMPLETION_ALWAYS */
args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE
| IBV_SEND_SIGNALED;
args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
args.rkey = MOCK_RKEY;
args.ret = MOCK_ERRNO;
will_return(ibv_post_send_mock, &args);
configure_mr_atomic_write(RPMA_F_COMPLETION_ALWAYS, MOCK_ERRNO);

/* run test */
int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET,
Expand All @@ -92,16 +141,7 @@ atomic_write__COMPLETION_ALWAYS_success(void **mrs_ptr)
struct mrs *mrs = (struct mrs *)*mrs_ptr;

/* configure mock */
struct ibv_post_send_mock_args args;
args.qp = MOCK_QP;
args.opcode = IBV_WR_RDMA_WRITE;
args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE
| IBV_SEND_SIGNALED;
args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
args.rkey = MOCK_RKEY;
args.ret = MOCK_OK;
will_return(ibv_post_send_mock, &args);
configure_mr_atomic_write(RPMA_F_COMPLETION_ALWAYS, MOCK_OK);

/* run test */
int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET,
Expand All @@ -120,15 +160,7 @@ atomic_write__COMPLETION_ON_ERROR_success(void **mrs_ptr)
struct mrs *mrs = (struct mrs *)*mrs_ptr;

/* configure mock */
struct ibv_post_send_mock_args args;
args.qp = MOCK_QP;
args.opcode = IBV_WR_RDMA_WRITE;
args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE;
args.wr_id = (uint64_t)MOCK_OP_CONTEXT;
args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET;
args.rkey = MOCK_RKEY;
args.ret = MOCK_OK;
will_return(ibv_post_send_mock, &args);
configure_mr_atomic_write(RPMA_F_COMPLETION_ON_ERROR, MOCK_OK);

/* run test */
int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET,
Expand All @@ -145,7 +177,26 @@ static int
group_setup_mr_atomic_write(void **unused)
{
/* configure global mocks */

#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
/*
* ibv_wr_start(), ibv_wr_atomic_write() and ibv_wr_complete() are defined
* as static inline functions in the included header <infiniband/verbs.h>,
* so we cannot define them again. They are defined as:
* {
* return qp->wr_start(qp);
* }
* {
* return qp->wr_atomic_write(qp, rkey, remote_addr, atomic_wr);
* }
* {
* return qp->wr_complete(qp);
* }
* so we can set these three function pointers to our mock functions.
*/
Ibv_qp_ex.wr_start = ibv_wr_start_mock;
Ibv_qp_ex.wr_atomic_write = ibv_wr_atomic_write_mock;
Ibv_qp_ex.wr_complete = ibv_wr_complete_mock;
#endif
/*
* ibv_post_send() is defined as a static inline function
* in the included header <infiniband/verbs.h>,
Expand All @@ -164,6 +215,12 @@ group_setup_mr_atomic_write(void **unused)

static const struct CMUnitTest tests_mr__atomic_write[] = {
/* rpma_mr_atomic_write() unit tests */
#ifdef IBV_WR_ATOMIC_WRITE_SUPPORTED
cmocka_unit_test_setup_teardown(
atomic_write__qpx_NULL_success,
setup__mr_local_and_remote,
teardown__mr_local_and_remote),
#endif
cmocka_unit_test_setup_teardown(
atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER,
setup__mr_local_and_remote,
Expand Down

0 comments on commit 5630242

Please sign in to comment.