qedn: Add IO level nvme_req and fw_cq workqueues
This patch adds the IO level workqueues:

- qedn_nvme_req_fp_wq(): process new requests, similar to
			 nvme_tcp_io_work(). The flow starts from
			 send_req() and will aggregate all the requests
			 on this CPU core.

- qedn_fw_cq_fp_wq():   process new FW completions. The flow starts from
			the IRQ handler, and for a single interrupt it will
			process all the pending NVMeoF completions in
			polling mode.

Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
smalin1 authored and intel-lab-lkp committed Apr 29, 2021
1 parent fcae660 commit cd00480
Showing 5 changed files with 278 additions and 8 deletions.
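
The request-path handlers declared in qedn.h below, qedn_queue_request() and
qedn_nvme_req_fp_wq_handler(), are implemented in qedn_task.c, which is not
expanded in this view. The following is a minimal sketch of the send-side
producer/consumer flow described in the commit message, assuming the
struct qedn_conn_ctx fields added by this patch (host_pend_req_list,
nvme_req_lock, nvme_req_mutex, nvme_req_fp_wq_entry) and a hypothetical
queue_entry list member on struct nvme_tcp_ofld_req; it is not the patch's
actual code.

/* Sketch only -- would live in qedn_task.c with qedn.h included. */
void qedn_queue_request(struct qedn_conn_ctx *qedn_conn, struct nvme_tcp_ofld_req *req)
{
	/* Producer: park the request on the per-connection pending list. */
	spin_lock(&qedn_conn->nvme_req_lock);
	list_add_tail(&req->queue_entry, &qedn_conn->host_pend_req_list);
	spin_unlock(&qedn_conn->nvme_req_lock);

	/* Kick the per-CPU IO workqueue; commit_rqs() issues the same kick for batches. */
	queue_work_on(qedn_conn->cpu, qedn_conn->nvme_req_fp_wq,
		      &qedn_conn->nvme_req_fp_wq_entry);
}

void qedn_nvme_req_fp_wq_handler(struct work_struct *work)
{
	struct qedn_conn_ctx *qedn_conn =
		container_of(work, struct qedn_conn_ctx, nvme_req_fp_wq_entry);
	struct nvme_tcp_ofld_req *req;

	/*
	 * Consumer: drain every request aggregated on this CPU core,
	 * serialized by nvme_req_mutex against the queue_rq/async/self-requeue paths.
	 */
	mutex_lock(&qedn_conn->nvme_req_mutex);
	while (!list_empty(&qedn_conn->host_pend_req_list)) {
		spin_lock(&qedn_conn->nvme_req_lock);
		req = list_first_entry(&qedn_conn->host_pend_req_list,
				       struct nvme_tcp_ofld_req, queue_entry);
		list_del(&req->queue_entry);
		spin_unlock(&qedn_conn->nvme_req_lock);

		qedn_conn->req = req;	/* request currently being processed */
		/* ... build and post the firmware SQE for this request ... */
	}
	mutex_unlock(&qedn_conn->nvme_req_mutex);
}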
2 changes: 1 addition & 1 deletion drivers/nvme/hw/qedn/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only

obj-$(CONFIG_NVME_QEDN) += qedn.o
qedn-y := qedn_main.o qedn_conn.o
qedn-y := qedn_main.o qedn_conn.o qedn_task.o
29 changes: 29 additions & 0 deletions drivers/nvme/hw/qedn/qedn.h
@@ -47,6 +47,9 @@
#define QEDN_NON_ABORTIVE_TERMINATION 0
#define QEDN_ABORTIVE_TERMINATION 1

#define QEDN_FW_CQ_FP_WQ_WORKQUEUE "qedn_fw_cq_fp_wq"
#define QEDN_NVME_REQ_FP_WQ_WORKQUEUE "qedn_nvme_req_fp_wq"

/*
* TCP offload stack default configurations and defines.
* Future enhancements will allow controlling the configurable
@@ -100,6 +103,7 @@ struct qedn_fp_queue {
struct qedn_ctx *qedn;
struct qed_sb_info *sb_info;
unsigned int cpu;
struct work_struct fw_cq_fp_wq_entry;
u16 sb_id;
char irqname[QEDN_IRQ_NAME_LEN];
};
@@ -131,6 +135,8 @@ struct qedn_ctx {
struct qedn_fp_queue *fp_q_arr;
struct nvmetcp_glbl_queue_entry *fw_cq_array_virt;
dma_addr_t fw_cq_array_phy; /* Physical address of fw_cq_array_virt */
struct workqueue_struct *nvme_req_fp_wq;
struct workqueue_struct *fw_cq_fp_wq;
};

struct qedn_endpoint {
@@ -213,6 +219,25 @@ struct qedn_ctrl {

/* Connection level struct */
struct qedn_conn_ctx {
/* IO path */
struct workqueue_struct *nvme_req_fp_wq; /* ptr to qedn->nvme_req_fp_wq */
struct nvme_tcp_ofld_req *req; /* currently processed request */

struct list_head host_pend_req_list;
/* Spinlock to access pending request list */
spinlock_t nvme_req_lock;
unsigned int cpu;

/* Entry for registering to nvme_req_fp_wq */
struct work_struct nvme_req_fp_wq_entry;
/*
 * Mutex to serialize qedn_process_req as it can be called
 * from multiple places: queue_rq, async, self-requeued
 */
struct mutex nvme_req_mutex;
struct qedn_fp_queue *fp_q;
int qid;

struct qedn_ctx *qedn;
struct nvme_tcp_ofld_queue *queue;
struct nvme_tcp_ofld_ctrl *ctrl;
@@ -280,5 +305,9 @@ int qedn_wait_for_conn_est(struct qedn_conn_ctx *conn_ctx);
int qedn_set_con_state(struct qedn_conn_ctx *conn_ctx, enum qedn_conn_state new_state);
void qedn_terminate_connection(struct qedn_conn_ctx *conn_ctx, int abrt_flag);
__be16 qedn_get_in_port(struct sockaddr_storage *sa);
inline int qedn_validate_cccid_in_range(struct qedn_conn_ctx *conn_ctx, u16 cccid);
void qedn_queue_request(struct qedn_conn_ctx *qedn_conn, struct nvme_tcp_ofld_req *req);
void qedn_nvme_req_fp_wq_handler(struct work_struct *work);
void qedn_io_work_cq(struct qedn_ctx *qedn, struct nvmetcp_fw_cqe *cqe);

#endif /* _QEDN_H_ */
3 changes: 3 additions & 0 deletions drivers/nvme/hw/qedn/qedn_conn.c
@@ -385,6 +385,9 @@ static int qedn_prep_and_offload_queue(struct qedn_conn_ctx *conn_ctx)
}

set_bit(QEDN_CONN_RESRC_FW_SQ, &conn_ctx->resrc_state);
INIT_LIST_HEAD(&conn_ctx->host_pend_req_list);
spin_lock_init(&conn_ctx->nvme_req_lock);

rc = qed_ops->acquire_conn(qedn->cdev,
&conn_ctx->conn_handle,
&conn_ctx->fw_cid,
114 changes: 107 additions & 7 deletions drivers/nvme/hw/qedn/qedn_main.c
@@ -267,6 +267,18 @@ static int qedn_release_ctrl(struct nvme_tcp_ofld_ctrl *ctrl)
return 0;
}

static void qedn_set_ctrl_io_cpus(struct qedn_conn_ctx *conn_ctx, int qid)
{
struct qedn_ctx *qedn = conn_ctx->qedn;
struct qedn_fp_queue *fp_q = NULL;
int index;

index = qid ? (qid - 1) % qedn->num_fw_cqs : 0;
fp_q = &qedn->fp_q_arr[index];

conn_ctx->cpu = fp_q->cpu;
}

static int qedn_create_queue(struct nvme_tcp_ofld_queue *queue, int qid, size_t q_size)
{
struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
@@ -288,13 +300,18 @@ static int qedn_create_queue(struct nvme_tcp_ofld_queue *queue, int qid, size_t
conn_ctx->queue = queue;
conn_ctx->ctrl = ctrl;
conn_ctx->sq_depth = q_size;
qedn_set_ctrl_io_cpus(conn_ctx, qid);

init_waitqueue_head(&conn_ctx->conn_waitq);
atomic_set(&conn_ctx->est_conn_indicator, 0);
atomic_set(&conn_ctx->destroy_conn_indicator, 0);

spin_lock_init(&conn_ctx->conn_state_lock);

INIT_WORK(&conn_ctx->nvme_req_fp_wq_entry, qedn_nvme_req_fp_wq_handler);
conn_ctx->nvme_req_fp_wq = qedn->nvme_req_fp_wq;
conn_ctx->qid = qid;

qedn_initialize_endpoint(&conn_ctx->ep, qedn->local_mac_addr,
&ctrl->conn_params);

@@ -356,6 +373,7 @@ static void qedn_destroy_queue(struct nvme_tcp_ofld_queue *queue)
if (!conn_ctx)
return;

cancel_work_sync(&conn_ctx->nvme_req_fp_wq_entry);
qedn_terminate_connection(conn_ctx, QEDN_ABORTIVE_TERMINATION);

qedn_queue_wait_for_terminate_complete(conn_ctx);
@@ -385,12 +403,24 @@ static int qedn_init_req(struct nvme_tcp_ofld_req *req)

static void qedn_commit_rqs(struct nvme_tcp_ofld_queue *queue)
{
/* Placeholder - queue work */
struct qedn_conn_ctx *conn_ctx;

conn_ctx = (struct qedn_conn_ctx *)queue->private_data;

if (!list_empty(&conn_ctx->host_pend_req_list))
queue_work_on(conn_ctx->cpu, conn_ctx->nvme_req_fp_wq,
&conn_ctx->nvme_req_fp_wq_entry);
}

static int qedn_send_req(struct nvme_tcp_ofld_req *req)
{
/* Placeholder - qedn_send_req */
struct qedn_conn_ctx *qedn_conn = (struct qedn_conn_ctx *)req->queue->private_data;

/* Under the assumption that the cccid/tag will be in the range of 0 to sq_depth-1. */
if (!req->async && qedn_validate_cccid_in_range(qedn_conn, req->rq->tag))
return BLK_STS_NOTSUPP;

qedn_queue_request(qedn_conn, req);

return 0;
}
@@ -434,9 +464,59 @@ struct qedn_conn_ctx *qedn_get_conn_hash(struct qedn_ctx *qedn, u16 icid)
}

/* Fastpath IRQ handler */
void qedn_fw_cq_fp_handler(struct qedn_fp_queue *fp_q)
{
u16 sb_id, cq_prod_idx, cq_cons_idx;
struct qedn_ctx *qedn = fp_q->qedn;
struct nvmetcp_fw_cqe *cqe = NULL;

sb_id = fp_q->sb_id;
qed_sb_update_sb_idx(fp_q->sb_info);

/* rmb - to prevent missing new cqes */
rmb();

/* Read the latest cq_prod from the SB */
cq_prod_idx = *fp_q->cq_prod;
cq_cons_idx = qed_chain_get_cons_idx(&fp_q->cq_chain);

while (cq_cons_idx != cq_prod_idx) {
cqe = qed_chain_consume(&fp_q->cq_chain);
if (likely(cqe))
qedn_io_work_cq(qedn, cqe);
else
pr_err("Failed consuming cqe\n");

cq_cons_idx = qed_chain_get_cons_idx(&fp_q->cq_chain);

/* Check if new completions were posted */
if (unlikely(cq_prod_idx == cq_cons_idx)) {
/* rmb - to prevent missing new cqes */
rmb();

/* Update the latest cq_prod from the SB */
cq_prod_idx = *fp_q->cq_prod;
}
}
}

static void qedn_fw_cq_fq_wq_handler(struct work_struct *work)
{
struct qedn_fp_queue *fp_q = container_of(work, struct qedn_fp_queue, fw_cq_fp_wq_entry);

qedn_fw_cq_fp_handler(fp_q);
qed_sb_ack(fp_q->sb_info, IGU_INT_ENABLE, 1);
}

static irqreturn_t qedn_irq_handler(int irq, void *dev_id)
{
/* Placeholder */
struct qedn_fp_queue *fp_q = dev_id;
struct qedn_ctx *qedn = fp_q->qedn;

fp_q->cpu = smp_processor_id();

qed_sb_ack(fp_q->sb_info, IGU_INT_DISABLE, 0);
queue_work_on(fp_q->cpu, qedn->fw_cq_fp_wq, &fp_q->fw_cq_fp_wq_entry);

return IRQ_HANDLED;
}
@@ -584,6 +664,11 @@ static void qedn_free_function_queues(struct qedn_ctx *qedn)
int i;

/* Free workqueues */
destroy_workqueue(qedn->fw_cq_fp_wq);
qedn->fw_cq_fp_wq = NULL;

destroy_workqueue(qedn->nvme_req_fp_wq);
qedn->nvme_req_fp_wq = NULL;

/* Free the fast path queues*/
for (i = 0; i < qedn->num_fw_cqs; i++) {
@@ -651,7 +736,23 @@ static int qedn_alloc_function_queues(struct qedn_ctx *qedn)
u64 cq_phy_addr;
int i;

/* Place holder - IO-path workqueues */
qedn->fw_cq_fp_wq = alloc_workqueue(QEDN_FW_CQ_FP_WQ_WORKQUEUE,
WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
if (!qedn->fw_cq_fp_wq) {
rc = -ENODEV;
pr_err("Unable to create fastpath FW CQ workqueue!\n");

return rc;
}

qedn->nvme_req_fp_wq = alloc_workqueue(QEDN_NVME_REQ_FP_WQ_WORKQUEUE,
WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
if (!qedn->nvme_req_fp_wq) {
rc = -ENODEV;
pr_err("Unable to create fastpath qedn nvme workqueue!\n");

return rc;
}

qedn->fp_q_arr = kcalloc(qedn->num_fw_cqs,
sizeof(struct qedn_fp_queue), GFP_KERNEL);
@@ -679,7 +780,7 @@ static int qedn_alloc_function_queues(struct qedn_ctx *qedn)
chain_params.mode = QED_CHAIN_MODE_PBL,
chain_params.cnt_type = QED_CHAIN_CNT_TYPE_U16,
chain_params.num_elems = QEDN_FW_CQ_SIZE;
chain_params.elem_size = 64; /*Placeholder - sizeof(struct nvmetcp_fw_cqe)*/
chain_params.elem_size = sizeof(struct nvmetcp_fw_cqe);

rc = qed_ops->common->chain_alloc(qedn->cdev,
&fp_q->cq_chain,
@@ -708,8 +809,7 @@ static int qedn_alloc_function_queues(struct qedn_ctx *qedn)
sb = fp_q->sb_info->sb_virt;
fp_q->cq_prod = (u16 *)&sb->pi_array[QEDN_PROTO_CQ_PROD_IDX];
fp_q->qedn = qedn;

/* Placeholder - Init IO-path workqueue */
INIT_WORK(&fp_q->fw_cq_fp_wq_entry, qedn_fw_cq_fq_wq_handler);

/* Placeholder - Init IO-path resources */
}
