Skip to content

Commit

Permalink
nvmet-fc: defer cleanup using RCU properly
Browse files Browse the repository at this point in the history
[ Upstream commit 4049dc9 ]

When the target executes a disconnect and the host triggers a reconnect
immediately, the reconnect command still finds an existing association.

The reconnect crashes later on because nvmet_fc_delete_target_assoc
blindly removes resources while the reconnect code still wants to use them.

To address this, nvmet_fc_find_target_assoc should not be able to
look up an association which is being removed. The association list
is already under RCU lifetime management, so let's properly use it
and remove the association from the list and wait for a grace period
before cleaning up all. This means we also can drop the RCU management
on the queues, because this is now handled via the association itself.

A second step splits the execution context so that the initial disconnect
command can complete without running the reconnect code in the same
context. As usual, this is done by deferring the ->done to a workqueue.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
igaw authored and gregkh committed Mar 1, 2024
1 parent 2baa727 commit ccd49ad
Showing 1 changed file with 37 additions and 46 deletions.
83 changes: 37 additions & 46 deletions drivers/nvme/target/fc.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
struct rcu_head rcu;
Expand Down Expand Up @@ -802,14 +802,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;

if (!nvmet_fc_tgt_a_get(assoc))
goto out_free_queue;

queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
goto out_a_put;
goto out_free_queue;

queue->qid = qid;
queue->sqsize = sqsize;
Expand All @@ -831,15 +828,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;

WARN_ON(assoc->queues[qid]);
rcu_assign_pointer(assoc->queues[qid], queue);
assoc->queues[qid] = queue;

return queue;

out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
out_a_put:
nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
Expand All @@ -852,12 +847,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);

rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);

nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

nvmet_fc_tgt_a_put(queue->assoc);

destroy_workqueue(queue->work_q);

kfree_rcu(queue, rcu);
Expand Down Expand Up @@ -969,7 +960,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
queue = rcu_dereference(assoc->queues[qid]);
queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
Expand Down Expand Up @@ -1172,13 +1163,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
int i;

for (i = NVMET_NR_QUEUES; i >= 0; i--) {
if (assoc->queues[i])
nvmet_fc_delete_target_queue(assoc->queues[i]);
}

/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);

nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
Expand Down Expand Up @@ -1208,7 +1204,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
int i, terminating;

terminating = atomic_xchg(&assoc->terminating, 1);
Expand All @@ -1217,29 +1213,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;

spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
spin_unlock_irqrestore(&tgtport->lock, flags);

for (i = NVMET_NR_QUEUES; i >= 0; i--) {
rcu_read_lock();
queue = rcu_dereference(assoc->queues[i]);
if (!queue) {
rcu_read_unlock();
continue;
}
synchronize_rcu();

if (!nvmet_fc_tgt_q_get(queue)) {
rcu_read_unlock();
continue;
}
rcu_read_unlock();
nvmet_fc_delete_target_queue(queue);
nvmet_fc_tgt_q_put(queue);
/* ensure all in-flight I/Os have been processed */
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
if (assoc->queues[i])
flush_workqueue(assoc->queues[i]->work_q);
}

dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);

nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
Expand Down Expand Up @@ -1492,9 +1480,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
queue_work(nvmet_wq, &assoc->del_work);
nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
Expand Down Expand Up @@ -1547,9 +1534,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
queue_work(nvmet_wq, &assoc->del_work);
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);

Expand Down Expand Up @@ -1581,7 +1567,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)

rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
queue = rcu_dereference(assoc->queues[0]);
queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
Expand All @@ -1593,9 +1579,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);

if (found_ctrl) {
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
queue_work(nvmet_wq, &assoc->del_work);
nvmet_fc_tgt_a_put(assoc);
return;
}

Expand Down Expand Up @@ -1625,6 +1610,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);

flush_workqueue(nvmet_wq);

/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
Expand Down Expand Up @@ -1870,9 +1857,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);

/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(assoc);

/*
* The rules for LS response says the response cannot
* go back until ABTS's have been sent for all outstanding
Expand All @@ -1887,8 +1871,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);

nvmet_fc_delete_target_assoc(assoc);

if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
Expand All @@ -1904,6 +1886,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}

queue_work(nvmet_wq, &assoc->del_work);
nvmet_fc_tgt_a_put(assoc);

return false;
}

Expand Down Expand Up @@ -2902,6 +2887,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)

nvmet_fc_portentry_unbind(pe);

/* terminate any outstanding associations */
__nvmet_fc_free_assocs(pe->tgtport);

kfree(pe);
}

Expand Down Expand Up @@ -2933,6 +2921,9 @@ static int __init nvmet_fc_init_module(void)

static void __exit nvmet_fc_exit_module(void)
{
/* ensure any shutdown operation, e.g. delete ctrls have finished */
flush_workqueue(nvmet_wq);

/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
Expand Down

0 comments on commit ccd49ad

Please sign in to comment.