Commit b89b6a8

Bhaumik Bhatt authored and Mani-Sadhasivam committed
bus: mhi: host: Add spinlock to protect WP access when queueing TREs
Protect WP accesses such that multiple threads queueing buffers for incoming data do not race.

Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once __local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, take the irqsave lock only after the TRE is generated, to avoid running write_unlock_bh() while the irqsave lock is held.

Cc: stable@vger.kernel.org
Fixes: 189ff97 ("bus: mhi: core: Add support for data transfer")
Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/1702276972-41296-2-git-send-email-quic_qianyu@quicinc.com
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
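To make the ordering concern concrete, here is a minimal, hypothetical sketch (not the driver code itself): the two rwlocks stand in for mhi_cntrl->pm_lock and mhi_chan->lock, and the critical-section bodies are placeholder comments. Nesting write_unlock_bh() inside a read_lock_irqsave() section is exactly what the patch avoids by generating the TRE before taking the pm_lock.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(pm_lock);          /* stands in for mhi_cntrl->pm_lock */
static DEFINE_RWLOCK(chan_lock);        /* stands in for mhi_chan->lock */

/*
 * Problematic nesting: with CONFIG_TRACE_IRQFLAGS, write_unlock_bh() ends up
 * in __local_bh_enable_ip(), which can enable IRQs while the outer irqsave
 * section still expects them to stay disabled.
 */
static void nested_ordering(void)
{
        unsigned long flags;

        read_lock_irqsave(&pm_lock, flags);     /* IRQs off, flags saved */
        write_lock_bh(&chan_lock);              /* BHs off */
        /* ... generate TRE, advance WP ... */
        write_unlock_bh(&chan_lock);            /* may re-enable IRQs here */
        read_unlock_irqrestore(&pm_lock, flags);
}

/*
 * Ordering the patch uses: the BH-protected WP update completes first, and
 * the irqsave lock is taken only afterwards for the doorbell/PM work.
 */
static void patched_ordering(void)
{
        unsigned long flags;

        write_lock_bh(&chan_lock);
        /* ... generate TRE, advance WP ... */
        write_unlock_bh(&chan_lock);

        read_lock_irqsave(&pm_lock, flags);
        /* ... ring doorbell, runtime PM handling ... */
        read_unlock_irqrestore(&pm_lock, flags);
}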
1 parent 327ec5f commit b89b6a8


drivers/bus/mhi/host/main.c

Lines changed: 13 additions & 9 deletions
@@ -1124,17 +1124,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
                 return -EIO;
 
-        read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
         ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-        if (unlikely(ret)) {
-                ret = -EAGAIN;
-                goto exit_unlock;
-        }
+        if (unlikely(ret))
+                return -EAGAIN;
 
         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
         if (unlikely(ret))
-                goto exit_unlock;
+                return ret;
+
+        read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
         /* Packet is queued, take a usage ref to exit M3 if necessary
          * for host->device buffer, balanced put is done on buffer completion
@@ -1154,7 +1152,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
         if (dir == DMA_FROM_DEVICE)
                 mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
         return ret;
@@ -1206,6 +1203,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
         int eot, eob, chain, bei;
         int ret;
 
+        /* Protect accesses for reading and incrementing WP */
+        write_lock_bh(&mhi_chan->lock);
+
         buf_ring = &mhi_chan->buf_ring;
         tre_ring = &mhi_chan->tre_ring;
 
@@ -1223,8 +1223,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 
         if (!info->pre_mapped) {
                 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-                if (ret)
+                if (ret) {
+                        write_unlock_bh(&mhi_chan->lock);
                         return ret;
+                }
         }
 
         eob = !!(flags & MHI_EOB);
@@ -1241,6 +1243,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
         mhi_add_ring_element(mhi_cntrl, tre_ring);
         mhi_add_ring_element(mhi_cntrl, buf_ring);
 
+        write_unlock_bh(&mhi_chan->lock);
+
         return 0;
 }
 