Commit 1bf3b02

matnyman authored and gregkh committed
xhci: dbc: decouple endpoint allocation from initialization
[ Upstream commit 220a0ff ]

Decouple allocation of the endpoint ring buffer from initialization of
the buffer, and initialization of the endpoint context parts from the
rest of the contexts.

This allows the driver to clear up and reinitialize endpoint rings
after disconnect without reallocating everything.

This is a prerequisite for the next patch, which prevents the transfer
ring from filling up with cancelled (no-op) TRBs if a debug cable is
reconnected several times without transferring anything.

Cc: stable@vger.kernel.org
Fixes: dfba217 ("usb: xhci: Add DbC support in xHCI driver")
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20250902105306.877476-2-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent d6ef1c6 commit 1bf3b02
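
The point of the split: after a disconnect, the driver can wipe and reinitialize the existing rings and endpoint contexts in place instead of freeing and reallocating them. Below is a minimal sketch of how a reconnect path could combine the two new helpers. The xhci_dbc_reinit_rings() wrapper is hypothetical and not part of this patch; xhci_dbc_ring_init(), xhci_dbc_init_ep_contexts(), and the dbc->ring_* fields are the ones this commit and the driver define.

/*
 * Hypothetical sketch, not part of this commit: reinitialize the
 * already-allocated DbC rings on reconnect instead of reallocating.
 */
static void xhci_dbc_reinit_rings(struct xhci_dbc *dbc)
{
	/* Clear stale TRBs, restore the link TRB, reset cycle state. */
	xhci_dbc_ring_init(dbc->ring_evt);
	xhci_dbc_ring_init(dbc->ring_in);
	xhci_dbc_ring_init(dbc->ring_out);

	/* Re-point both bulk endpoint contexts at the fresh enqueue pointers. */
	xhci_dbc_init_ep_contexts(dbc);
}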

File tree

1 file changed: +46 −25 lines changed

drivers/usb/host/xhci-dbgcap.c

Lines changed: 46 additions & 25 deletions

@@ -86,13 +86,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
 	return string_length;
 }
 
+static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
+{
+	struct xhci_ep_ctx	*ep_ctx;
+	unsigned int		max_burst;
+	dma_addr_t		deq;
+
+	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+
+	/* Populate bulk out endpoint context: */
+	ep_ctx = dbc_bulkout_ctx(dbc);
+	deq = dbc_bulkout_enq(dbc);
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+	/* Populate bulk in endpoint context: */
+	ep_ctx = dbc_bulkin_ctx(dbc);
+	deq = dbc_bulkin_enq(dbc);
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+}
+
 static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 {
 	struct dbc_info_context	*info;
-	struct xhci_ep_ctx	*ep_ctx;
 	u32			dev_info;
-	dma_addr_t		deq, dma;
-	unsigned int		max_burst;
+	dma_addr_t		dma;
 
 	if (!dbc)
 		return;
@@ -106,20 +127,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
 	info->length = cpu_to_le32(string_length);
 
-	/* Populate bulk out endpoint context: */
-	ep_ctx = dbc_bulkout_ctx(dbc);
-	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
-	deq = dbc_bulkout_enq(dbc);
-	ep_ctx->ep_info = 0;
-	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
-	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
-
-	/* Populate bulk in endpoint context: */
-	ep_ctx = dbc_bulkin_ctx(dbc);
-	deq = dbc_bulkin_enq(dbc);
-	ep_ctx->ep_info = 0;
-	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
-	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+	/* Populate bulk in and out endpoint contexts: */
+	xhci_dbc_init_ep_contexts(dbc);
 
 	/* Set DbC context and info registers: */
 	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@@ -421,6 +430,23 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
 	return ctx;
 }
 
+static void xhci_dbc_ring_init(struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+
+	/* clear all trbs on ring in case of old ring */
+	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+
+	/* Only event ring does not use link TRB */
+	if (ring->type != TYPE_EVENT) {
+		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+	}
+	xhci_initialize_ring_info(ring, 1);
+}
+
 static struct xhci_ring *
 xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 {
@@ -449,15 +475,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
 
 	seg->dma = dma;
 
-	/* Only event ring does not use link TRB */
-	if (type != TYPE_EVENT) {
-		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
-
-		trb->link.segment_ptr = cpu_to_le64(dma);
-		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
-	}
 	INIT_LIST_HEAD(&ring->td_list);
-	xhci_initialize_ring_info(ring, 1);
+
+	xhci_dbc_ring_init(ring);
+
 	return ring;
 dma_fail:
 	kfree(seg);
