Skip to content

Commit 3744ebd

Browse files
agarg2008 authored and Sasha Levin committed
gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL
commit fb868db upstream.

In DQ-QPL mode, gve_tx_clean_pending_packets() incorrectly uses the RDA buffer cleanup path. It iterates num_bufs times and attempts to unmap entries in the dma array. This leads to two issues:

1. The dma array shares storage with tx_qpl_buf_ids (union). Interpreting buffer IDs as DMA addresses results in attempting to unmap incorrect memory locations.

2. num_bufs in QPL mode (counting 2K chunks) can significantly exceed the size of the dma array, causing out-of-bounds access warnings (trace below is how we noticed this issue).

UBSAN: array-index-out-of-bounds in drivers/net/ethernet/google/gve/gve_tx_dqo.c:178:5
index 18 is out of range for type 'dma_addr_t[18]' (aka 'unsigned long long[18]')
Workqueue: gve gve_service_task [gve]
Call Trace:
<TASK>
dump_stack_lvl+0x33/0xa0
__ubsan_handle_out_of_bounds+0xdc/0x110
gve_tx_stop_ring_dqo+0x182/0x200 [gve]
gve_close+0x1be/0x450 [gve]
gve_reset+0x99/0x120 [gve]
gve_service_task+0x61/0x100 [gve]
process_scheduled_works+0x1e9/0x380

Fix this by properly checking for QPL mode and delegating to gve_free_tx_qpl_bufs() to reclaim the buffers.

Cc: stable@vger.kernel.org
Fixes: a6fb8d5 ("gve: Tx path for DQO-QPL")
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20260220215324.1631350-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 3de7c10 commit 3744ebd

File tree

1 file changed

+25
-31
lines changed

1 file changed

+25
-31
lines changed

drivers/net/ethernet/google/gve/gve_tx_dqo.c

Lines changed: 25 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,25 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
167167
}
168168
}
169169

170+
static void gve_unmap_packet(struct device *dev,
171+
struct gve_tx_pending_packet_dqo *pkt)
172+
{
173+
int i;
174+
175+
if (!pkt->num_bufs)
176+
return;
177+
178+
/* SKB linear portion is guaranteed to be mapped */
179+
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
180+
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
181+
for (i = 1; i < pkt->num_bufs; i++) {
182+
netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
183+
dma_unmap_len(pkt, len[i]),
184+
DMA_TO_DEVICE, 0);
185+
}
186+
pkt->num_bufs = 0;
187+
}
188+
170189
/* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
171190
*/
172191
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
@@ -176,21 +195,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
176195
for (i = 0; i < tx->dqo.num_pending_packets; i++) {
177196
struct gve_tx_pending_packet_dqo *cur_state =
178197
&tx->dqo.pending_packets[i];
179-
int j;
180-
181-
for (j = 0; j < cur_state->num_bufs; j++) {
182-
if (j == 0) {
183-
dma_unmap_single(tx->dev,
184-
dma_unmap_addr(cur_state, dma[j]),
185-
dma_unmap_len(cur_state, len[j]),
186-
DMA_TO_DEVICE);
187-
} else {
188-
dma_unmap_page(tx->dev,
189-
dma_unmap_addr(cur_state, dma[j]),
190-
dma_unmap_len(cur_state, len[j]),
191-
DMA_TO_DEVICE);
192-
}
193-
}
198+
199+
if (tx->dqo.qpl)
200+
gve_free_tx_qpl_bufs(tx, cur_state);
201+
else
202+
gve_unmap_packet(tx->dev, cur_state);
203+
194204
if (cur_state->skb) {
195205
dev_consume_skb_any(cur_state->skb);
196206
cur_state->skb = NULL;
@@ -1160,22 +1170,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
11601170
}
11611171
}
11621172

1163-
static void gve_unmap_packet(struct device *dev,
1164-
struct gve_tx_pending_packet_dqo *pkt)
1165-
{
1166-
int i;
1167-
1168-
/* SKB linear portion is guaranteed to be mapped */
1169-
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
1170-
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
1171-
for (i = 1; i < pkt->num_bufs; i++) {
1172-
netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
1173-
dma_unmap_len(pkt, len[i]),
1174-
DMA_TO_DEVICE, 0);
1175-
}
1176-
pkt->num_bufs = 0;
1177-
}
1178-
11791173
/* Completion types and expected behavior:
11801174
* No Miss compl + Packet compl = Packet completed normally.
11811175
* Miss compl + Re-inject compl = Packet completed normally.

0 commit comments

Comments
 (0)