
Commit 9cfeeb5

Yangchun Fu authored and davem330 committed
gve: Fixes DMA synchronization.
Syncs the DMA buffers properly so that the CPU and the device see the most up-to-date data.

Signed-off-by: Yangchun Fu <yangchun@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent a904a06
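For background, the streaming DMA mapping API requires an explicit ownership handoff each time a long-lived mapping is reused: the CPU may only read a DMA_FROM_DEVICE buffer after dma_sync_single_for_cpu(), and the device may only rely on CPU writes to a DMA_TO_DEVICE buffer after dma_sync_single_for_device(). A minimal sketch of that pattern follows; the function names and the dev/bus_addr variables are illustrative, not taken from the driver:

#include <linux/dma-mapping.h>

/* Hypothetical rx-side handoff: hand the page back to the CPU before
 * reading what the device wrote into it.
 */
static void example_rx_sync(struct device *dev, dma_addr_t bus_addr)
{
	dma_sync_single_for_cpu(dev, bus_addr, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the buffer ... */
}

/* Hypothetical tx-side handoff: after the CPU fills the buffer, flush
 * it toward the device so the NIC observes the writes.
 */
static void example_tx_sync(struct device *dev, dma_addr_t bus_addr)
{
	/* ... the CPU writes the frame into the buffer ... */
	dma_sync_single_for_device(dev, bus_addr, PAGE_SIZE, DMA_TO_DEVICE);
}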

2 files changed, 24 insertions(+), 2 deletions(-)

drivers/net/ethernet/google/gve/gve_rx.c

Lines changed: 2 additions & 0 deletions
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
 	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
 	page_info = &rx->data.page_info[idx];
+	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+				PAGE_SIZE, DMA_FROM_DEVICE);
 
 	/* gvnic can only receive into registered segments. If the buffer
 	 * can't be recycled, our only choice is to copy the data out of
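Note that gve keeps its receive pages mapped for the lifetime of the queue-page list rather than remapping per packet, so on platforms where DMA is not cache-coherent (or where bounce buffering such as SWIOTLB is in use) the CPU could otherwise read stale data here; the dma_sync_single_for_cpu() call above transfers ownership of the page to the CPU before the data is copied out.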

drivers/net/ethernet/google/gve/gve_tx.c

Lines changed: 22 additions & 2 deletions
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+				    u64 iov_offset, u64 iov_len)
+{
+	dma_addr_t dma;
+	u64 addr;
+
+	for (addr = iov_offset; addr < iov_offset + iov_len;
+	     addr += PAGE_SIZE) {
+		dma = page_buses[addr / PAGE_SIZE];
+		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+	}
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+			  struct device *dev)
 {
 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
 	union gve_tx_desc *pkt_desc, *seg_desc;
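The new helper syncs every page that a region of the TX FIFO touches: the FIFO is backed by a queue-page list whose pages were mapped individually, so page_buses[addr / PAGE_SIZE] looks up the bus address of the page containing byte addr, and the loop steps through the range one PAGE_SIZE at a time. gve_tx_add_skb() also gains a struct device * parameter so the call sites below can pass in the device that owns the mappings.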
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 	skb_copy_bits(skb, 0,
 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
 		      hlen);
+	gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+				info->iov[hdr_nfrags - 1].iov_offset,
+				info->iov[hdr_nfrags - 1].iov_len);
 	copy_offset = hlen;
 
 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 		skb_copy_bits(skb, copy_offset,
 			      tx->tx_fifo.base + info->iov[i].iov_offset,
 			      info->iov[i].iov_len);
+		gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+					info->iov[i].iov_offset,
+					info->iov[i].iov_len);
 		copy_offset += info->iov[i].iov_len;
 	}
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
 		return NETDEV_TX_BUSY;
 	}
-	nsegs = gve_tx_add_skb(tx, skb);
+	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
 	skb_tx_timestamp(skb);
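To make the page walk concrete, here is a small standalone sketch (plain C with illustrative values, not driver code) that prints which page_buses[] slots the loop in gve_dma_sync_for_device() visits for a given FIFO range:

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* Assumed example values: a copy starting 6000 bytes into the
	 * FIFO and spanning 5000 bytes, i.e. bytes 6000..10999.
	 */
	unsigned long long iov_offset = 6000, iov_len = 5000, addr;

	/* Same stride as the driver's loop: one sync per page touched. */
	for (addr = iov_offset; addr < iov_offset + iov_len; addr += PAGE_SIZE)
		printf("sync page_buses[%llu]\n", addr / PAGE_SIZE);
	/* Prints slots 1 and 2: the two pages that this range straddles. */
	return 0;
}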
