|
44 | 44 |
|
45 | 45 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ |
46 | 46 | | MACB_BIT(ISR_ROVR)) |
| 47 | +#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ |
| 48 | + | MACB_BIT(ISR_RLE) \ |
| 49 | + | MACB_BIT(TXERR)) |
| 50 | +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) |
| 51 | + |
| 52 | +/* |
|       53 | + * Graceful stop timeout in us. We should allow up to |
| 54 | + * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) |
| 55 | + */ |
| 56 | +#define MACB_HALT_TIMEOUT 1230 |
47 | 57 |
|
48 | 58 | /* Ring buffer accessors */ |
49 | 59 | static unsigned int macb_tx_ring_wrap(unsigned int index) |
@@ -339,66 +349,113 @@ static void macb_update_stats(struct macb *bp) |
339 | 349 | *p += __raw_readl(reg); |
340 | 350 | } |
341 | 351 |
|
342 | | -static void macb_tx(struct macb *bp) |
| 352 | +static int macb_halt_tx(struct macb *bp) |
343 | 353 | { |
344 | | - unsigned int tail; |
345 | | - unsigned int head; |
346 | | - u32 status; |
| 354 | + unsigned long halt_time, timeout; |
| 355 | + u32 status; |
347 | 356 |
|
348 | | - status = macb_readl(bp, TSR); |
349 | | - macb_writel(bp, TSR, status); |
| 357 | + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); |
350 | 358 |
|
351 | | - netdev_vdbg(bp->dev, "macb_tx status = 0x%03lx\n", (unsigned long)status); |
| 359 | + timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); |
| 360 | + do { |
| 361 | + halt_time = jiffies; |
| 362 | + status = macb_readl(bp, TSR); |
| 363 | + if (!(status & MACB_BIT(TGO))) |
| 364 | + return 0; |
352 | 365 |
|
353 | | - if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { |
354 | | - int i; |
355 | | - netdev_err(bp->dev, "TX %s, resetting buffers\n", |
356 | | - status & MACB_BIT(UND) ? |
357 | | - "underrun" : "retry limit exceeded"); |
| 366 | + usleep_range(10, 250); |
| 367 | + } while (time_before(halt_time, timeout)); |
358 | 368 |
|
359 | | - /* Transfer ongoing, disable transmitter, to avoid confusion */ |
360 | | - if (status & MACB_BIT(TGO)) |
361 | | - macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); |
| 369 | + return -ETIMEDOUT; |
| 370 | +} |
362 | 371 |
|
363 | | - head = bp->tx_head; |
| 372 | +static void macb_tx_error_task(struct work_struct *work) |
| 373 | +{ |
| 374 | + struct macb *bp = container_of(work, struct macb, tx_error_task); |
| 375 | + struct macb_tx_skb *tx_skb; |
| 376 | + struct sk_buff *skb; |
| 377 | + unsigned int tail; |
364 | 378 |
|
365 | | - /*Mark all the buffer as used to avoid sending a lost buffer*/ |
366 | | - for (i = 0; i < TX_RING_SIZE; i++) |
367 | | - bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 379 | + netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n", |
| 380 | + bp->tx_tail, bp->tx_head); |
368 | 381 |
|
369 | | - /* Add wrap bit */ |
370 | | - bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 382 | + /* Make sure nobody is trying to queue up new packets */ |
| 383 | + netif_stop_queue(bp->dev); |
371 | 384 |
|
372 | | - /* free transmit buffer in upper layer*/ |
373 | | - for (tail = bp->tx_tail; tail != head; tail++) { |
374 | | - struct macb_tx_skb *tx_skb; |
375 | | - struct sk_buff *skb; |
| 385 | + /* |
| 386 | + * Stop transmission now |
| 387 | + * (in case we have just queued new packets) |
| 388 | + */ |
| 389 | + if (macb_halt_tx(bp)) |
|      390 | +		/* Just complain for now; reinitializing the TX path might help */ |
| 391 | + netdev_err(bp->dev, "BUG: halt tx timed out\n"); |
376 | 392 |
|
377 | | - rmb(); |
| 393 | + /* No need for the lock here as nobody will interrupt us anymore */ |
378 | 394 |
|
379 | | - tx_skb = macb_tx_skb(bp, tail); |
380 | | - skb = tx_skb->skb; |
| 395 | + /* |
|      396 | +	 * Process frames in the TX queue, including the ones that caused the error. |
| 397 | + * Free transmit buffers in upper layer. |
| 398 | + */ |
| 399 | + for (tail = bp->tx_tail; tail != bp->tx_head; tail++) { |
| 400 | + struct macb_dma_desc *desc; |
| 401 | + u32 ctrl; |
381 | 402 |
|
382 | | - dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, |
383 | | - skb->len, DMA_TO_DEVICE); |
384 | | - tx_skb->skb = NULL; |
385 | | - dev_kfree_skb_irq(skb); |
386 | | - } |
| 403 | + desc = macb_tx_desc(bp, tail); |
| 404 | + ctrl = desc->ctrl; |
| 405 | + tx_skb = macb_tx_skb(bp, tail); |
| 406 | + skb = tx_skb->skb; |
387 | 407 |
|
388 | | - bp->tx_head = bp->tx_tail = 0; |
| 408 | + if (ctrl & MACB_BIT(TX_USED)) { |
| 409 | + netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", |
| 410 | + macb_tx_ring_wrap(tail), skb->data); |
| 411 | + bp->stats.tx_packets++; |
| 412 | + bp->stats.tx_bytes += skb->len; |
| 413 | + } else { |
| 414 | + /* |
| 415 | + * "Buffers exhausted mid-frame" errors may only happen |
| 416 | + * if the driver is buggy, so complain loudly about those. |
| 417 | + * Statistics are updated by hardware. |
| 418 | + */ |
| 419 | + if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) |
| 420 | + netdev_err(bp->dev, |
| 421 | + "BUG: TX buffers exhausted mid-frame\n"); |
389 | 422 |
|
390 | | - /* Enable the transmitter again */ |
391 | | - if (status & MACB_BIT(TGO)) |
392 | | - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); |
| 423 | + desc->ctrl = ctrl | MACB_BIT(TX_USED); |
| 424 | + } |
| 425 | + |
| 426 | + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, |
| 427 | + DMA_TO_DEVICE); |
| 428 | + tx_skb->skb = NULL; |
| 429 | + dev_kfree_skb(skb); |
393 | 430 | } |
394 | 431 |
|
395 | | - if (!(status & MACB_BIT(COMP))) |
396 | | - /* |
397 | | - * This may happen when a buffer becomes complete |
398 | | - * between reading the ISR and scanning the |
399 | | - * descriptors. Nothing to worry about. |
400 | | - */ |
401 | | - return; |
| 432 | + /* Make descriptor updates visible to hardware */ |
| 433 | + wmb(); |
| 434 | + |
| 435 | + /* Reinitialize the TX desc queue */ |
| 436 | + macb_writel(bp, TBQP, bp->tx_ring_dma); |
| 437 | + /* Make TX ring reflect state of hardware */ |
| 438 | + bp->tx_head = bp->tx_tail = 0; |
| 439 | + |
| 440 | + /* Now we are ready to start transmission again */ |
| 441 | + netif_wake_queue(bp->dev); |
| 442 | + |
| 443 | + /* Housework before enabling TX IRQ */ |
| 444 | + macb_writel(bp, TSR, macb_readl(bp, TSR)); |
| 445 | + macb_writel(bp, IER, MACB_TX_INT_FLAGS); |
| 446 | +} |
| 447 | + |
| 448 | +static void macb_tx_interrupt(struct macb *bp) |
| 449 | +{ |
| 450 | + unsigned int tail; |
| 451 | + unsigned int head; |
| 452 | + u32 status; |
| 453 | + |
| 454 | + status = macb_readl(bp, TSR); |
| 455 | + macb_writel(bp, TSR, status); |
| 456 | + |
| 457 | + netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", |
| 458 | + (unsigned long)status); |
402 | 459 |
|
403 | 460 | head = bp->tx_head; |
404 | 461 | for (tail = bp->tx_tail; tail != head; tail++) { |
@@ -638,9 +695,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) |
638 | 695 | } |
639 | 696 | } |
640 | 697 |
|
641 | | - if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | |
642 | | - MACB_BIT(ISR_RLE))) |
643 | | - macb_tx(bp); |
| 698 | + if (unlikely(status & (MACB_TX_ERR_FLAGS))) { |
| 699 | + macb_writel(bp, IDR, MACB_TX_INT_FLAGS); |
| 700 | + schedule_work(&bp->tx_error_task); |
| 701 | + break; |
| 702 | + } |
| 703 | + |
| 704 | + if (status & MACB_BIT(TCOMP)) |
| 705 | + macb_tx_interrupt(bp); |
644 | 706 |
|
645 | 707 | /* |
646 | 708 | * Link change detection isn't possible with RMII, so we'll |
@@ -970,13 +1032,8 @@ static void macb_init_hw(struct macb *bp) |
970 | 1032 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); |
971 | 1033 |
|
972 | 1034 | /* Enable interrupts */ |
973 | | - macb_writel(bp, IER, (MACB_BIT(RCOMP) |
974 | | - | MACB_BIT(RXUBR) |
975 | | - | MACB_BIT(ISR_TUND) |
976 | | - | MACB_BIT(ISR_RLE) |
977 | | - | MACB_BIT(TXERR) |
978 | | - | MACB_BIT(TCOMP) |
979 | | - | MACB_BIT(ISR_ROVR) |
| 1035 | + macb_writel(bp, IER, (MACB_RX_INT_FLAGS |
| 1036 | + | MACB_TX_INT_FLAGS |
980 | 1037 | | MACB_BIT(HRESP))); |
981 | 1038 |
|
982 | 1039 | } |
@@ -1428,6 +1485,7 @@ static int __init macb_probe(struct platform_device *pdev) |
1428 | 1485 | bp->dev = dev; |
1429 | 1486 |
|
1430 | 1487 | spin_lock_init(&bp->lock); |
| 1488 | + INIT_WORK(&bp->tx_error_task, macb_tx_error_task); |
1431 | 1489 |
|
1432 | 1490 | bp->pclk = clk_get(&pdev->dev, "pclk"); |
1433 | 1491 | if (IS_ERR(bp->pclk)) { |
|
0 commit comments