Commit 49b11bc3d43eb287fc9d78e1a892e97288980d49
Committed by: David S. Miller
Parent: db17f39564
Exists in: master and 7 other branches
SGISEEQ: use cached memory access to make driver work on IP28
- Use inline functions for dma_sync_* instead of macros
- Added a Kconfig change to make selection for similar SGI boxes easier

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Showing 2 changed files with 35 additions and 31 deletions
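For context on the subject line: IP28 is the R10000-based Indigo2, where speculative execution makes the uncached descriptor access the driver previously relied on unsafe, so the descriptor rings now live in cached memory and the cache is synchronized by hand at every CPU/device handoff. A condensed sketch of that discipline, assembled from the receive-loop hunks below (kernel context assumed; not a standalone program):

	/* Before the CPU reads a descriptor the HPC3 DMA engine may have
	 * written, refresh our cached copy of it.
	 */
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		/* ... process the received frame ... */

		/* Hand the descriptor back: flush the CPU's update out of
		 * the cache so the device sees it.
		 */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);

		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}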
drivers/net/Kconfig
@@ -1797,7 +1797,7 @@
 
 config SGISEEQ
 	tristate "SGI Seeq ethernet controller support"
-	depends on SGI_IP22
+	depends on SGI_HAS_SEEQ
 	help
 	  Say Y here if you have a Seeq based Ethernet network card. This is
 	  used in many Silicon Graphics machines.
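The new dependency is an arch-side symbol rather than a single machine config: each SGI machine type that carries a Seeq chip selects it, which is what makes "selection for similar SGI boxes easier". A sketch of the corresponding arch/mips/Kconfig wiring (prompts and placement here are approximations for illustration, not quoted from the patch):

config SGI_HAS_SEEQ
	bool

config SGI_IP22
	bool "SGI IP22 (Indy/Indigo2)"
	select SGI_HAS_SEEQ
	# other selects omitted

config SGI_IP28
	bool "SGI IP28 (Indigo2 R10k)"
	select SGI_HAS_SEEQ
	# other selects omitted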
drivers/net/sgiseeq.c
@@ -56,14 +56,6 @@
 			  (dma_addr_t)((unsigned long)(v) - \
 			                (unsigned long)((sp)->rx_desc)))
 
-#define DMA_SYNC_DESC_CPU(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
  * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
  */
@@ -116,6 +108,18 @@
 	spinlock_t tx_lock;
 };
 
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_TO_DEVICE);
+}
+
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
 	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
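A note on the two directions, since the new helpers differ only in that argument (summary mine, not part of the patch):

/* dma_sync_desc_cpu() -- DMA_FROM_DEVICE: before the CPU reads a
 * descriptor, bring its cached copy up to date so the CPU observes
 * what the HPC3 DMA engine wrote to memory.
 *
 * dma_sync_desc_dev() -- DMA_TO_DEVICE: after the CPU writes a
 * descriptor, push the dirty cache lines back to memory so the DMA
 * engine observes the update.
 */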
@@ -184,7 +188,7 @@
 	/* Setup tx ring. */
 	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
 		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
 	}
 
 	/* And now the rx ring. */
@@ -203,10 +207,10 @@
 			sp->rx_desc[i].rdma.pbuf = dma_addr;
 		}
 		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
 	}
 	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
 	return 0;
 }
 
@@ -341,7 +345,7 @@
 
 	/* Service every received packet. */
 	rd = &sp->rx_desc[sp->rx_new];
-	DMA_SYNC_DESC_CPU(dev, rd);
+	dma_sync_desc_cpu(dev, rd);
 	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
 		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
 		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
@@ -397,16 +401,16 @@
 		/* Return the entry to the ring pool. */
 		rd->rdma.cntinfo = RCNTINFO_INIT;
 		sp->rx_new = NEXT_RX(sp->rx_new);
-		DMA_SYNC_DESC_DEV(dev, rd);
+		dma_sync_desc_dev(dev, rd);
 		rd = &sp->rx_desc[sp->rx_new];
-		DMA_SYNC_DESC_CPU(dev, rd);
+		dma_sync_desc_cpu(dev, rd);
 	}
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
 	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	rx_maybe_restart(sp, hregs, sregs);
 }
 
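The tail of this hunk moves the ring's end marker: it clears HPCDMA_EOR on the old last descriptor and sets it on the one just behind rx_new, bracketing each read-modify-write with a cpu/dev sync pair so the device never observes a stale marker. Spelled out with comments (annotation mine, code as in the hunk above):

	/* Fetch the old end-of-ring descriptor, clear its marker,
	 * and flush the change back for the device.
	 */
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);

	/* Then mark the descriptor just behind rx_new as the new end. */
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);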
@@ -433,12 +437,12 @@
 	 * is not active!
 	 */
 	td = &sp->tx_desc[i];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
 	      (HPCDMA_XIU | HPCDMA_ETXD)) {
 		i = NEXT_TX(i);
 		td = &sp->tx_desc[i];
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 	}
 	if (td->tdma.cntinfo & HPCDMA_XIU) {
 		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
@@ -470,7 +474,7 @@
 	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
 		td = &sp->tx_desc[j];
 
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
 			break;
 		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
@@ -488,7 +492,7 @@
 			dev_kfree_skb_any(td->skb);
 			td->skb = NULL;
 		}
-		DMA_SYNC_DESC_DEV(dev, td);
+		dma_sync_desc_dev(dev, td);
 	}
 }
 
@@ -598,7 +602,7 @@
 	dev->stats.tx_bytes += len;
 	entry = sp->tx_new;
 	td = &sp->tx_desc[entry];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 
 	/* Create entry. There are so many races with adding a new
 	 * descriptor to the chain:
@@ -618,14 +622,14 @@
 			       len, DMA_TO_DEVICE);
 	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
 	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
-	DMA_SYNC_DESC_DEV(dev, td);
+	dma_sync_desc_dev(dev, td);
 	if (sp->tx_old != sp->tx_new) {
 		struct sgiseeq_tx_desc *backend;
 
 		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
-		DMA_SYNC_DESC_CPU(dev, backend);
+		dma_sync_desc_cpu(dev, backend);
 		backend->tdma.cntinfo &= ~HPCDMA_EOX;
-		DMA_SYNC_DESC_DEV(dev, backend);
+		dma_sync_desc_dev(dev, backend);
 	}
 	sp->tx_new = NEXT_TX(sp->tx_new);	/* Advance. */
 
@@ -681,11 +685,11 @@
 	while (i < (nbufs - 1)) {
 		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].tdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static inline void setup_rx_ring(struct net_device *dev,
@@ -698,12 +702,12 @@
 	while (i < (nbufs - 1)) {
 		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].rdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].rdma.pbuf = 0;
 	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static int __init sgiseeq_probe(struct platform_device *pdev)