Commit d3fa72e4556ec1f04e46a0d561d9e785ecaa173d
Committed by: Linus Torvalds
1 parent: f67637ee4b
[PATCH] Pass struct dev pointer to dma_cache_sync()
dma_cache_sync() is ill-designed in that it does not have a struct device
pointer argument, which makes proper support for systems that consist of a
mix of coherent and non-coherent DMA devices hard.  Change dma_cache_sync
to take a struct device pointer as first argument and fix all its callers
to pass it.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
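As a minimal before/after sketch of the interface change (illustrative only:
dev, vaddr and len stand for whatever struct device and buffer a real caller
already holds, as the driver hunks below show):

        /* Before: no device argument, so the arch code cannot tell
         * whether this particular device is cache-coherent. */
        dma_cache_sync(vaddr, len, DMA_TO_DEVICE);

        /* After: the device doing the DMA is passed explicitly. */
        dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);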
Showing 27 changed files with 137 additions and 126 deletions
- Documentation/DMA-API.txt
- arch/avr32/mm/dma-coherent.c
- arch/mips/mm/dma-coherent.c
- arch/mips/mm/dma-ip27.c
- arch/mips/mm/dma-ip32.c
- arch/mips/mm/dma-noncoherent.c
- drivers/net/lasi_82596.c
- drivers/scsi/53c700.c
- drivers/scsi/53c700.h
- drivers/serial/mpsc.c
- include/asm-alpha/dma-mapping.h
- include/asm-avr32/dma-mapping.h
- include/asm-cris/dma-mapping.h
- include/asm-frv/dma-mapping.h
- include/asm-generic/dma-mapping.h
- include/asm-i386/dma-mapping.h
- include/asm-ia64/dma-mapping.h
- include/asm-m68k/dma-mapping.h
- include/asm-mips/dma-mapping.h
- include/asm-parisc/dma-mapping.h
- include/asm-powerpc/dma-mapping.h
- include/asm-sh/dma-mapping.h
- include/asm-sh64/dma-mapping.h
- include/asm-sparc64/dma-mapping.h
- include/asm-um/dma-mapping.h
- include/asm-x86_64/dma-mapping.h
- include/asm-xtensa/dma-mapping.h
Documentation/DMA-API.txt
@@ -459,7 +459,7 @@
 memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
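(A hedged usage sketch of the documented call, assuming a buffer obtained
from dma_alloc_noncoherent(); buf, handle and len are illustrative names:

        void *buf = dma_alloc_noncoherent(dev, len, &handle, GFP_KERNEL);

        /* CPU fills the buffer, then writes it back for the device. */
        memcpy(buf, data, len);
        dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
)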
arch/avr32/mm/dma-coherent.c
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
        /*
         * No need to sync an uncached area
arch/mips/mm/dma-coherent.c
arch/mips/mm/dma-ip27.c
arch/mips/mm/dma-ip32.c
@@ -370,7 +370,8 @@
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
arch/mips/mm/dma-noncoherent.c
@@ -306,7 +306,8 @@
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
drivers/net/lasi_82596.c
@@ -119,14 +119,14 @@
 #define DEB(x,y)       if (i596_debug & (x)) { y; }
 
 
-#define CHECK_WBACK(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define CHECK_WBACK(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define CHECK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define CHECK_INV(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
 
-#define CHECK_WBACK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define CHECK_WBACK_INV(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
 
 
 #define PA_I82596_RESET        0       /* Offsets relative to LASI-LAN-Addr.*/
 
@@ -449,10 +449,10 @@
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        while (--delcnt && lp->iscp.stat) {
                udelay(10);
-               CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+               CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk("%s: %s, iscp.stat %04x, didn't clear\n",
 
@@ -466,10 +466,10 @@
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        while (--delcnt && lp->scb.command) {
                udelay(10);
-               CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk("%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -522,7 +522,7 @@
                       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
-       CHECK_INV(lp, sizeof(struct i596_private));
+       CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -592,7 +592,7 @@
        rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
        rfd->cmd = CMD_EOL|CMD_FLEX;
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
@@ -629,7 +629,7 @@
        lp->rbd_head = lp->rbds;
        lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -663,8 +663,8 @@
 
        DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-       CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-       CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+       CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
        MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
@@ -678,25 +678,25 @@
        rebuild_rx_bufs(dev);
 
        lp->scb.command = 0;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        enable_irq(dev->irq);   /* enable IRQs from LAN */
 
        DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
        memcpy(lp->cf_cmd.i596_config, init_setup, 14);
        lp->cf_cmd.cmd.command = CmdConfigure;
-       CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+       CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
        memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
        lp->sa_cmd.cmd.command = CmdSASetup;
-       CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+       CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
        lp->tdr_cmd.cmd.command = CmdTDR;
-       CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+       CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
        spin_lock_irqsave (&lp->lock, flags);
@@ -708,7 +708,7 @@
        DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
        lp->scb.command = RX_START;
        lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        CA(dev);
 
@@ -740,13 +740,13 @@
 
        rfd = lp->rfd_head;             /* Ref next frame to check */
 
-       CHECK_INV(rfd, sizeof(struct i596_rfd));
+       CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        while ((rfd->stat) & STAT_C) {  /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
-                       CHECK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
                else {
                        printk("%s: rbd chain broken!\n", dev->name);
@@ -790,7 +790,7 @@
                        dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
                        rbd->v_data = newskb->data;
                        rbd->b_data = WSWAPchar(dma_addr);
-                       CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
                else
                        skb = dev_alloc_skb(pkt_len + 2);
@@ -842,7 +842,7 @@
                if (rbd != NULL && (rbd->count & 0x4000)) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
-                       CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
 
                /* Tidy the frame descriptor, marking it as end of list */
 
@@ -860,10 +860,10 @@
 
                lp->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
-               CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-               CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
-               CHECK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        }
 
        DEB(DEB_RXFRAME, printk("frames %d\n", frames));
 
@@ -902,12 +902,12 @@
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
-               CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+               CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
        }
 
        wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
        lp->scb.cmd = I596_NULL;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
@@ -925,7 +925,7 @@
 
        /* FIXME: this command might cause an lpmc */
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
        CA(dev);
 
        /* wait for shutdown */
 
@@ -951,20 +951,20 @@
        cmd->command |= (CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
-       CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+       CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
        spin_lock_irqsave (&lp->lock, flags);
 
        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-               CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+               CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
                lp->scb.command = CUC_START;
-               CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
                CA(dev);
        }
        lp->cmd_tail = cmd;
 
@@ -998,12 +998,12 @@
        data = virt_to_dma(lp,tint);
 
        tint[1] = -1;
-       CHECK_WBACK(tint,PAGE_SIZE);
+       CHECK_WBACK(lp, tint, PAGE_SIZE);
 
        MPU_PORT(dev, 1, data);
 
        for(data = 1000000; data; data--) {
-               CHECK_INV(tint,PAGE_SIZE);
+               CHECK_INV(lp, tint, PAGE_SIZE);
                if(tint[1] != -1)
                        break;
 
@@ -1061,7 +1061,7 @@
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk("Kicking board.\n"));
                lp->scb.command = CUC_START | RX_START;
-               CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
                CA (dev);
                lp->last_restart = lp->stats.tx_packets;
        }
@@ -1118,8 +1118,8 @@
                tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
                DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-               CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-               CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+               CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+               CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);
 
                lp->stats.tx_packets++;
@@ -1228,7 +1228,7 @@
        lp->dma_addr = dma_addr;
        lp->dev = gen_dev;
 
-       CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
        i = register_netdev(dev);
        if (i) {
@@ -1295,7 +1295,7 @@
                DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
                while (lp->cmd_head != NULL) {
-                       CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+                       CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & STAT_C))
                                break;
 
@@ -1358,7 +1358,7 @@
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
-                       CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+                       CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }
 
@@ -1372,13 +1372,13 @@
 
                        ptr->command &= 0x1fff;
                        ptr = ptr->v_next;
-                       CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+                       CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
                }
 
                if ((lp->cmd_head != NULL))
                        ack_cmd |= CUC_START;
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-               CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
@@ -1397,7 +1397,7 @@
        }
        wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
        lp->scb.command = ack_cmd;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        /* DANGER: I suspect that some kind of interrupt
         acknowledgement aside from acking the 82596 might be needed
@@ -1426,7 +1426,7 @@
 
        wait_cmd(dev, lp, 100, "close1 timed out");
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        CA(dev);
 
@@ -1486,7 +1486,7 @@
                       dev->name);
        else {
                lp->cf_cmd.cmd.command = CmdConfigure;
-               CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+               CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
                i596_add_cmd(dev, &lp->cf_cmd.cmd);
        }
 }
@@ -1514,7 +1514,7 @@
                        DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
                                        dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
                }
-               CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+               CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
 }
drivers/scsi/53c700.c
@@ -362,11 +362,11 @@
        for (j = 0; j < PATCHES; j++)
                script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
        /* now patch up fixed addresses. */
-       script_patch_32(script, MessageLocation,
+       script_patch_32(hostdata->dev, script, MessageLocation,
                        pScript + MSGOUT_OFFSET);
-       script_patch_32(script, StatusAddress,
+       script_patch_32(hostdata->dev, script, StatusAddress,
                        pScript + STATUS_OFFSET);
-       script_patch_32(script, ReceiveMsgAddress,
+       script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
                        pScript + MSGIN_OFFSET);
 
        hostdata->script = script;
@@ -821,8 +821,9 @@
                        shost_printk(KERN_WARNING, host,
                                "Unexpected SDTR msg\n");
                        hostdata->msgout[0] = A_REJECT_MSG;
-                       dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-                       script_patch_16(hostdata->script, MessageCount, 1);
+                       dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+                       script_patch_16(hostdata->dev, hostdata->script,
+                                       MessageCount, 1);
                        /* SendMsgOut returns, so set up the return
                         * address */
                        resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -833,8 +834,9 @@
                        printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
                               host->host_no, pun, lun);
                        hostdata->msgout[0] = A_REJECT_MSG;
-                       dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-                       script_patch_16(hostdata->script, MessageCount, 1);
+                       dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+                       script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                                       1);
                        resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
                        break;
@@ -847,8 +849,9 @@
                        printk("\n");
                        /* just reject it */
                        hostdata->msgout[0] = A_REJECT_MSG;
-                       dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-                       script_patch_16(hostdata->script, MessageCount, 1);
+                       dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+                       script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                                       1);
                        /* SendMsgOut returns, so set up the return
                         * address */
                        resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -929,8 +932,9 @@
                printk("\n");
                /* just reject it */
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                /* SendMsgOut returns, so set up the return
                 * address */
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -939,7 +943,7 @@
        }
        NCR_700_writel(temp, host, TEMP_REG);
        /* set us up to receive another message */
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
        return resume_offset;
 }
 
@@ -1019,9 +1023,9 @@
        slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
        slot->SG[1].pAddr = 0;
        slot->resume_offset = hostdata->pScript;
-       dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-       dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-
+       dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+
        /* queue the command for reissue */
        slot->state = NCR_700_SLOT_QUEUED;
        slot->flags = NCR_700_FLAG_AUTOSENSE;
 
@@ -1136,11 +1140,12 @@
        hostdata->cmd = slot->cmnd;
 
        /* re-patch for this command */
-       script_patch_32_abs(hostdata->script, CommandAddress,
-                           slot->pCmd);
-       script_patch_16(hostdata->script,
+       script_patch_32_abs(hostdata->dev, hostdata->script,
+                           CommandAddress, slot->pCmd);
+       script_patch_16(hostdata->dev, hostdata->script,
                        CommandCount, slot->cmnd->cmd_len);
-       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+       script_patch_32_abs(hostdata->dev, hostdata->script,
+                           SGScriptStartAddress,
                            to32bit(&slot->pSG[0].ins));
 
        /* Note: setting SXFER only works if we're
 
@@ -1150,13 +1155,13 @@
         * should therefore always clear ACK */
        NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
                       host, SXFER_REG);
-       dma_cache_sync(hostdata->msgin,
+       dma_cache_sync(hostdata->dev, hostdata->msgin,
                       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-       dma_cache_sync(hostdata->msgout,
+       dma_cache_sync(hostdata->dev, hostdata->msgout,
                       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
        /* I'm just being paranoid here, the command should
         * already have been flushed from the cache */
-       dma_cache_sync(slot->cmnd->cmnd,
+       dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
                       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
@@ -1220,7 +1225,7 @@
        hostdata->reselection_id = reselection_id;
        /* just in case we have a stale simple tag message, clear it */
        hostdata->msgin[1] = 0;
-       dma_cache_sync(hostdata->msgin,
+       dma_cache_sync(hostdata->dev, hostdata->msgin,
                       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
        if(hostdata->tag_negotiated & (1<<reselection_id)) {
                resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1336,7 +1341,7 @@
        hostdata->cmd = NULL;
        /* clear any stale simple tag message */
        hostdata->msgin[1] = 0;
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_BIDIRECTIONAL);
 
        if(id == 0xff) {
 
@@ -1433,29 +1438,30 @@
                NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
        }
 
-       script_patch_16(hostdata->script, MessageCount, count);
+       script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-       script_patch_ID(hostdata->script,
+       script_patch_ID(hostdata->dev, hostdata->script,
                        Device_ID, 1<<scmd_id(SCp));
 
-       script_patch_32_abs(hostdata->script, CommandAddress,
+       script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
                            slot->pCmd);
-       script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+       script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+                       SCp->cmd_len);
        /* finally plumb the beginning of the SG list into the script
         * */
-       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-                           to32bit(&slot->pSG[0].ins));
+       script_patch_32_abs(hostdata->dev, hostdata->script,
+                           SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
        NCR_700_clear_fifo(SCp->device->host);
 
        if(slot->resume_offset == 0)
                slot->resume_offset = hostdata->pScript;
        /* now perform all the writebacks and invalidates */
-       dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_FROM_DEVICE);
-       dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
        /* set the synchronous period/offset */
        NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1631,7 +1637,7 @@
                                slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
                                slot->SG[i].pAddr = 0;
                        }
-                       dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+                       dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                        /* and pretend we disconnected after
                         * the command phase */
                        resume_offset = hostdata->pScript + Ent_MsgInDuringData;
 
@@ -1897,9 +1903,9 @@
                }
                slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
                slot->SG[i].pAddr = 0;
-               dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                DEBUG((" SETTING %08lx to %x\n",
-                      (&slot->pSG[i].ins),
+                      (&slot->pSG[i].ins),
                       slot->SG[i].ins));
        }
        slot->resume_offset = 0;
drivers/scsi/53c700.h
@@ -415,31 +415,31 @@
 #define NCR_710_MIN_XFERP      0
 #define NCR_700_MIN_PERIOD     25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                (script)[A_##symbol##_used[i]] = bS_to_host(value); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 
@@ -447,13 +447,13 @@
                val &= 0xff00ffff; \
                val |= ((value) & 0xff) << 16; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@
                val &= 0xffff0000; \
                val |= ((value) & 0xffff); \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching short field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
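The macros above all follow the same patch-then-flush pattern: rewrite one
32-bit word of the device-visible SCRIPT, then dma_cache_sync() exactly that
word so the chip fetches the new value. A stripped-down sketch of the pattern
outside the macro machinery (idx and new_val are illustrative names):

        /* Patch one script word in memory... */
        script[idx] = bS_to_host(new_val);
        /* ...then write that word back for the device. */
        dma_cache_sync(hostdata->dev, &script[idx], 4, DMA_TO_DEVICE);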
drivers/serial/mpsc.c
@@ -555,7 +555,7 @@
        if (!mpsc_sdma_tx_active(pi)) {
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@
        }
        txre->link = cpu_to_be32(pi->txr_p);    /* Wrap last back to first */
 
-       dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+       dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
                       DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@
 
        rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-       dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@
        }
 
        bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-       dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@
                                        SDMA_DESC_CMDSTAT_F |
                                        SDMA_DESC_CMDSTAT_L);
                wmb();
-               dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@
        pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
        rxre = (struct mpsc_rx_desc *)(pi->rxr +
                (pi->rxr_posn * MPSC_RXRE_SIZE));
-       dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@
                                        SDMA_DESC_CMDSTAT_EI
                                        : 0));
        wmb();
-       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@
        else /* All tx data copied into ring bufs */
                return;
 
-       dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+       dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@
        txre = (struct mpsc_tx_desc *)(pi->txr +
                (pi->txr_tail * MPSC_TXRE_SIZE));
 
-       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@
 
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
                               DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@
                count--;
        }
 
-       dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+       dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                flush_dcache_range((ulong)bp,
include/asm-alpha/dma-mapping.h
@@ -60,7 +60,7 @@
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)          do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)       do { } while (0)
-#define dma_cache_sync(va, size, dir)                     do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)                do { } while (0)
 
 #define dma_get_cache_alignment()                         L1_CACHE_BYTES
 
include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
include/asm-cris/dma-mapping.h
include/asm-frv/dma-mapping.h
@@ -175,7 +175,7 @@
 #define dma_is_consistent(d, h)        (1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
 {
        flush_write_buffers();
include/asm-generic/dma-mapping.h
@@ -295,7 +295,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
include/asm-i386/dma-mapping.h
@@ -159,7 +159,7 @@
 #define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        flush_write_buffers();
include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
include/asm-m68k/dma-mapping.h
@@ -41,7 +41,7 @@
 {
        dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        /* we use coherent allocation, so not much to do here. */
include/asm-mips/dma-mapping.h
@@ -65,7 +65,7 @@
 
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
include/asm-parisc/dma-mapping.h
include/asm-powerpc/dma-mapping.h
@@ -378,7 +378,7 @@
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@
        consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        consistent_sync(vaddr, size, (int)dir);
include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@
        consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        dma_cache_wback_inv((unsigned long)vaddr, size);
include/asm-sparc64/dma-mapping.h
@@ -210,7 +210,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
include/asm-um/dma-mapping.h
include/asm-x86_64/dma-mapping.h
@@ -185,7 +185,8 @@
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        flush_write_buffers();
 }
include/asm-xtensa/dma-mapping.h
@@ -173,7 +173,7 @@
 #define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        consistent_sync(vaddr, size, direction);