Commit 9a88cbb5227970757881b1a65be01dea61fe2584
1 parent: f65e4fa8e0
Exists in master and in 7 other branches
[MIPS] Unify dma-{coherent,noncoherent,ip27,ip32}
Platforms will now have to supply a function plat_device_is_coherent which returns whether a particular device participates in the coherence domain. For most platforms this function will always return 0 or 1.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Showing 14 changed files with 649 additions and 1282 deletions (side-by-side diff)
- arch/mips/Kconfig
- arch/mips/mm/Makefile
- arch/mips/mm/dma-coherent.c
- arch/mips/mm/dma-default.c
- arch/mips/mm/dma-ip27.c
- arch/mips/mm/dma-ip32.c
- arch/mips/mm/dma-noncoherent.c
- arch/mips/pci/Makefile
- arch/mips/pci/pci-dac.c
- include/asm-mips/mach-generic/dma-coherence.h
- include/asm-mips/mach-generic/kmalloc.h
- include/asm-mips/mach-ip27/dma-coherence.h
- include/asm-mips/mach-ip32/dma-coherence.h
- include/asm-mips/mach-jazz/dma-coherence.h
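As context for the diffs that follow: the per-platform knowledge now lives in a single header, <dma-coherence.h>, picked up by the unified arch/mips/mm/dma-default.c. Below is a minimal sketch of such a header for a hypothetical fully non-coherent board. The hook names and signatures are taken from the mach-generic header and the call sites in dma-default.c in this commit; the board name and the body of plat_device_is_coherent() are illustrative assumptions, not part of the commit (the generic header is cut off on this page before that function appears).

```c
/*
 * Hypothetical include/asm-mips/mach-myboard/dma-coherence.h -- a sketch,
 * not part of this commit, modeled on the mach-generic header added here.
 */
#ifndef __ASM_MACH_MYBOARD_DMA_COHERENCE_H
#define __ASM_MACH_MYBOARD_DMA_COHERENCE_H

struct device;

/* Kernel virtual address -> bus address usable by a device for DMA. */
static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
{
	return virt_to_phys(addr);
}

/* struct page -> bus address usable by a device for DMA. */
static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
{
	return page_to_phys(page);
}

/* Bus address -> physical address, for cache maintenance in dma-default.c. */
static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
	return dma_addr;	/* identity: no bus/phys offset on this board */
}

/* Tear down a mapping; nothing to do without an IOMMU. */
static void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}

/*
 * Assumed body: does this device see memory coherently with the CPU caches?
 * dma-default.c uses the answer to decide whether to perform cache
 * writeback/invalidate around each mapping and sync operation.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;	/* this hypothetical board is never coherent */
}

#endif /* __ASM_MACH_MYBOARD_DMA_COHERENCE_H */
```

Platforms with per-bus address translation (the IP27 Bridge baddr, the IP32 CRIME memory window) implement these same hooks with their own translations instead of carrying complete copies of the DMA API, which is what the file deletions below remove.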
arch/mips/Kconfig
... | ... | @@ -598,8 +598,6 @@ |
598 | 598 | select ARC |
599 | 599 | select ARC32 |
600 | 600 | select BOOT_ELF32 |
601 | - select OWN_DMA | |
602 | - select DMA_IP32 | |
603 | 601 | select DMA_NONCOHERENT |
604 | 602 | select HW_HAS_PCI |
605 | 603 | select R5000_CPU_SCACHE |
... | ... | @@ -881,9 +879,6 @@ |
881 | 879 | select DMA_NEED_PCI_MAP_STATE |
882 | 880 | |
883 | 881 | config DMA_NEED_PCI_MAP_STATE |
884 | - bool | |
885 | - | |
886 | -config OWN_DMA | |
887 | 882 | bool |
888 | 883 | |
889 | 884 | config EARLY_PRINTK |
arch/mips/mm/Makefile
... | ... | @@ -2,8 +2,8 @@ |
2 | 2 | # Makefile for the Linux/MIPS-specific parts of the memory manager. |
3 | 3 | # |
4 | 4 | |
5 | -obj-y += cache.o extable.o fault.o init.o pgtable.o \ | |
6 | - tlbex.o tlbex-fault.o | |
5 | +obj-y += cache.o dma-default.o extable.o fault.o \ | |
6 | + init.o pgtable.o tlbex.o tlbex-fault.o | |
7 | 7 | |
8 | 8 | obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o |
9 | 9 | obj-$(CONFIG_64BIT) += pgtable-64.o |
... | ... | @@ -31,16 +31,6 @@ |
31 | 31 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o |
32 | 32 | obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o |
33 | 33 | obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o |
34 | - | |
35 | -# | |
36 | -# Choose one DMA coherency model | |
37 | -# | |
38 | -ifndef CONFIG_OWN_DMA | |
39 | -obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o | |
40 | -obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o | |
41 | -endif | |
42 | -obj-$(CONFIG_DMA_IP27) += dma-ip27.o | |
43 | -obj-$(CONFIG_DMA_IP32) += dma-ip32.o | |
44 | 34 | |
45 | 35 | EXTRA_AFLAGS := $(CFLAGS) |
arch/mips/mm/dma-coherent.c
1 | -/* | |
2 | - * This file is subject to the terms and conditions of the GNU General Public | |
3 | - * License. See the file "COPYING" in the main directory of this archive | |
4 | - * for more details. | |
5 | - * | |
6 | - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | |
8 | - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
9 | - */ | |
10 | -#include <linux/types.h> | |
11 | -#include <linux/dma-mapping.h> | |
12 | -#include <linux/mm.h> | |
13 | -#include <linux/module.h> | |
14 | -#include <linux/string.h> | |
15 | - | |
16 | -#include <asm/cache.h> | |
17 | -#include <asm/io.h> | |
18 | - | |
19 | -void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
20 | - dma_addr_t * dma_handle, gfp_t gfp) | |
21 | -{ | |
22 | - void *ret; | |
23 | - /* ignore region specifiers */ | |
24 | - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
25 | - | |
26 | - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
27 | - gfp |= GFP_DMA; | |
28 | - ret = (void *) __get_free_pages(gfp, get_order(size)); | |
29 | - | |
30 | - if (ret != NULL) { | |
31 | - memset(ret, 0, size); | |
32 | - *dma_handle = virt_to_phys(ret); | |
33 | - } | |
34 | - | |
35 | - return ret; | |
36 | -} | |
37 | - | |
38 | -EXPORT_SYMBOL(dma_alloc_noncoherent); | |
39 | - | |
40 | -void *dma_alloc_coherent(struct device *dev, size_t size, | |
41 | - dma_addr_t * dma_handle, gfp_t gfp) | |
42 | - __attribute__((alias("dma_alloc_noncoherent"))); | |
43 | - | |
44 | -EXPORT_SYMBOL(dma_alloc_coherent); | |
45 | - | |
46 | -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
47 | - dma_addr_t dma_handle) | |
48 | -{ | |
49 | - unsigned long addr = (unsigned long) vaddr; | |
50 | - | |
51 | - free_pages(addr, get_order(size)); | |
52 | -} | |
53 | - | |
54 | -EXPORT_SYMBOL(dma_free_noncoherent); | |
55 | - | |
56 | -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
57 | - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent"))); | |
58 | - | |
59 | -EXPORT_SYMBOL(dma_free_coherent); | |
60 | - | |
61 | -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
62 | - enum dma_data_direction direction) | |
63 | -{ | |
64 | - BUG_ON(direction == DMA_NONE); | |
65 | - | |
66 | - return __pa(ptr); | |
67 | -} | |
68 | - | |
69 | -EXPORT_SYMBOL(dma_map_single); | |
70 | - | |
71 | -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
72 | - enum dma_data_direction direction) | |
73 | -{ | |
74 | - BUG_ON(direction == DMA_NONE); | |
75 | -} | |
76 | - | |
77 | -EXPORT_SYMBOL(dma_unmap_single); | |
78 | - | |
79 | -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
80 | - enum dma_data_direction direction) | |
81 | -{ | |
82 | - int i; | |
83 | - | |
84 | - BUG_ON(direction == DMA_NONE); | |
85 | - | |
86 | - for (i = 0; i < nents; i++, sg++) { | |
87 | - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset; | |
88 | - } | |
89 | - | |
90 | - return nents; | |
91 | -} | |
92 | - | |
93 | -EXPORT_SYMBOL(dma_map_sg); | |
94 | - | |
95 | -dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
96 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
97 | -{ | |
98 | - BUG_ON(direction == DMA_NONE); | |
99 | - | |
100 | - return page_to_phys(page) + offset; | |
101 | -} | |
102 | - | |
103 | -EXPORT_SYMBOL(dma_map_page); | |
104 | - | |
105 | -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
106 | - enum dma_data_direction direction) | |
107 | -{ | |
108 | - BUG_ON(direction == DMA_NONE); | |
109 | -} | |
110 | - | |
111 | -EXPORT_SYMBOL(dma_unmap_page); | |
112 | - | |
113 | -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
114 | - enum dma_data_direction direction) | |
115 | -{ | |
116 | - BUG_ON(direction == DMA_NONE); | |
117 | -} | |
118 | - | |
119 | -EXPORT_SYMBOL(dma_unmap_sg); | |
120 | - | |
121 | -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
122 | - size_t size, enum dma_data_direction direction) | |
123 | -{ | |
124 | - BUG_ON(direction == DMA_NONE); | |
125 | -} | |
126 | - | |
127 | -EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
128 | - | |
129 | -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
130 | - size_t size, enum dma_data_direction direction) | |
131 | -{ | |
132 | - BUG_ON(direction == DMA_NONE); | |
133 | -} | |
134 | - | |
135 | -EXPORT_SYMBOL(dma_sync_single_for_device); | |
136 | - | |
137 | -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
138 | - unsigned long offset, size_t size, | |
139 | - enum dma_data_direction direction) | |
140 | -{ | |
141 | - BUG_ON(direction == DMA_NONE); | |
142 | -} | |
143 | - | |
144 | -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
145 | - | |
146 | -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
147 | - unsigned long offset, size_t size, | |
148 | - enum dma_data_direction direction) | |
149 | -{ | |
150 | - BUG_ON(direction == DMA_NONE); | |
151 | -} | |
152 | - | |
153 | -EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
154 | - | |
155 | -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
156 | - enum dma_data_direction direction) | |
157 | -{ | |
158 | - BUG_ON(direction == DMA_NONE); | |
159 | -} | |
160 | - | |
161 | -EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
162 | - | |
163 | -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
164 | - enum dma_data_direction direction) | |
165 | -{ | |
166 | - BUG_ON(direction == DMA_NONE); | |
167 | -} | |
168 | - | |
169 | -EXPORT_SYMBOL(dma_sync_sg_for_device); | |
170 | - | |
171 | -int dma_mapping_error(dma_addr_t dma_addr) | |
172 | -{ | |
173 | - return 0; | |
174 | -} | |
175 | - | |
176 | -EXPORT_SYMBOL(dma_mapping_error); | |
177 | - | |
178 | -int dma_supported(struct device *dev, u64 mask) | |
179 | -{ | |
180 | - /* | |
181 | - * we fall back to GFP_DMA when the mask isn't all 1s, | |
182 | - * so we can't guarantee allocations that must be | |
183 | - * within a tighter range than GFP_DMA.. | |
184 | - */ | |
185 | - if (mask < 0x00ffffff) | |
186 | - return 0; | |
187 | - | |
188 | - return 1; | |
189 | -} | |
190 | - | |
191 | -EXPORT_SYMBOL(dma_supported); | |
192 | - | |
193 | -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | |
194 | -{ | |
195 | - return 1; | |
196 | -} | |
197 | - | |
198 | -EXPORT_SYMBOL(dma_is_consistent); | |
199 | - | |
200 | -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
201 | - enum dma_data_direction direction) | |
202 | -{ | |
203 | - BUG_ON(direction == DMA_NONE); | |
204 | -} | |
205 | - | |
206 | -EXPORT_SYMBOL(dma_cache_sync); | |
207 | - | |
208 | -/* The DAC routines are a PCIism.. */ | |
209 | - | |
210 | -#ifdef CONFIG_PCI | |
211 | - | |
212 | -#include <linux/pci.h> | |
213 | - | |
214 | -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | |
215 | - struct page *page, unsigned long offset, int direction) | |
216 | -{ | |
217 | - return (dma64_addr_t)page_to_phys(page) + offset; | |
218 | -} | |
219 | - | |
220 | -EXPORT_SYMBOL(pci_dac_page_to_dma); | |
221 | - | |
222 | -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | |
223 | - dma64_addr_t dma_addr) | |
224 | -{ | |
225 | - return mem_map + (dma_addr >> PAGE_SHIFT); | |
226 | -} | |
227 | - | |
228 | -EXPORT_SYMBOL(pci_dac_dma_to_page); | |
229 | - | |
230 | -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | |
231 | - dma64_addr_t dma_addr) | |
232 | -{ | |
233 | - return dma_addr & ~PAGE_MASK; | |
234 | -} | |
235 | - | |
236 | -EXPORT_SYMBOL(pci_dac_dma_to_offset); | |
237 | - | |
238 | -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | |
239 | - dma64_addr_t dma_addr, size_t len, int direction) | |
240 | -{ | |
241 | - BUG_ON(direction == PCI_DMA_NONE); | |
242 | -} | |
243 | - | |
244 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | |
245 | - | |
246 | -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | |
247 | - dma64_addr_t dma_addr, size_t len, int direction) | |
248 | -{ | |
249 | - BUG_ON(direction == PCI_DMA_NONE); | |
250 | -} | |
251 | - | |
252 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); | |
253 | - | |
254 | -#endif /* CONFIG_PCI */ |
arch/mips/mm/dma-default.c
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> | |
8 | + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
9 | + */ | |
10 | + | |
11 | +#include <linux/types.h> | |
12 | +#include <linux/dma-mapping.h> | |
13 | +#include <linux/mm.h> | |
14 | +#include <linux/module.h> | |
15 | +#include <linux/string.h> | |
16 | + | |
17 | +#include <asm/cache.h> | |
18 | +#include <asm/io.h> | |
19 | + | |
20 | +#include <dma-coherence.h> | |
21 | + | |
22 | +/* | |
23 | + * Warning on the terminology - Linux calls an uncached area coherent; | |
24 | + * MIPS terminology calls memory areas with hardware maintained coherency | |
25 | + * coherent. | |
26 | + */ | |
27 | + | |
28 | +static inline int cpu_is_noncoherent_r10000(struct device *dev) | |
29 | +{ | |
30 | + return !plat_device_is_coherent(dev) && | |
31 | +	       (current_cpu_data.cputype == CPU_R10000 || | 
32 | + current_cpu_data.cputype == CPU_R12000); | |
33 | +} | |
34 | + | |
35 | +void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
36 | + dma_addr_t * dma_handle, gfp_t gfp) | |
37 | +{ | |
38 | + void *ret; | |
39 | + | |
40 | + /* ignore region specifiers */ | |
41 | + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
42 | + | |
43 | + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
44 | + gfp |= GFP_DMA; | |
45 | + ret = (void *) __get_free_pages(gfp, get_order(size)); | |
46 | + | |
47 | + if (ret != NULL) { | |
48 | + memset(ret, 0, size); | |
49 | + *dma_handle = plat_map_dma_mem(dev, ret, size); | |
50 | + } | |
51 | + | |
52 | + return ret; | |
53 | +} | |
54 | + | |
55 | +EXPORT_SYMBOL(dma_alloc_noncoherent); | |
56 | + | |
57 | +void *dma_alloc_coherent(struct device *dev, size_t size, | |
58 | + dma_addr_t * dma_handle, gfp_t gfp) | |
59 | +{ | |
60 | + void *ret; | |
61 | + | |
62 | + /* ignore region specifiers */ | |
63 | + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
64 | + | |
65 | + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
66 | + gfp |= GFP_DMA; | |
67 | + ret = (void *) __get_free_pages(gfp, get_order(size)); | |
68 | + | |
69 | + if (ret) { | |
70 | + memset(ret, 0, size); | |
71 | + *dma_handle = plat_map_dma_mem(dev, ret, size); | |
72 | + | |
73 | + if (!plat_device_is_coherent(dev)) { | |
74 | + dma_cache_wback_inv((unsigned long) ret, size); | |
75 | + ret = UNCAC_ADDR(ret); | |
76 | + } | |
77 | + } | |
78 | + | |
79 | + return ret; | |
80 | +} | |
81 | + | |
82 | +EXPORT_SYMBOL(dma_alloc_coherent); | |
83 | + | |
84 | +void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
85 | + dma_addr_t dma_handle) | |
86 | +{ | |
87 | + free_pages((unsigned long) vaddr, get_order(size)); | |
88 | +} | |
89 | + | |
90 | +EXPORT_SYMBOL(dma_free_noncoherent); | |
91 | + | |
92 | +void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
93 | + dma_addr_t dma_handle) | |
94 | +{ | |
95 | + unsigned long addr = (unsigned long) vaddr; | |
96 | + | |
97 | + if (!plat_device_is_coherent(dev)) | |
98 | + addr = CAC_ADDR(addr); | |
99 | + | |
100 | + free_pages(addr, get_order(size)); | |
101 | +} | |
102 | + | |
103 | +EXPORT_SYMBOL(dma_free_coherent); | |
104 | + | |
105 | +static inline void __dma_sync(unsigned long addr, size_t size, | |
106 | + enum dma_data_direction direction) | |
107 | +{ | |
108 | + switch (direction) { | |
109 | + case DMA_TO_DEVICE: | |
110 | + dma_cache_wback(addr, size); | |
111 | + break; | |
112 | + | |
113 | + case DMA_FROM_DEVICE: | |
114 | + dma_cache_inv(addr, size); | |
115 | + break; | |
116 | + | |
117 | + case DMA_BIDIRECTIONAL: | |
118 | + dma_cache_wback_inv(addr, size); | |
119 | + break; | |
120 | + | |
121 | + default: | |
122 | + BUG(); | |
123 | + } | |
124 | +} | |
125 | + | |
126 | +dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
127 | + enum dma_data_direction direction) | |
128 | +{ | |
129 | + unsigned long addr = (unsigned long) ptr; | |
130 | + | |
131 | + if (!plat_device_is_coherent(dev)) | |
132 | + __dma_sync(addr, size, direction); | |
133 | + | |
134 | + return plat_map_dma_mem(dev, ptr, size); | |
135 | +} | |
136 | + | |
137 | +EXPORT_SYMBOL(dma_map_single); | |
138 | + | |
139 | +void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
140 | + enum dma_data_direction direction) | |
141 | +{ | |
142 | + if (cpu_is_noncoherent_r10000(dev)) | |
143 | + __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size, | |
144 | + direction); | |
145 | + | |
146 | + plat_unmap_dma_mem(dma_addr); | |
147 | +} | |
148 | + | |
149 | +EXPORT_SYMBOL(dma_unmap_single); | |
150 | + | |
151 | +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
152 | + enum dma_data_direction direction) | |
153 | +{ | |
154 | + int i; | |
155 | + | |
156 | + BUG_ON(direction == DMA_NONE); | |
157 | + | |
158 | + for (i = 0; i < nents; i++, sg++) { | |
159 | + unsigned long addr; | |
160 | + | |
161 | + addr = (unsigned long) page_address(sg->page); | |
162 | + if (!plat_device_is_coherent(dev) && addr) | |
163 | + __dma_sync(addr + sg->offset, sg->length, direction); | |
164 | + sg->dma_address = plat_map_dma_mem_page(dev, sg->page) + | |
165 | + sg->offset; | |
166 | + } | |
167 | + | |
168 | + return nents; | |
169 | +} | |
170 | + | |
171 | +EXPORT_SYMBOL(dma_map_sg); | |
172 | + | |
173 | +dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
174 | + unsigned long offset, size_t size, enum dma_data_direction direction) | |
175 | +{ | |
176 | + BUG_ON(direction == DMA_NONE); | |
177 | + | |
178 | + if (!plat_device_is_coherent(dev)) { | |
179 | + unsigned long addr; | |
180 | + | |
181 | + addr = (unsigned long) page_address(page) + offset; | |
182 | + dma_cache_wback_inv(addr, size); | |
183 | + } | |
184 | + | |
185 | + return plat_map_dma_mem_page(dev, page) + offset; | |
186 | +} | |
187 | + | |
188 | +EXPORT_SYMBOL(dma_map_page); | |
189 | + | |
190 | +void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
191 | + enum dma_data_direction direction) | |
192 | +{ | |
193 | + BUG_ON(direction == DMA_NONE); | |
194 | + | |
195 | + if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) { | |
196 | + unsigned long addr; | |
197 | + | |
198 | + addr = plat_dma_addr_to_phys(dma_address); | |
199 | + dma_cache_wback_inv(addr, size); | |
200 | + } | |
201 | + | |
202 | + plat_unmap_dma_mem(dma_address); | |
203 | +} | |
204 | + | |
205 | +EXPORT_SYMBOL(dma_unmap_page); | |
206 | + | |
207 | +void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
208 | + enum dma_data_direction direction) | |
209 | +{ | |
210 | + unsigned long addr; | |
211 | + int i; | |
212 | + | |
213 | + BUG_ON(direction == DMA_NONE); | |
214 | + | |
215 | + for (i = 0; i < nhwentries; i++, sg++) { | |
216 | + if (!plat_device_is_coherent(dev) && | |
217 | + direction != DMA_TO_DEVICE) { | |
218 | + addr = (unsigned long) page_address(sg->page); | |
219 | + if (addr) | |
220 | + __dma_sync(addr + sg->offset, sg->length, | |
221 | + direction); | |
222 | + } | |
223 | + plat_unmap_dma_mem(sg->dma_address); | |
224 | + } | |
225 | +} | |
226 | + | |
227 | +EXPORT_SYMBOL(dma_unmap_sg); | |
228 | + | |
229 | +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
230 | + size_t size, enum dma_data_direction direction) | |
231 | +{ | |
232 | + BUG_ON(direction == DMA_NONE); | |
233 | + | |
234 | + if (cpu_is_noncoherent_r10000(dev)) { | |
235 | + unsigned long addr; | |
236 | + | |
237 | + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); | |
238 | + __dma_sync(addr, size, direction); | |
239 | + } | |
240 | +} | |
241 | + | |
242 | +EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
243 | + | |
244 | +void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
245 | + size_t size, enum dma_data_direction direction) | |
246 | +{ | |
247 | + BUG_ON(direction == DMA_NONE); | |
248 | + | |
249 | + if (cpu_is_noncoherent_r10000(dev)) { | |
250 | + unsigned long addr; | |
251 | + | |
252 | + addr = plat_dma_addr_to_phys(dma_handle); | |
253 | + __dma_sync(addr, size, direction); | |
254 | + } | |
255 | +} | |
256 | + | |
257 | +EXPORT_SYMBOL(dma_sync_single_for_device); | |
258 | + | |
259 | +void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
260 | + unsigned long offset, size_t size, enum dma_data_direction direction) | |
261 | +{ | |
262 | + BUG_ON(direction == DMA_NONE); | |
263 | + | |
264 | + if (cpu_is_noncoherent_r10000(dev)) { | |
265 | + unsigned long addr; | |
266 | + | |
267 | + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); | |
268 | + __dma_sync(addr + offset, size, direction); | |
269 | + } | |
270 | +} | |
271 | + | |
272 | +EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
273 | + | |
274 | +void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
275 | + unsigned long offset, size_t size, enum dma_data_direction direction) | |
276 | +{ | |
277 | + BUG_ON(direction == DMA_NONE); | |
278 | + | |
279 | + if (cpu_is_noncoherent_r10000(dev)) { | |
280 | + unsigned long addr; | |
281 | + | |
282 | + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); | |
283 | + __dma_sync(addr + offset, size, direction); | |
284 | + } | |
285 | +} | |
286 | + | |
287 | +EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
288 | + | |
289 | +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
290 | + enum dma_data_direction direction) | |
291 | +{ | |
292 | + int i; | |
293 | + | |
294 | + BUG_ON(direction == DMA_NONE); | |
295 | + | |
296 | + /* Make sure that gcc doesn't leave the empty loop body. */ | |
297 | + for (i = 0; i < nelems; i++, sg++) { | |
298 | + if (!plat_device_is_coherent(dev)) | |
299 | + __dma_sync((unsigned long)page_address(sg->page), | |
300 | + sg->length, direction); | |
301 | + plat_unmap_dma_mem(sg->dma_address); | |
302 | + } | |
303 | +} | |
304 | + | |
305 | +EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
306 | + | |
307 | +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
308 | + enum dma_data_direction direction) | |
309 | +{ | |
310 | + int i; | |
311 | + | |
312 | + BUG_ON(direction == DMA_NONE); | |
313 | + | |
314 | + /* Make sure that gcc doesn't leave the empty loop body. */ | |
315 | + for (i = 0; i < nelems; i++, sg++) { | |
316 | + if (!plat_device_is_coherent(dev)) | |
317 | + __dma_sync((unsigned long)page_address(sg->page), | |
318 | + sg->length, direction); | |
319 | + plat_unmap_dma_mem(sg->dma_address); | |
320 | + } | |
321 | +} | |
322 | + | |
323 | +EXPORT_SYMBOL(dma_sync_sg_for_device); | |
324 | + | |
325 | +int dma_mapping_error(dma_addr_t dma_addr) | |
326 | +{ | |
327 | + return 0; | |
328 | +} | |
329 | + | |
330 | +EXPORT_SYMBOL(dma_mapping_error); | |
331 | + | |
332 | +int dma_supported(struct device *dev, u64 mask) | |
333 | +{ | |
334 | + /* | |
335 | + * we fall back to GFP_DMA when the mask isn't all 1s, | |
336 | + * so we can't guarantee allocations that must be | |
337 | + * within a tighter range than GFP_DMA.. | |
338 | + */ | |
339 | + if (mask < 0x00ffffff) | |
340 | + return 0; | |
341 | + | |
342 | + return 1; | |
343 | +} | |
344 | + | |
345 | +EXPORT_SYMBOL(dma_supported); | |
346 | + | |
347 | +int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | |
348 | +{ | |
349 | + return plat_device_is_coherent(dev); | |
350 | +} | |
351 | + | |
352 | +EXPORT_SYMBOL(dma_is_consistent); | |
353 | + | |
354 | +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
355 | + enum dma_data_direction direction) | |
356 | +{ | |
357 | + BUG_ON(direction == DMA_NONE); | |
358 | + | |
359 | + if (!plat_device_is_coherent(dev)) | |
360 | + dma_cache_wback_inv((unsigned long)vaddr, size); | |
361 | +} | |
362 | + | |
363 | +EXPORT_SYMBOL(dma_cache_sync); |
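The driver-facing API is untouched by this unification; only the implementation behind it moved into dma-default.c plus the platform hooks. A hypothetical driver-side sketch, under the signatures visible in this diff (note that dma_mapping_error() here still takes only the dma_addr_t); the helper name is an assumption, not from this commit:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Hypothetical driver helper -- a sketch, not from this commit.  Nothing
 * here changes with the unification: dma_map_single() now internally asks
 * plat_device_is_coherent() whether cache maintenance is needed and
 * plat_map_dma_mem() for the bus address.
 */
static int my_map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
			    dma_addr_t *handle)
{
	/* For device-to-memory transfers, dma-default.c invalidates the
	   CPU cache lines covering buf on non-coherent platforms. */
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(*handle))
		return -ENOMEM;

	return 0;
}
```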
arch/mips/mm/dma-ip27.c
1 | -/* | |
2 | - * This file is subject to the terms and conditions of the GNU General Public | |
3 | - * License. See the file "COPYING" in the main directory of this archive | |
4 | - * for more details. | |
5 | - * | |
6 | - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | |
8 | - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
9 | - */ | |
10 | -#include <linux/types.h> | |
11 | -#include <linux/mm.h> | |
12 | -#include <linux/module.h> | |
13 | -#include <linux/string.h> | |
14 | -#include <linux/pci.h> | |
15 | - | |
16 | -#include <asm/cache.h> | |
17 | -#include <asm/pci/bridge.h> | |
18 | - | |
19 | -#define pdev_to_baddr(pdev, addr) \ | |
20 | - (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr)) | |
21 | -#define dev_to_baddr(dev, addr) \ | |
22 | - pdev_to_baddr(to_pci_dev(dev), (addr)) | |
23 | - | |
24 | -void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
25 | - dma_addr_t * dma_handle, gfp_t gfp) | |
26 | -{ | |
27 | - void *ret; | |
28 | - | |
29 | - /* ignore region specifiers */ | |
30 | - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
31 | - | |
32 | - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
33 | - gfp |= GFP_DMA; | |
34 | - ret = (void *) __get_free_pages(gfp, get_order(size)); | |
35 | - | |
36 | - if (ret != NULL) { | |
37 | - memset(ret, 0, size); | |
38 | - *dma_handle = dev_to_baddr(dev, virt_to_phys(ret)); | |
39 | - } | |
40 | - | |
41 | - return ret; | |
42 | -} | |
43 | - | |
44 | -EXPORT_SYMBOL(dma_alloc_noncoherent); | |
45 | - | |
46 | -void *dma_alloc_coherent(struct device *dev, size_t size, | |
47 | - dma_addr_t * dma_handle, gfp_t gfp) | |
48 | - __attribute__((alias("dma_alloc_noncoherent"))); | |
49 | - | |
50 | -EXPORT_SYMBOL(dma_alloc_coherent); | |
51 | - | |
52 | -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
53 | - dma_addr_t dma_handle) | |
54 | -{ | |
55 | - unsigned long addr = (unsigned long) vaddr; | |
56 | - | |
57 | - free_pages(addr, get_order(size)); | |
58 | -} | |
59 | - | |
60 | -EXPORT_SYMBOL(dma_free_noncoherent); | |
61 | - | |
62 | -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
63 | - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent"))); | |
64 | - | |
65 | -EXPORT_SYMBOL(dma_free_coherent); | |
66 | - | |
67 | -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
68 | - enum dma_data_direction direction) | |
69 | -{ | |
70 | - BUG_ON(direction == DMA_NONE); | |
71 | - | |
72 | - return dev_to_baddr(dev, __pa(ptr)); | |
73 | -} | |
74 | - | |
75 | -EXPORT_SYMBOL(dma_map_single); | |
76 | - | |
77 | -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
78 | - enum dma_data_direction direction) | |
79 | -{ | |
80 | - BUG_ON(direction == DMA_NONE); | |
81 | -} | |
82 | - | |
83 | -EXPORT_SYMBOL(dma_unmap_single); | |
84 | - | |
85 | -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
86 | - enum dma_data_direction direction) | |
87 | -{ | |
88 | - int i; | |
89 | - | |
90 | - BUG_ON(direction == DMA_NONE); | |
91 | - | |
92 | - for (i = 0; i < nents; i++, sg++) { | |
93 | - sg->dma_address = (dma_addr_t) dev_to_baddr(dev, | |
94 | - page_to_phys(sg->page) + sg->offset); | |
95 | - } | |
96 | - | |
97 | - return nents; | |
98 | -} | |
99 | - | |
100 | -EXPORT_SYMBOL(dma_map_sg); | |
101 | - | |
102 | -dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
103 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
104 | -{ | |
105 | - BUG_ON(direction == DMA_NONE); | |
106 | - | |
107 | - return dev_to_baddr(dev, page_to_phys(page) + offset); | |
108 | -} | |
109 | - | |
110 | -EXPORT_SYMBOL(dma_map_page); | |
111 | - | |
112 | -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
113 | - enum dma_data_direction direction) | |
114 | -{ | |
115 | - BUG_ON(direction == DMA_NONE); | |
116 | -} | |
117 | - | |
118 | -EXPORT_SYMBOL(dma_unmap_page); | |
119 | - | |
120 | -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
121 | - enum dma_data_direction direction) | |
122 | -{ | |
123 | - BUG_ON(direction == DMA_NONE); | |
124 | -} | |
125 | - | |
126 | -EXPORT_SYMBOL(dma_unmap_sg); | |
127 | - | |
128 | -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | |
129 | - enum dma_data_direction direction) | |
130 | -{ | |
131 | - BUG_ON(direction == DMA_NONE); | |
132 | -} | |
133 | - | |
134 | -EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
135 | - | |
136 | -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | |
137 | - enum dma_data_direction direction) | |
138 | -{ | |
139 | - BUG_ON(direction == DMA_NONE); | |
140 | -} | |
141 | - | |
142 | -EXPORT_SYMBOL(dma_sync_single_for_device); | |
143 | - | |
144 | -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
145 | - unsigned long offset, size_t size, | |
146 | - enum dma_data_direction direction) | |
147 | -{ | |
148 | - BUG_ON(direction == DMA_NONE); | |
149 | -} | |
150 | - | |
151 | -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
152 | - | |
153 | -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
154 | - unsigned long offset, size_t size, | |
155 | - enum dma_data_direction direction) | |
156 | -{ | |
157 | - BUG_ON(direction == DMA_NONE); | |
158 | -} | |
159 | - | |
160 | -EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
161 | - | |
162 | -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
163 | - enum dma_data_direction direction) | |
164 | -{ | |
165 | - BUG_ON(direction == DMA_NONE); | |
166 | -} | |
167 | - | |
168 | -EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
169 | - | |
170 | -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
171 | - enum dma_data_direction direction) | |
172 | -{ | |
173 | - BUG_ON(direction == DMA_NONE); | |
174 | -} | |
175 | - | |
176 | -EXPORT_SYMBOL(dma_sync_sg_for_device); | |
177 | - | |
178 | -int dma_mapping_error(dma_addr_t dma_addr) | |
179 | -{ | |
180 | - return 0; | |
181 | -} | |
182 | - | |
183 | -EXPORT_SYMBOL(dma_mapping_error); | |
184 | - | |
185 | -int dma_supported(struct device *dev, u64 mask) | |
186 | -{ | |
187 | - /* | |
188 | - * we fall back to GFP_DMA when the mask isn't all 1s, | |
189 | - * so we can't guarantee allocations that must be | |
190 | - * within a tighter range than GFP_DMA.. | |
191 | - */ | |
192 | - if (mask < 0x00ffffff) | |
193 | - return 0; | |
194 | - | |
195 | - return 1; | |
196 | -} | |
197 | - | |
198 | -EXPORT_SYMBOL(dma_supported); | |
199 | - | |
200 | -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | |
201 | -{ | |
202 | - return 1; | |
203 | -} | |
204 | - | |
205 | -EXPORT_SYMBOL(dma_is_consistent); | |
206 | - | |
207 | -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
208 | - enum dma_data_direction direction) | |
209 | -{ | |
210 | - BUG_ON(direction == DMA_NONE); | |
211 | -} | |
212 | - | |
213 | -EXPORT_SYMBOL(dma_cache_sync); | |
214 | - | |
215 | -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | |
216 | - struct page *page, unsigned long offset, int direction) | |
217 | -{ | |
218 | - dma64_addr_t addr = page_to_phys(page) + offset; | |
219 | - | |
220 | - return (dma64_addr_t) pdev_to_baddr(pdev, addr); | |
221 | -} | |
222 | - | |
223 | -EXPORT_SYMBOL(pci_dac_page_to_dma); | |
224 | - | |
225 | -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | |
226 | - dma64_addr_t dma_addr) | |
227 | -{ | |
228 | - struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus); | |
229 | - | |
230 | - return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT); | |
231 | -} | |
232 | - | |
233 | -EXPORT_SYMBOL(pci_dac_dma_to_page); | |
234 | - | |
235 | -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | |
236 | - dma64_addr_t dma_addr) | |
237 | -{ | |
238 | - return dma_addr & ~PAGE_MASK; | |
239 | -} | |
240 | - | |
241 | -EXPORT_SYMBOL(pci_dac_dma_to_offset); | |
242 | - | |
243 | -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | |
244 | - dma64_addr_t dma_addr, size_t len, int direction) | |
245 | -{ | |
246 | - BUG_ON(direction == PCI_DMA_NONE); | |
247 | -} | |
248 | - | |
249 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | |
250 | - | |
251 | -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | |
252 | - dma64_addr_t dma_addr, size_t len, int direction) | |
253 | -{ | |
254 | - BUG_ON(direction == PCI_DMA_NONE); | |
255 | -} | |
256 | - | |
257 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); |
arch/mips/mm/dma-ip32.c
1 | -/* | |
2 | - * This file is subject to the terms and conditions of the GNU General Public | |
3 | - * License. See the file "COPYING" in the main directory of this archive | |
4 | - * for more details. | |
5 | - * | |
6 | - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | |
8 | - * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com> | |
9 | - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
10 | - * IP32 changes by Ilya. | |
11 | - */ | |
12 | -#include <linux/types.h> | |
13 | -#include <linux/mm.h> | |
14 | -#include <linux/module.h> | |
15 | -#include <linux/string.h> | |
16 | -#include <linux/dma-mapping.h> | |
17 | - | |
18 | -#include <asm/cache.h> | |
19 | -#include <asm/io.h> | |
20 | -#include <asm/ip32/crime.h> | |
21 | - | |
22 | -/* | |
23 | - * Warning on the terminology - Linux calls an uncached area coherent; | |
24 | - * MIPS terminology calls memory areas with hardware maintained coherency | |
25 | - * coherent. | |
26 | - */ | |
27 | - | |
28 | -/* | |
29 | - * Few notes. | |
30 | - * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M | |
31 | - * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian) | |
32 | - * 3. All other devices see memory as one big chunk at 0x40000000 | |
33 | - * 4. Non-PCI devices will pass NULL as struct device* | |
34 | - * Thus we translate differently, depending on device. | |
35 | - */ | |
36 | - | |
37 | -#define RAM_OFFSET_MASK 0x3fffffff | |
38 | - | |
39 | -void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
40 | - dma_addr_t * dma_handle, gfp_t gfp) | |
41 | -{ | |
42 | - void *ret; | |
43 | - /* ignore region specifiers */ | |
44 | - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
45 | - | |
46 | - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
47 | - gfp |= GFP_DMA; | |
48 | - ret = (void *) __get_free_pages(gfp, get_order(size)); | |
49 | - | |
50 | - if (ret != NULL) { | |
51 | - unsigned long addr = virt_to_phys(ret)&RAM_OFFSET_MASK; | |
52 | - memset(ret, 0, size); | |
53 | - if(dev==NULL) | |
54 | - addr+= CRIME_HI_MEM_BASE; | |
55 | - *dma_handle = addr; | |
56 | - } | |
57 | - | |
58 | - return ret; | |
59 | -} | |
60 | - | |
61 | -EXPORT_SYMBOL(dma_alloc_noncoherent); | |
62 | - | |
63 | -void *dma_alloc_coherent(struct device *dev, size_t size, | |
64 | - dma_addr_t * dma_handle, gfp_t gfp) | |
65 | -{ | |
66 | - void *ret; | |
67 | - | |
68 | - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp); | |
69 | - if (ret) { | |
70 | - dma_cache_wback_inv((unsigned long) ret, size); | |
71 | - ret = UNCAC_ADDR(ret); | |
72 | - } | |
73 | - | |
74 | - return ret; | |
75 | -} | |
76 | - | |
77 | -EXPORT_SYMBOL(dma_alloc_coherent); | |
78 | - | |
79 | -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
80 | - dma_addr_t dma_handle) | |
81 | -{ | |
82 | - free_pages((unsigned long) vaddr, get_order(size)); | |
83 | -} | |
84 | - | |
85 | -EXPORT_SYMBOL(dma_free_noncoherent); | |
86 | - | |
87 | -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
88 | - dma_addr_t dma_handle) | |
89 | -{ | |
90 | - unsigned long addr = (unsigned long) vaddr; | |
91 | - | |
92 | - addr = CAC_ADDR(addr); | |
93 | - free_pages(addr, get_order(size)); | |
94 | -} | |
95 | - | |
96 | -EXPORT_SYMBOL(dma_free_coherent); | |
97 | - | |
98 | -static inline void __dma_sync(unsigned long addr, size_t size, | |
99 | - enum dma_data_direction direction) | |
100 | -{ | |
101 | - switch (direction) { | |
102 | - case DMA_TO_DEVICE: | |
103 | - dma_cache_wback(addr, size); | |
104 | - break; | |
105 | - | |
106 | - case DMA_FROM_DEVICE: | |
107 | - dma_cache_inv(addr, size); | |
108 | - break; | |
109 | - | |
110 | - case DMA_BIDIRECTIONAL: | |
111 | - dma_cache_wback_inv(addr, size); | |
112 | - break; | |
113 | - | |
114 | - default: | |
115 | - BUG(); | |
116 | - } | |
117 | -} | |
118 | - | |
119 | -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
120 | - enum dma_data_direction direction) | |
121 | -{ | |
122 | - unsigned long addr = (unsigned long) ptr; | |
123 | - | |
124 | - switch (direction) { | |
125 | - case DMA_TO_DEVICE: | |
126 | - dma_cache_wback(addr, size); | |
127 | - break; | |
128 | - | |
129 | - case DMA_FROM_DEVICE: | |
130 | - dma_cache_inv(addr, size); | |
131 | - break; | |
132 | - | |
133 | - case DMA_BIDIRECTIONAL: | |
134 | - dma_cache_wback_inv(addr, size); | |
135 | - break; | |
136 | - | |
137 | - default: | |
138 | - BUG(); | |
139 | - } | |
140 | - | |
141 | - addr = virt_to_phys(ptr)&RAM_OFFSET_MASK; | |
142 | - if(dev == NULL) | |
143 | - addr+=CRIME_HI_MEM_BASE; | |
144 | - return (dma_addr_t)addr; | |
145 | -} | |
146 | - | |
147 | -EXPORT_SYMBOL(dma_map_single); | |
148 | - | |
149 | -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
150 | - enum dma_data_direction direction) | |
151 | -{ | |
152 | - switch (direction) { | |
153 | - case DMA_TO_DEVICE: | |
154 | - break; | |
155 | - | |
156 | - case DMA_FROM_DEVICE: | |
157 | - break; | |
158 | - | |
159 | - case DMA_BIDIRECTIONAL: | |
160 | - break; | |
161 | - | |
162 | - default: | |
163 | - BUG(); | |
164 | - } | |
165 | -} | |
166 | - | |
167 | -EXPORT_SYMBOL(dma_unmap_single); | |
168 | - | |
169 | -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
170 | - enum dma_data_direction direction) | |
171 | -{ | |
172 | - int i; | |
173 | - | |
174 | - BUG_ON(direction == DMA_NONE); | |
175 | - | |
176 | - for (i = 0; i < nents; i++, sg++) { | |
177 | - unsigned long addr; | |
178 | - | |
179 | - addr = (unsigned long) page_address(sg->page)+sg->offset; | |
180 | - if (addr) | |
181 | - __dma_sync(addr, sg->length, direction); | |
182 | - addr = __pa(addr)&RAM_OFFSET_MASK; | |
183 | - if(dev == NULL) | |
184 | - addr += CRIME_HI_MEM_BASE; | |
185 | - sg->dma_address = (dma_addr_t)addr; | |
186 | - } | |
187 | - | |
188 | - return nents; | |
189 | -} | |
190 | - | |
191 | -EXPORT_SYMBOL(dma_map_sg); | |
192 | - | |
193 | -dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
194 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
195 | -{ | |
196 | - unsigned long addr; | |
197 | - | |
198 | - BUG_ON(direction == DMA_NONE); | |
199 | - | |
200 | - addr = (unsigned long) page_address(page) + offset; | |
201 | - dma_cache_wback_inv(addr, size); | |
202 | - addr = __pa(addr)&RAM_OFFSET_MASK; | |
203 | - if(dev == NULL) | |
204 | - addr += CRIME_HI_MEM_BASE; | |
205 | - | |
206 | - return (dma_addr_t)addr; | |
207 | -} | |
208 | - | |
209 | -EXPORT_SYMBOL(dma_map_page); | |
210 | - | |
211 | -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
212 | - enum dma_data_direction direction) | |
213 | -{ | |
214 | - BUG_ON(direction == DMA_NONE); | |
215 | - | |
216 | - if (direction != DMA_TO_DEVICE) { | |
217 | - unsigned long addr; | |
218 | - | |
219 | - dma_address&=RAM_OFFSET_MASK; | |
220 | - addr = dma_address + PAGE_OFFSET; | |
221 | - if(dma_address>=256*1024*1024) | |
222 | - addr+=CRIME_HI_MEM_BASE; | |
223 | - dma_cache_wback_inv(addr, size); | |
224 | - } | |
225 | -} | |
226 | - | |
227 | -EXPORT_SYMBOL(dma_unmap_page); | |
228 | - | |
229 | -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
230 | - enum dma_data_direction direction) | |
231 | -{ | |
232 | - unsigned long addr; | |
233 | - int i; | |
234 | - | |
235 | - BUG_ON(direction == DMA_NONE); | |
236 | - | |
237 | - if (direction == DMA_TO_DEVICE) | |
238 | - return; | |
239 | - | |
240 | - for (i = 0; i < nhwentries; i++, sg++) { | |
241 | - addr = (unsigned long) page_address(sg->page); | |
242 | - if (!addr) | |
243 | - continue; | |
244 | - dma_cache_wback_inv(addr + sg->offset, sg->length); | |
245 | - } | |
246 | -} | |
247 | - | |
248 | -EXPORT_SYMBOL(dma_unmap_sg); | |
249 | - | |
250 | -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
251 | - size_t size, enum dma_data_direction direction) | |
252 | -{ | |
253 | - unsigned long addr; | |
254 | - | |
255 | - BUG_ON(direction == DMA_NONE); | |
256 | - | |
257 | - dma_handle&=RAM_OFFSET_MASK; | |
258 | - addr = dma_handle + PAGE_OFFSET; | |
259 | - if(dma_handle>=256*1024*1024) | |
260 | - addr+=CRIME_HI_MEM_BASE; | |
261 | - __dma_sync(addr, size, direction); | |
262 | -} | |
263 | - | |
264 | -EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
265 | - | |
266 | -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
267 | - size_t size, enum dma_data_direction direction) | |
268 | -{ | |
269 | - unsigned long addr; | |
270 | - | |
271 | - BUG_ON(direction == DMA_NONE); | |
272 | - | |
273 | - dma_handle&=RAM_OFFSET_MASK; | |
274 | - addr = dma_handle + PAGE_OFFSET; | |
275 | - if(dma_handle>=256*1024*1024) | |
276 | - addr+=CRIME_HI_MEM_BASE; | |
277 | - __dma_sync(addr, size, direction); | |
278 | -} | |
279 | - | |
280 | -EXPORT_SYMBOL(dma_sync_single_for_device); | |
281 | - | |
282 | -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
283 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
284 | -{ | |
285 | - unsigned long addr; | |
286 | - | |
287 | - BUG_ON(direction == DMA_NONE); | |
288 | - | |
289 | - dma_handle&=RAM_OFFSET_MASK; | |
290 | - addr = dma_handle + offset + PAGE_OFFSET; | |
291 | - if(dma_handle>=256*1024*1024) | |
292 | - addr+=CRIME_HI_MEM_BASE; | |
293 | - __dma_sync(addr, size, direction); | |
294 | -} | |
295 | - | |
296 | -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
297 | - | |
298 | -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
299 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
300 | -{ | |
301 | - unsigned long addr; | |
302 | - | |
303 | - BUG_ON(direction == DMA_NONE); | |
304 | - | |
305 | - dma_handle&=RAM_OFFSET_MASK; | |
306 | - addr = dma_handle + offset + PAGE_OFFSET; | |
307 | - if(dma_handle>=256*1024*1024) | |
308 | - addr+=CRIME_HI_MEM_BASE; | |
309 | - __dma_sync(addr, size, direction); | |
310 | -} | |
311 | - | |
312 | -EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
313 | - | |
314 | -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
315 | - enum dma_data_direction direction) | |
316 | -{ | |
317 | - int i; | |
318 | - | |
319 | - BUG_ON(direction == DMA_NONE); | |
320 | - | |
321 | - /* Make sure that gcc doesn't leave the empty loop body. */ | |
322 | - for (i = 0; i < nelems; i++, sg++) | |
323 | - __dma_sync((unsigned long)page_address(sg->page), | |
324 | - sg->length, direction); | |
325 | -} | |
326 | - | |
327 | -EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
328 | - | |
329 | -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
330 | - enum dma_data_direction direction) | |
331 | -{ | |
332 | - int i; | |
333 | - | |
334 | - BUG_ON(direction == DMA_NONE); | |
335 | - | |
336 | - /* Make sure that gcc doesn't leave the empty loop body. */ | |
337 | - for (i = 0; i < nelems; i++, sg++) | |
338 | - __dma_sync((unsigned long)page_address(sg->page), | |
339 | - sg->length, direction); | |
340 | -} | |
341 | - | |
342 | -EXPORT_SYMBOL(dma_sync_sg_for_device); | |
343 | - | |
344 | -int dma_mapping_error(dma_addr_t dma_addr) | |
345 | -{ | |
346 | - return 0; | |
347 | -} | |
348 | - | |
349 | -EXPORT_SYMBOL(dma_mapping_error); | |
350 | - | |
351 | -int dma_supported(struct device *dev, u64 mask) | |
352 | -{ | |
353 | - /* | |
354 | - * we fall back to GFP_DMA when the mask isn't all 1s, | |
355 | - * so we can't guarantee allocations that must be | |
356 | - * within a tighter range than GFP_DMA.. | |
357 | - */ | |
358 | - if (mask < 0x00ffffff) | |
359 | - return 0; | |
360 | - | |
361 | - return 1; | |
362 | -} | |
363 | - | |
364 | -EXPORT_SYMBOL(dma_supported); | |
365 | - | |
366 | -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | |
367 | -{ | |
368 | - return 1; | |
369 | -} | |
370 | - | |
371 | -EXPORT_SYMBOL(dma_is_consistent); | |
372 | - | |
373 | -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
374 | - enum dma_data_direction direction) | |
375 | -{ | |
376 | - if (direction == DMA_NONE) | |
377 | - return; | |
378 | - | |
379 | - dma_cache_wback_inv((unsigned long)vaddr, size); | |
380 | -} | |
381 | - | |
382 | -EXPORT_SYMBOL(dma_cache_sync); |
arch/mips/mm/dma-noncoherent.c
1 | -/* | |
2 | - * This file is subject to the terms and conditions of the GNU General Public | |
3 | - * License. See the file "COPYING" in the main directory of this archive | |
4 | - * for more details. | |
5 | - * | |
6 | - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | |
8 | - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
9 | - */ | |
10 | -#include <linux/types.h> | |
11 | -#include <linux/mm.h> | |
12 | -#include <linux/module.h> | |
13 | -#include <linux/string.h> | |
14 | -#include <linux/dma-mapping.h> | |
15 | - | |
16 | -#include <asm/cache.h> | |
17 | -#include <asm/io.h> | |
18 | - | |
19 | -/* | |
20 | - * Warning on the terminology - Linux calls an uncached area coherent; | |
21 | - * MIPS terminology calls memory areas with hardware maintained coherency | |
22 | - * coherent. | |
23 | - */ | |
24 | - | |
25 | -void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
26 | - dma_addr_t * dma_handle, gfp_t gfp) | |
27 | -{ | |
28 | - void *ret; | |
29 | - /* ignore region specifiers */ | |
30 | - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | |
31 | - | |
32 | - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | |
33 | - gfp |= GFP_DMA; | |
34 | - ret = (void *) __get_free_pages(gfp, get_order(size)); | |
35 | - | |
36 | - if (ret != NULL) { | |
37 | - memset(ret, 0, size); | |
38 | - *dma_handle = virt_to_phys(ret); | |
39 | - } | |
40 | - | |
41 | - return ret; | |
42 | -} | |
43 | - | |
44 | -EXPORT_SYMBOL(dma_alloc_noncoherent); | |
45 | - | |
46 | -void *dma_alloc_coherent(struct device *dev, size_t size, | |
47 | - dma_addr_t * dma_handle, gfp_t gfp) | |
48 | -{ | |
49 | - void *ret; | |
50 | - | |
51 | - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp); | |
52 | - if (ret) { | |
53 | - dma_cache_wback_inv((unsigned long) ret, size); | |
54 | - ret = UNCAC_ADDR(ret); | |
55 | - } | |
56 | - | |
57 | - return ret; | |
58 | -} | |
59 | - | |
60 | -EXPORT_SYMBOL(dma_alloc_coherent); | |
61 | - | |
62 | -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
63 | - dma_addr_t dma_handle) | |
64 | -{ | |
65 | - free_pages((unsigned long) vaddr, get_order(size)); | |
66 | -} | |
67 | - | |
68 | -EXPORT_SYMBOL(dma_free_noncoherent); | |
69 | - | |
70 | -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
71 | - dma_addr_t dma_handle) | |
72 | -{ | |
73 | - unsigned long addr = (unsigned long) vaddr; | |
74 | - | |
75 | - addr = CAC_ADDR(addr); | |
76 | - free_pages(addr, get_order(size)); | |
77 | -} | |
78 | - | |
79 | -EXPORT_SYMBOL(dma_free_coherent); | |
80 | - | |
81 | -static inline void __dma_sync(unsigned long addr, size_t size, | |
82 | - enum dma_data_direction direction) | |
83 | -{ | |
84 | - switch (direction) { | |
85 | - case DMA_TO_DEVICE: | |
86 | - dma_cache_wback(addr, size); | |
87 | - break; | |
88 | - | |
89 | - case DMA_FROM_DEVICE: | |
90 | - dma_cache_inv(addr, size); | |
91 | - break; | |
92 | - | |
93 | - case DMA_BIDIRECTIONAL: | |
94 | - dma_cache_wback_inv(addr, size); | |
95 | - break; | |
96 | - | |
97 | - default: | |
98 | - BUG(); | |
99 | - } | |
100 | -} | |
101 | - | |
102 | -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
103 | - enum dma_data_direction direction) | |
104 | -{ | |
105 | - unsigned long addr = (unsigned long) ptr; | |
106 | - | |
107 | - __dma_sync(addr, size, direction); | |
108 | - | |
109 | - return virt_to_phys(ptr); | |
110 | -} | |
111 | - | |
112 | -EXPORT_SYMBOL(dma_map_single); | |
113 | - | |
114 | -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
115 | - enum dma_data_direction direction) | |
116 | -{ | |
117 | - unsigned long addr; | |
118 | - addr = dma_addr + PAGE_OFFSET; | |
119 | - | |
120 | - //__dma_sync(addr, size, direction); | |
121 | -} | |
122 | - | |
123 | -EXPORT_SYMBOL(dma_unmap_single); | |
124 | - | |
125 | -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
126 | - enum dma_data_direction direction) | |
127 | -{ | |
128 | - int i; | |
129 | - | |
130 | - BUG_ON(direction == DMA_NONE); | |
131 | - | |
132 | - for (i = 0; i < nents; i++, sg++) { | |
133 | - unsigned long addr; | |
134 | - | |
135 | - addr = (unsigned long) page_address(sg->page); | |
136 | - if (addr) { | |
137 | - __dma_sync(addr + sg->offset, sg->length, direction); | |
138 | - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) | |
139 | - + sg->offset; | |
140 | - } | |
141 | - } | |
142 | - | |
143 | - return nents; | |
144 | -} | |
145 | - | |
146 | -EXPORT_SYMBOL(dma_map_sg); | |
147 | - | |
148 | -dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
149 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
150 | -{ | |
151 | - unsigned long addr; | |
152 | - | |
153 | - BUG_ON(direction == DMA_NONE); | |
154 | - | |
155 | - addr = (unsigned long) page_address(page) + offset; | |
156 | - dma_cache_wback_inv(addr, size); | |
157 | - | |
158 | - return page_to_phys(page) + offset; | |
159 | -} | |
160 | - | |
161 | -EXPORT_SYMBOL(dma_map_page); | |
162 | - | |
163 | -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
164 | - enum dma_data_direction direction) | |
165 | -{ | |
166 | - BUG_ON(direction == DMA_NONE); | |
167 | - | |
168 | - if (direction != DMA_TO_DEVICE) { | |
169 | - unsigned long addr; | |
170 | - | |
171 | - addr = dma_address + PAGE_OFFSET; | |
172 | - dma_cache_wback_inv(addr, size); | |
173 | - } | |
174 | -} | |
175 | - | |
176 | -EXPORT_SYMBOL(dma_unmap_page); | |
177 | - | |
178 | -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
179 | - enum dma_data_direction direction) | |
180 | -{ | |
181 | - unsigned long addr; | |
182 | - int i; | |
183 | - | |
184 | - BUG_ON(direction == DMA_NONE); | |
185 | - | |
186 | - if (direction == DMA_TO_DEVICE) | |
187 | - return; | |
188 | - | |
189 | - for (i = 0; i < nhwentries; i++, sg++) { | |
190 | - addr = (unsigned long) page_address(sg->page); | |
191 | - if (addr) | |
192 | - __dma_sync(addr + sg->offset, sg->length, direction); | |
193 | - } | |
194 | -} | |
195 | - | |
196 | -EXPORT_SYMBOL(dma_unmap_sg); | |
197 | - | |
198 | -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
199 | - size_t size, enum dma_data_direction direction) | |
200 | -{ | |
201 | - unsigned long addr; | |
202 | - | |
203 | - BUG_ON(direction == DMA_NONE); | |
204 | - | |
205 | - addr = dma_handle + PAGE_OFFSET; | |
206 | - __dma_sync(addr, size, direction); | |
207 | -} | |
208 | - | |
209 | -EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
210 | - | |
211 | -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
212 | - size_t size, enum dma_data_direction direction) | |
213 | -{ | |
214 | - unsigned long addr; | |
215 | - | |
216 | - BUG_ON(direction == DMA_NONE); | |
217 | - | |
218 | - addr = dma_handle + PAGE_OFFSET; | |
219 | - __dma_sync(addr, size, direction); | |
220 | -} | |
221 | - | |
222 | -EXPORT_SYMBOL(dma_sync_single_for_device); | |
223 | - | |
224 | -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
225 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
226 | -{ | |
227 | - unsigned long addr; | |
228 | - | |
229 | - BUG_ON(direction == DMA_NONE); | |
230 | - | |
231 | - addr = dma_handle + offset + PAGE_OFFSET; | |
232 | - __dma_sync(addr, size, direction); | |
233 | -} | |
234 | - | |
235 | -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
236 | - | |
237 | -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
238 | - unsigned long offset, size_t size, enum dma_data_direction direction) | |
239 | -{ | |
240 | - unsigned long addr; | |
241 | - | |
242 | - BUG_ON(direction == DMA_NONE); | |
243 | - | |
244 | - addr = dma_handle + offset + PAGE_OFFSET; | |
245 | - __dma_sync(addr, size, direction); | |
246 | -} | |
247 | - | |
248 | -EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
249 | - | |
250 | -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
251 | - enum dma_data_direction direction) | |
252 | -{ | |
253 | - int i; | |
254 | - | |
255 | - BUG_ON(direction == DMA_NONE); | |
256 | - | |
257 | - /* Make sure that gcc doesn't leave the empty loop body. */ | |
258 | - for (i = 0; i < nelems; i++, sg++) | |
259 | - __dma_sync((unsigned long)page_address(sg->page), | |
260 | - sg->length, direction); | |
261 | -} | |
262 | - | |
263 | -EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
264 | - | |
265 | -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
266 | - enum dma_data_direction direction) | |
267 | -{ | |
268 | - int i; | |
269 | - | |
270 | - BUG_ON(direction == DMA_NONE); | |
271 | - | |
272 | - /* Make sure that gcc doesn't leave the empty loop body. */ | |
273 | - for (i = 0; i < nelems; i++, sg++) | |
274 | - __dma_sync((unsigned long)page_address(sg->page), | |
275 | - sg->length, direction); | |
276 | -} | |
277 | - | |
278 | -EXPORT_SYMBOL(dma_sync_sg_for_device); | |
279 | - | |
280 | -int dma_mapping_error(dma_addr_t dma_addr) | |
281 | -{ | |
282 | - return 0; | |
283 | -} | |
284 | - | |
285 | -EXPORT_SYMBOL(dma_mapping_error); | |
286 | - | |
287 | -int dma_supported(struct device *dev, u64 mask) | |
288 | -{ | |
289 | - /* | |
290 | - * we fall back to GFP_DMA when the mask isn't all 1s, | |
291 | - * so we can't guarantee allocations that must be | |
292 | - * within a tighter range than GFP_DMA.. | |
293 | - */ | |
294 | - if (mask < 0x00ffffff) | |
295 | - return 0; | |
296 | - | |
297 | - return 1; | |
298 | -} | |
299 | - | |
300 | -EXPORT_SYMBOL(dma_supported); | |
301 | - | |
302 | -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | |
303 | -{ | |
304 | - return 1; | |
305 | -} | |
306 | - | |
307 | -EXPORT_SYMBOL(dma_is_consistent); | |
308 | - | |
309 | -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
310 | - enum dma_data_direction direction) | |
311 | -{ | |
312 | - if (direction == DMA_NONE) | |
313 | - return; | |
314 | - | |
315 | - dma_cache_wback_inv((unsigned long)vaddr, size); | |
316 | -} | |
317 | - | |
318 | -EXPORT_SYMBOL(dma_cache_sync); | |
319 | - | |
320 | -/* The DAC routines are a PCIism.. */ | |
321 | - | |
322 | -#ifdef CONFIG_PCI | |
323 | - | |
324 | -#include <linux/pci.h> | |
325 | - | |
326 | -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | |
327 | - struct page *page, unsigned long offset, int direction) | |
328 | -{ | |
329 | - return (dma64_addr_t)page_to_phys(page) + offset; | |
330 | -} | |
331 | - | |
332 | -EXPORT_SYMBOL(pci_dac_page_to_dma); | |
333 | - | |
334 | -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | |
335 | - dma64_addr_t dma_addr) | |
336 | -{ | |
337 | - return mem_map + (dma_addr >> PAGE_SHIFT); | |
338 | -} | |
339 | - | |
340 | -EXPORT_SYMBOL(pci_dac_dma_to_page); | |
341 | - | |
342 | -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | |
343 | - dma64_addr_t dma_addr) | |
344 | -{ | |
345 | - return dma_addr & ~PAGE_MASK; | |
346 | -} | |
347 | - | |
348 | -EXPORT_SYMBOL(pci_dac_dma_to_offset); | |
349 | - | |
350 | -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | |
351 | - dma64_addr_t dma_addr, size_t len, int direction) | |
352 | -{ | |
353 | - BUG_ON(direction == PCI_DMA_NONE); | |
354 | - | |
355 | - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); | |
356 | -} | |
357 | - | |
358 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | |
359 | - | |
360 | -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | |
361 | - dma64_addr_t dma_addr, size_t len, int direction) | |
362 | -{ | |
363 | - BUG_ON(direction == PCI_DMA_NONE); | |
364 | - | |
365 | - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); | |
366 | -} | |
367 | - | |
368 | -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); | |
369 | - | |
370 | -#endif /* CONFIG_PCI */ |
arch/mips/pci/Makefile
arch/mips/pci/pci-dac.c
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
7 | + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> | |
8 | + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | |
9 | + */ | |
10 | + | |
11 | +#include <linux/types.h> | |
12 | +#include <linux/dma-mapping.h> | |
13 | +#include <linux/mm.h> | |
14 | +#include <linux/module.h> | |
15 | +#include <linux/string.h> | |
16 | + | |
17 | +#include <asm/cache.h> | |
18 | +#include <asm/io.h> | |
19 | + | |
20 | +#include <linux/pci.h> | |
21 | + | |
22 | +#include <dma-coherence.h> | |
23 | + | |
24 | +dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | |
25 | + struct page *page, unsigned long offset, int direction) | |
26 | +{ | |
27 | + struct device *dev = &pdev->dev; | |
28 | + | |
29 | + BUG_ON(direction == DMA_NONE); | |
30 | + | |
31 | + if (!plat_device_is_coherent(dev)) { | |
32 | + unsigned long addr; | |
33 | + | |
34 | +		addr = (unsigned long) page_address(page); | |
35 | +		dma_cache_wback_inv(addr, PAGE_SIZE);	/* flush the whole page, not past it */ | |
36 | + } | |
37 | + | |
38 | + return plat_map_dma_mem_page(dev, page) + offset; | |
39 | +} | |
40 | + | |
41 | +EXPORT_SYMBOL(pci_dac_page_to_dma); | |
42 | + | |
43 | +struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | |
44 | + dma64_addr_t dma_addr) | |
45 | +{ | |
46 | + return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT); | |
47 | +} | |
48 | + | |
49 | +EXPORT_SYMBOL(pci_dac_dma_to_page); | |
50 | + | |
51 | +unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | |
52 | + dma64_addr_t dma_addr) | |
53 | +{ | |
54 | + return dma_addr & ~PAGE_MASK; | |
55 | +} | |
56 | + | |
57 | +EXPORT_SYMBOL(pci_dac_dma_to_offset); | |
58 | + | |
59 | +void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | |
60 | + dma64_addr_t dma_addr, size_t len, int direction) | |
61 | +{ | |
62 | + BUG_ON(direction == PCI_DMA_NONE); | |
63 | + | |
64 | + if (!plat_device_is_coherent(&pdev->dev)) | |
65 | +		dma_cache_wback_inv(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, len); | |
66 | +} | |
67 | + | |
68 | +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | |
69 | + | |
70 | +void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | |
71 | + dma64_addr_t dma_addr, size_t len, int direction) | |
72 | +{ | |
73 | + BUG_ON(direction == PCI_DMA_NONE); | |
74 | + | |
75 | + if (!plat_device_is_coherent(&pdev->dev)) | |
76 | +		dma_cache_wback_inv(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, len); | |
77 | +} | |
78 | + | |
79 | +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); |
include/asm-mips/mach-generic/dma-coherence.h
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> | |
7 | + * | |
8 | + */ | |
9 | +#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H | |
10 | +#define __ASM_MACH_GENERIC_DMA_COHERENCE_H | |
11 | + | |
12 | +struct device; | |
13 | + | |
14 | +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) | |
15 | +{ | |
16 | + return virt_to_phys(addr); | |
17 | +} | |
18 | + | |
19 | +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) | |
20 | +{ | |
21 | + return page_to_phys(page); | |
22 | +} | |
23 | + | |
24 | +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) | |
25 | +{ | |
26 | + return dma_addr; | |
27 | +} | |
28 | + | |
29 | +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr) | |
30 | +{ | |
31 | +} | |
32 | + | |
33 | +static inline int plat_device_is_coherent(struct device *dev) | |
34 | +{ | |
35 | +#ifdef CONFIG_DMA_COHERENT | |
36 | +	return 1; | |
37 | +#else | |
38 | +	/* CONFIG_DMA_NONCOHERENT; also avoids falling off the end */ | |
39 | +	return 0; | |
40 | +#endif | |
41 | +} | |
42 | + | |
43 | +#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */ |
include/asm-mips/mach-generic/kmalloc.h
include/asm-mips/mach-ip27/dma-coherence.h
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> | |
7 | + * | |
8 | + */ | |
9 | +#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H | |
10 | +#define __ASM_MACH_IP27_DMA_COHERENCE_H | |
11 | + | |
12 | +#include <asm/pci/bridge.h> | |
13 | + | |
14 | +#define pdev_to_baddr(pdev, addr) \ | |
15 | +	(BRIDGE_CONTROLLER((pdev)->bus)->baddr + (addr)) | |
16 | +#define dev_to_baddr(dev, addr) \ | |
17 | + pdev_to_baddr(to_pci_dev(dev), (addr)) | |
18 | + | |
19 | +struct device; | |
20 | + | |
21 | +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) | |
22 | +{ | |
23 | + dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr)); | |
24 | + | |
25 | + return pa; | |
26 | +} | |
27 | + | |
28 | +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) | |
29 | +{ | |
30 | + dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page)); | |
31 | + | |
32 | + return pa; | |
33 | +} | |
34 | + | |
35 | +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) | |
36 | +{ | |
37 | +	return dma_addr & ~(0xffUL << 56);	/* strip the bridge attribute byte */ | |
38 | +} | |
39 | + | |
40 | +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr) | |
41 | +{ | |
42 | +} | |
43 | + | |
44 | +static inline int plat_device_is_coherent(struct device *dev) | |
45 | +{ | |
46 | +	return 1;		/* IP27 non-coherent mode is unsupported */ | |
47 | +} | |
48 | + | |
49 | +#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */ |
include/asm-mips/mach-ip32/dma-coherence.h
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> | |
7 | + * | |
8 | + */ | |
9 | +#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H | |
10 | +#define __ASM_MACH_IP32_DMA_COHERENCE_H | |
11 | + | |
12 | +#include <asm/ip32/crime.h> | |
13 | + | |
14 | +struct device; | |
15 | + | |
16 | +/* | |
17 | + * A few notes: | |
18 | + * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M | |
19 | + * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for | |
20 | + * native-endian) | |
21 | + * 3. All other devices see memory as one big chunk at 0x40000000 | |
22 | + * 4. Non-PCI devices will pass NULL as struct device* | |
23 | + * | |
24 | + * Thus we translate differently, depending on device. | |
25 | + */ | |
26 | + | |
27 | +#define RAM_OFFSET_MASK 0x3fffffffUL | |
28 | + | |
29 | +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) | |
30 | +{ | |
31 | + dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK; | |
32 | + | |
33 | + if (dev == NULL) | |
34 | + pa += CRIME_HI_MEM_BASE; | |
35 | + | |
36 | + return pa; | |
37 | +} | |
38 | + | |
39 | +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) | |
40 | +{ | |
41 | + dma_addr_t pa; | |
42 | + | |
43 | + pa = page_to_phys(page) & RAM_OFFSET_MASK; | |
44 | + | |
45 | + if (dev == NULL) | |
46 | + pa += CRIME_HI_MEM_BASE; | |
47 | + | |
48 | + return pa; | |
49 | +} | |
50 | + | |
51 | +/* This is almost certainly wrong but it's what dma-ip32.c used to use */ | |
52 | +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) | |
53 | +{ | |
54 | + unsigned long addr = dma_addr & RAM_OFFSET_MASK; | |
55 | + | |
56 | + if (dma_addr >= 256*1024*1024) | |
57 | + addr += CRIME_HI_MEM_BASE; | |
58 | + | |
59 | + return addr; | |
60 | +} | |
61 | + | |
62 | +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr) | |
63 | +{ | |
64 | +} | |
65 | + | |
66 | +static inline int plat_device_is_coherent(struct device *dev) | |
67 | +{ | |
68 | +	return 0;	/* IP32 is non-coherent */ | |
69 | +} | |
70 | + | |
71 | +#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */ | |
include/asm-mips/mach-jazz/dma-coherence.h
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file "COPYING" in the main directory of this archive | |
4 | + * for more details. | |
5 | + * | |
6 | + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> | |
7 | + */ | |
8 | +#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H | |
9 | +#define __ASM_MACH_JAZZ_DMA_COHERENCE_H | |
10 | + | |
11 | +#include <asm/jazzdma.h> | |
12 | + | |
13 | +struct device; | |
14 | + | |
15 | +static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size) | |
16 | +{ | |
17 | + return vdma_alloc(virt_to_phys(addr), size); | |
18 | +} | |
19 | + | |
20 | +static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) | |
21 | +{ | |
22 | + return vdma_alloc(page_to_phys(page), PAGE_SIZE); | |
23 | +} | |
24 | + | |
25 | +static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) | |
26 | +{ | |
27 | + return vdma_log2phys(dma_addr); | |
28 | +} | |
29 | + | |
30 | +static inline void plat_unmap_dma_mem(dma_addr_t dma_addr) | |
31 | +{ | |
32 | + vdma_free(dma_addr); | |
33 | +} | |
34 | + | |
35 | +static inline int plat_device_is_coherent(struct device *dev) | |
36 | +{ | |
37 | + return 0; | |
38 | +} | |
39 | + | |
40 | +#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */ |