Commit 971121f27179508beb18b86b84e1f8399d7b3875

Authored by Paul Mundt
1 parent 2eb2a43682

sh: Fix up more dma-mapping fallout.

commit dbe6f1869188b6e04e38aa861dd198befb08bcd7
("dma-mapping: mark dma_sync_single and dma_sync_sg as deprecated")
conveniently broke every single SH build.

In the future it would be great if people could at least bother
figuring out how to use grep.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
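
For context, a minimal sketch of the failure mode. Judging from the referenced commit's subject (an assumption; the generic-header change itself is not shown here), <linux/dma-mapping.h> grew deprecated dma_sync_single()/dma_sync_sg() compatibility wrappers, which then collided with the same-named static inlines this SH header had been defining, so the fix simply renames the arch-internal helpers to __dma_sync_single()/__dma_sync_sg(). The standalone C program below mimics that collision-avoidance pattern with invented, simplified signatures; it is an illustration, not kernel code.

/* Illustration only: simplified stand-ins for the kernel helpers. */
#include <stdio.h>

/* Stand-in for the generic layer's (deprecated) compatibility wrapper. */
static inline void dma_sync_single(int handle)
{
        printf("generic wrapper, handle=%d\n", handle);
}

/*
 * Stand-in for the arch-private helper. With the double-underscore name it
 * no longer clashes with the wrapper above; keeping the old public name
 * here would presumably have been a redefinition error once both headers
 * were pulled in, which is what broke the SH builds.
 */
static inline void __dma_sync_single(int handle)
{
        printf("arch-internal cache maintenance, handle=%d\n", handle);
}

int main(void)
{
        dma_sync_single(1);     /* external callers keep the public name */
        __dma_sync_single(2);   /* arch code calls its renamed helper */
        return 0;
}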

Showing 1 changed file with 6 additions and 6 deletions

arch/sh/include/asm/dma-mapping.h
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,220 +1,220 @@
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
 
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm-generic/dma-coherent.h>
 
 extern struct bus_type pci_bus_type;
 
 #define dma_supported(dev, mask)        (1)
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
         if (!dev->dma_mask || !dma_supported(dev, mask))
                 return -EIO;
 
         *dev->dma_mask = mask;
 
         return 0;
 }
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t flag);
 
 void dma_free_coherent(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                     enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
 static inline dma_addr_t dma_map_single(struct device *dev,
                                         void *ptr, size_t size,
                                         enum dma_data_direction dir)
 {
         dma_addr_t addr = virt_to_phys(ptr);
 
 #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
         if (dev->bus == &pci_bus_type)
                 return addr;
 #endif
         dma_cache_sync(dev, ptr, size, dir);
 
         debug_dma_map_page(dev, virt_to_page(ptr),
                            (unsigned long)ptr & ~PAGE_MASK, size,
                            dir, addr, true);
 
         return addr;
 }
 
 static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
                                     size_t size, enum dma_data_direction dir)
 {
         debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
 static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                              int nents, enum dma_data_direction dir)
 {
         int i;
 
         for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
 #endif
                 sg[i].dma_address = sg_phys(&sg[i]);
                 sg[i].dma_length = sg[i].length;
         }
 
         debug_dma_map_sg(dev, sg, nents, i, dir);
 
         return nents;
 }
 
 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                 int nents, enum dma_data_direction dir)
 {
         debug_dma_unmap_sg(dev, sg, nents, dir);
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir)
 {
         return dma_map_single(dev, page_address(page) + offset, size, dir);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                   size_t size, enum dma_data_direction dir)
 {
         dma_unmap_single(dev, dma_address, size, dir);
 }
 
-static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
+static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                    size_t size, enum dma_data_direction dir)
 {
 #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
         if (dev->bus == &pci_bus_type)
                 return;
 #endif
         dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
 }
 
 static inline void dma_sync_single_range(struct device *dev,
                                          dma_addr_t dma_handle,
                                          unsigned long offset, size_t size,
                                          enum dma_data_direction dir)
 {
 #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
         if (dev->bus == &pci_bus_type)
                 return;
 #endif
         dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
 }
 
-static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
+static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
                                int nelems, enum dma_data_direction dir)
 {
         int i;
 
         for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
 #endif
                 sg[i].dma_address = sg_phys(&sg[i]);
                 sg[i].dma_length = sg[i].length;
         }
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
                                            dma_addr_t dma_handle, size_t size,
                                            enum dma_data_direction dir)
 {
-        dma_sync_single(dev, dma_handle, size, dir);
+        __dma_sync_single(dev, dma_handle, size, dir);
         debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
                                               dma_addr_t dma_handle,
                                               size_t size,
                                               enum dma_data_direction dir)
 {
-        dma_sync_single(dev, dma_handle, size, dir);
+        __dma_sync_single(dev, dma_handle, size, dir);
         debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                  dma_addr_t dma_handle,
                                                  unsigned long offset,
                                                  size_t size,
                                                  enum dma_data_direction direction)
 {
         dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
         debug_dma_sync_single_range_for_cpu(dev, dma_handle,
                                             offset, size, direction);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
                                                     dma_addr_t dma_handle,
                                                     unsigned long offset,
                                                     size_t size,
                                                     enum dma_data_direction direction)
 {
         dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
         debug_dma_sync_single_range_for_device(dev, dma_handle,
                                                offset, size, direction);
 }
 
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
                                        struct scatterlist *sg, int nelems,
                                        enum dma_data_direction dir)
 {
-        dma_sync_sg(dev, sg, nelems, dir);
+        __dma_sync_sg(dev, sg, nelems, dir);
         debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
                                           struct scatterlist *sg, int nelems,
                                           enum dma_data_direction dir)
 {
-        dma_sync_sg(dev, sg, nelems, dir);
+        __dma_sync_sg(dev, sg, nelems, dir);
         debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 }
 
 static inline int dma_get_cache_alignment(void)
 {
         /*
          * Each processor family will define its own L1_CACHE_SHIFT,
          * L1_CACHE_BYTES wraps to this, so this is always safe.
          */
         return L1_CACHE_BYTES;
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
         return dma_addr == 0;
 }
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
 
 extern int
 dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                             dma_addr_t device_addr, size_t size, int flags);
 
 extern void
 dma_release_declared_memory(struct device *dev);
 
 extern void *
 dma_mark_declared_memory_occupied(struct device *dev,
                                   dma_addr_t device_addr, size_t size);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
 
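
A brief usage note: the public entry points are unchanged by this fix. Drivers are expected to go through the non-deprecated _for_cpu/_for_device sync calls that this header still provides; only the arch-internal helpers were renamed. The sketch below shows that pattern with an invented driver function (example_rx, buf, len are illustrative and not from this commit) and assumes a kernel build environment.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical receive path: map a buffer, let the device DMA into it,
 * hand it to the CPU, then tear the mapping down.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... device performs DMA into buf ... */

        /*
         * Make the DMA'd data visible to the CPU; this is the supported
         * replacement for calling the deprecated dma_sync_single().
         */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... CPU inspects buf ... */

        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}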