Commit 88f3907f6f447899544beadf491dccb32015dacb

Authored by FUJITA Tomonori
Committed by Joerg Roedel
1 parent 884d05970b

dma-debug: fix debug_dma_sync_sg_for_cpu and debug_dma_sync_sg_for_device

DMA-mapping.txt says that the dma_sync_sg family must be called with
the _same_ 'nents' that was passed into the dma_map_sg call; it must
_NOT_ be the 'count' value _returned_ from the dma_map_sg call.

debug_dma_sync_sg_for_cpu and debug_dma_sync_sg_for_device don't
handle this properly; they need to use the sg_mapped_ents recorded in
struct dma_debug_entry, as debug_dma_unmap_sg() does.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
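
For illustration, a minimal driver-side sketch of the convention in
question (the function and parameter names are placeholders, not part
of this commit):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Hypothetical example: map, sync and unmap a scatterlist. */
    static int example_rx(struct device *dev, struct scatterlist *sgl, int nents)
    {
            /* dma_map_sg() may return fewer segments than it was given,
             * e.g. when an IOMMU merges adjacent entries. */
            int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

            if (!mapped)
                    return -ENOMEM;

            /* ... device DMAs into the buffers ... */

            /* Sync and unmap with the original 'nents', never 'mapped'. */
            dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
            dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
            return 0;
    }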

Showing 1 changed file with 37 additions and 11 deletions: lib/dma-debug.c

--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
 /*
  * Copyright (C) 2008 Advanced Micro Devices, Inc.
  *
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/stacktrace.h>
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/list.h>
 #include <linux/slab.h>

 #include <asm/sections.h>

 #define HASH_SIZE       1024ULL
 #define HASH_FN_SHIFT   13
 #define HASH_FN_MASK    (HASH_SIZE - 1)

 enum {
         dma_debug_single,
         dma_debug_page,
         dma_debug_sg,
         dma_debug_coherent,
 };

 #define DMA_DEBUG_STACKTRACE_ENTRIES 5

 struct dma_debug_entry {
         struct list_head list;
         struct device    *dev;
         int              type;
         phys_addr_t      paddr;
         u64              dev_addr;
         u64              size;
         int              direction;
         int              sg_call_ents;
         int              sg_mapped_ents;
 #ifdef CONFIG_STACKTRACE
         struct           stack_trace stacktrace;
         unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };

 struct hash_bucket {
         struct list_head list;
         spinlock_t lock;
 } ____cacheline_aligned_in_smp;

 /* Hash list to save the allocated dma addresses */
 static struct hash_bucket dma_entry_hash[HASH_SIZE];
 /* List of pre-allocated dma_debug_entry's */
 static LIST_HEAD(free_entries);
 /* Lock for the list above */
 static DEFINE_SPINLOCK(free_entries_lock);

 /* Global disable flag - will be set in case of an error */
 static bool global_disable __read_mostly;

 /* Global error count */
 static u32 error_count;

 /* Global error show enable*/
 static u32 show_all_errors __read_mostly;
 /* Number of errors to show */
 static u32 show_num_errors = 1;

 static u32 num_free_entries;
 static u32 min_free_entries;
 static u32 nr_total_entries;

 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;

 /* debugfs dentry's for the stuff above */
 static struct dentry *dma_debug_dent        __read_mostly;
 static struct dentry *global_disable_dent   __read_mostly;
 static struct dentry *error_count_dent      __read_mostly;
 static struct dentry *show_all_errors_dent  __read_mostly;
 static struct dentry *show_num_errors_dent  __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;

 static const char *type2name[4] = { "single", "page",
                                     "scather-gather", "coherent" };

 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                    "DMA_FROM_DEVICE", "DMA_NONE" };

 /* little merge helper - remove it after the merge window */
 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
 #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
 #endif

 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
  * writeable. This is also the reason why a lock won't help much. But anyway,
  * the races are no big deal. Here is why:
  *
  *   error_count: the addition is racy, but the worst thing that can happen is
  *                that we don't count some errors
  *   show_num_errors: the subtraction is racy. Also no big deal because in
  *                    worst case this will result in one warning more in the
  *                    system log than the user configured. This variable is
  *                    writeable via debugfs.
  */
 static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
         if (entry) {
                 printk(KERN_WARNING "Mapped at:\n");
                 print_stack_trace(&entry->stacktrace, 0);
         }
 #endif
 }

 #define err_printk(dev, entry, format, arg...) do {            \
                 error_count += 1;                               \
                 if (show_all_errors || show_num_errors > 0) {   \
                         WARN(1, "%s %s: " format,               \
                              dev_driver_string(dev),            \
                              dev_name(dev) , ## arg);           \
                         dump_entry_trace(entry);                \
                 }                                               \
                 if (!show_all_errors && show_num_errors > 0)    \
                         show_num_errors -= 1;                   \
         } while (0);

 /*
  * Hash related functions
  *
  * Every DMA-API request is saved into a struct dma_debug_entry. To
  * have quick access to these structs they are stored into a hash.
  */
 static int hash_fn(struct dma_debug_entry *entry)
 {
         /*
          * Hash function is based on the dma address.
          * We use bits 20-27 here as the index into the hash
          */
         return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
 }

 /*
  * Request exclusive access to a hash bucket for a given dma_debug_entry.
  */
 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                            unsigned long *flags)
 {
         int idx = hash_fn(entry);
         unsigned long __flags;

         spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
         *flags = __flags;
         return &dma_entry_hash[idx];
 }

 /*
  * Give up exclusive access to the hash bucket
  */
 static void put_hash_bucket(struct hash_bucket *bucket,
                             unsigned long *flags)
 {
         unsigned long __flags = *flags;

         spin_unlock_irqrestore(&bucket->lock, __flags);
 }

 /*
  * Search a given entry in the hash bucket list
  */
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
 {
         struct dma_debug_entry *entry;

         list_for_each_entry(entry, &bucket->list, list) {
                 if ((entry->dev_addr == ref->dev_addr) &&
                     (entry->dev == ref->dev))
                         return entry;
         }

         return NULL;
 }

 /*
  * Add an entry to a hash bucket
  */
 static void hash_bucket_add(struct hash_bucket *bucket,
                             struct dma_debug_entry *entry)
 {
         list_add_tail(&entry->list, &bucket->list);
 }

 /*
  * Remove entry from a hash bucket list
  */
 static void hash_bucket_del(struct dma_debug_entry *entry)
 {
         list_del(&entry->list);
 }

 /*
  * Dump mapping entries for debugging purposes
  */
 void debug_dma_dump_mappings(struct device *dev)
 {
         int idx;

         for (idx = 0; idx < HASH_SIZE; idx++) {
                 struct hash_bucket *bucket = &dma_entry_hash[idx];
                 struct dma_debug_entry *entry;
                 unsigned long flags;

                 spin_lock_irqsave(&bucket->lock, flags);

                 list_for_each_entry(entry, &bucket->list, list) {
                         if (!dev || dev == entry->dev) {
                                 dev_info(entry->dev,
                                          "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                          type2name[entry->type], idx,
                                          (unsigned long long)entry->paddr,
                                          entry->dev_addr, entry->size,
                                          dir2name[entry->direction]);
                         }
                 }

                 spin_unlock_irqrestore(&bucket->lock, flags);
         }
 }
 EXPORT_SYMBOL(debug_dma_dump_mappings);

 /*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
 static void add_dma_entry(struct dma_debug_entry *entry)
 {
         struct hash_bucket *bucket;
         unsigned long flags;

         bucket = get_hash_bucket(entry, &flags);
         hash_bucket_add(bucket, entry);
         put_hash_bucket(bucket, &flags);
 }

 static struct dma_debug_entry *__dma_entry_alloc(void)
 {
         struct dma_debug_entry *entry;

         entry = list_entry(free_entries.next, struct dma_debug_entry, list);
         list_del(&entry->list);
         memset(entry, 0, sizeof(*entry));

         num_free_entries -= 1;
         if (num_free_entries < min_free_entries)
                 min_free_entries = num_free_entries;

         return entry;
 }

 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
  * struct dma_debug_entries.
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
         struct dma_debug_entry *entry = NULL;
         unsigned long flags;

         spin_lock_irqsave(&free_entries_lock, flags);

         if (list_empty(&free_entries)) {
                 printk(KERN_ERR "DMA-API: debugging out of memory "
                                 "- disabling\n");
                 global_disable = true;
                 goto out;
         }

         entry = __dma_entry_alloc();

 #ifdef CONFIG_STACKTRACE
         entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
         entry->stacktrace.entries = entry->st_entries;
         entry->stacktrace.skip = 2;
         save_stack_trace(&entry->stacktrace);
 #endif

 out:
         spin_unlock_irqrestore(&free_entries_lock, flags);

         return entry;
 }

 static void dma_entry_free(struct dma_debug_entry *entry)
 {
         unsigned long flags;

         /*
          * add to beginning of the list - this way the entries are
          * more likely cache hot when they are reallocated.
          */
         spin_lock_irqsave(&free_entries_lock, flags);
         list_add(&entry->list, &free_entries);
         num_free_entries += 1;
         spin_unlock_irqrestore(&free_entries_lock, flags);
 }

 int dma_debug_resize_entries(u32 num_entries)
 {
         int i, delta, ret = 0;
         unsigned long flags;
         struct dma_debug_entry *entry;
         LIST_HEAD(tmp);

         spin_lock_irqsave(&free_entries_lock, flags);

         if (nr_total_entries < num_entries) {
                 delta = num_entries - nr_total_entries;

                 spin_unlock_irqrestore(&free_entries_lock, flags);

                 for (i = 0; i < delta; i++) {
                         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                         if (!entry)
                                 break;

                         list_add_tail(&entry->list, &tmp);
                 }

                 spin_lock_irqsave(&free_entries_lock, flags);

                 list_splice(&tmp, &free_entries);
                 nr_total_entries += i;
                 num_free_entries += i;
         } else {
                 delta = nr_total_entries - num_entries;

                 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                         entry = __dma_entry_alloc();
                         kfree(entry);
                 }

                 nr_total_entries -= i;
         }

         if (nr_total_entries != num_entries)
                 ret = 1;

         spin_unlock_irqrestore(&free_entries_lock, flags);

         return ret;
 }
 EXPORT_SYMBOL(dma_debug_resize_entries);

 /*
  * DMA-API debugging init code
  *
  * The init code does two things:
  *   1. Initialize core data structures
  *   2. Preallocate a given number of dma_debug_entry structs
  */

 static int prealloc_memory(u32 num_entries)
 {
         struct dma_debug_entry *entry, *next_entry;
         int i;

         for (i = 0; i < num_entries; ++i) {
                 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                 if (!entry)
                         goto out_err;

                 list_add_tail(&entry->list, &free_entries);
         }

         num_free_entries = num_entries;
         min_free_entries = num_entries;

         printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
                num_entries);

         return 0;

 out_err:

         list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                 list_del(&entry->list);
                 kfree(entry);
         }

         return -ENOMEM;
 }

 static int dma_debug_fs_init(void)
 {
         dma_debug_dent = debugfs_create_dir("dma-api", NULL);
         if (!dma_debug_dent) {
                 printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                 return -ENOMEM;
         }

         global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                   dma_debug_dent,
                                                   (u32 *)&global_disable);
         if (!global_disable_dent)
                 goto out_err;

         error_count_dent = debugfs_create_u32("error_count", 0444,
                                               dma_debug_dent, &error_count);
         if (!error_count_dent)
                 goto out_err;

         show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                   dma_debug_dent,
                                                   &show_all_errors);
         if (!show_all_errors_dent)
                 goto out_err;

         show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                   dma_debug_dent,
                                                   &show_num_errors);
         if (!show_num_errors_dent)
                 goto out_err;

         num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                    dma_debug_dent,
                                                    &num_free_entries);
         if (!num_free_entries_dent)
                 goto out_err;

         min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                    dma_debug_dent,
                                                    &min_free_entries);
         if (!min_free_entries_dent)
                 goto out_err;

         return 0;

 out_err:
         debugfs_remove_recursive(dma_debug_dent);

         return -ENOMEM;
 }

 static int device_dma_allocations(struct device *dev)
 {
         struct dma_debug_entry *entry;
         unsigned long flags;
         int count = 0, i;

         for (i = 0; i < HASH_SIZE; ++i) {
                 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
                 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                         if (entry->dev == dev)
                                 count += 1;
                 }
                 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
         }

         return count;
 }

 static int dma_debug_device_change(struct notifier_block *nb,
                                    unsigned long action, void *data)
 {
         struct device *dev = data;
         int count;


         switch (action) {
         case BUS_NOTIFY_UNBOUND_DRIVER:
                 count = device_dma_allocations(dev);
                 if (count == 0)
                         break;
                 err_printk(dev, NULL, "DMA-API: device driver has pending "
                                 "DMA allocations while released from device "
                                 "[count=%d]\n", count);
                 break;
         default:
                 break;
         }

         return 0;
 }

 void dma_debug_add_bus(struct bus_type *bus)
 {
         struct notifier_block *nb;

         nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
         if (nb == NULL) {
                 printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
                 return;
         }

         nb->notifier_call = dma_debug_device_change;

         bus_register_notifier(bus, nb);
 }

 /*
  * Let the architectures decide how many entries should be preallocated.
  */
 void dma_debug_init(u32 num_entries)
 {
         int i;

         if (global_disable)
                 return;

         for (i = 0; i < HASH_SIZE; ++i) {
                 INIT_LIST_HEAD(&dma_entry_hash[i].list);
                 dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
         }

         if (dma_debug_fs_init() != 0) {
                 printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                 "- disabling\n");
                 global_disable = true;

                 return;
         }

         if (req_entries)
                 num_entries = req_entries;

         if (prealloc_memory(num_entries) != 0) {
                 printk(KERN_ERR "DMA-API: debugging out of memory error "
                                 "- disabled\n");
                 global_disable = true;

                 return;
         }

         nr_total_entries = num_free_entries;

         printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
 }

 static __init int dma_debug_cmdline(char *str)
 {
         if (!str)
                 return -EINVAL;

         if (strncmp(str, "off", 3) == 0) {
                 printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                  "command line\n");
                 global_disable = true;
         }

         return 0;
 }

 static __init int dma_debug_entries_cmdline(char *str)
 {
         int res;

         if (!str)
                 return -EINVAL;

         res = get_option(&str, &req_entries);

         if (!res)
                 req_entries = 0;

         return 0;
 }

 __setup("dma_debug=", dma_debug_cmdline);
 __setup("dma_debug_entries=", dma_debug_entries_cmdline);

 static void check_unmap(struct dma_debug_entry *ref)
 {
         struct dma_debug_entry *entry;
         struct hash_bucket *bucket;
         unsigned long flags;

         if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                            "to free an invalid DMA memory address\n");
                 return;
         }

         bucket = get_hash_bucket(ref, &flags);
         entry = hash_bucket_find(bucket, ref);

         if (!entry) {
                 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                            "to free DMA memory it has not allocated "
                            "[device address=0x%016llx] [size=%llu bytes]\n",
                            ref->dev_addr, ref->size);
                 goto out;
         }

         if (ref->size != entry->size) {
                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
                            "DMA memory with different size "
                            "[device address=0x%016llx] [map size=%llu bytes] "
                            "[unmap size=%llu bytes]\n",
                            ref->dev_addr, entry->size, ref->size);
         }

         if (ref->type != entry->type) {
                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
                            "DMA memory with wrong function "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[mapped as %s] [unmapped as %s]\n",
                            ref->dev_addr, ref->size,
                            type2name[entry->type], type2name[ref->type]);
         } else if ((entry->type == dma_debug_coherent) &&
                    (ref->paddr != entry->paddr)) {
                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
                            "DMA memory with different CPU address "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[cpu alloc address=%p] [cpu free address=%p]",
                            ref->dev_addr, ref->size,
                            (void *)entry->paddr, (void *)ref->paddr);
         }

         if (ref->sg_call_ents && ref->type == dma_debug_sg &&
             ref->sg_call_ents != entry->sg_call_ents) {
                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
                            "DMA sg list with different entry count "
                            "[map count=%d] [unmap count=%d]\n",
                            entry->sg_call_ents, ref->sg_call_ents);
         }

         /*
          * This may be no bug in reality - but most implementations of the
          * DMA API don't handle this properly, so check for it here
          */
         if (ref->direction != entry->direction) {
                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
                            "DMA memory with different direction "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[mapped with %s] [unmapped with %s]\n",
                            ref->dev_addr, ref->size,
                            dir2name[entry->direction],
                            dir2name[ref->direction]);
         }

         hash_bucket_del(entry);
         dma_entry_free(entry);

 out:
         put_hash_bucket(bucket, &flags);
 }

 static void check_for_stack(struct device *dev, void *addr)
 {
         if (object_is_on_stack(addr))
                 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
                                 "stack [addr=%p]\n", addr);
 }

 static inline bool overlap(void *addr, u64 size, void *start, void *end)
 {
         void *addr2 = (char *)addr + size;

         return ((addr >= start && addr < end) ||
                 (addr2 >= start && addr2 < end) ||
                 ((addr < start) && (addr2 >= end)));
 }

 static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
 {
         if (overlap(addr, size, _text, _etext) ||
             overlap(addr, size, __start_rodata, __end_rodata))
                 err_printk(dev, NULL, "DMA-API: device driver maps "
                                 "memory from kernel text or rodata "
                                 "[addr=%p] [size=%llu]\n", addr, size);
 }

 static void check_sync(struct device *dev, dma_addr_t addr,
                        u64 size, u64 offset, int direction, bool to_cpu)
 {
         struct dma_debug_entry ref = {
                 .dev            = dev,
                 .dev_addr       = addr,
                 .size           = size,
                 .direction      = direction,
         };
         struct dma_debug_entry *entry;
         struct hash_bucket *bucket;
         unsigned long flags;

         bucket = get_hash_bucket(&ref, &flags);

         entry = hash_bucket_find(bucket, &ref);

         if (!entry) {
                 err_printk(dev, NULL, "DMA-API: device driver tries "
                            "to sync DMA memory it has not allocated "
                            "[device address=0x%016llx] [size=%llu bytes]\n",
                            (unsigned long long)addr, size);
                 goto out;
         }

         if ((offset + size) > entry->size) {
                 err_printk(dev, entry, "DMA-API: device driver syncs"
                            " DMA memory outside allocated range "
                            "[device address=0x%016llx] "
                            "[allocation size=%llu bytes] [sync offset=%llu] "
                            "[sync size=%llu]\n", entry->dev_addr, entry->size,
                            offset, size);
         }

         if (direction != entry->direction) {
                 err_printk(dev, entry, "DMA-API: device driver syncs "
                            "DMA memory with different direction "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[mapped with %s] [synced with %s]\n",
                            (unsigned long long)addr, entry->size,
                            dir2name[entry->direction],
                            dir2name[direction]);
         }

         if (entry->direction == DMA_BIDIRECTIONAL)
                 goto out;

         if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
             !(direction == DMA_TO_DEVICE))
                 err_printk(dev, entry, "DMA-API: device driver syncs "
                            "device read-only DMA memory for cpu "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[mapped with %s] [synced with %s]\n",
                            (unsigned long long)addr, entry->size,
                            dir2name[entry->direction],
                            dir2name[direction]);

         if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
             !(direction == DMA_FROM_DEVICE))
                 err_printk(dev, entry, "DMA-API: device driver syncs "
                            "device write-only DMA memory to device "
                            "[device address=0x%016llx] [size=%llu bytes] "
                            "[mapped with %s] [synced with %s]\n",
                            (unsigned long long)addr, entry->size,
                            dir2name[entry->direction],
                            dir2name[direction]);

 out:
         put_hash_bucket(bucket, &flags);

 }

 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                         size_t size, int direction, dma_addr_t dma_addr,
                         bool map_single)
 {
         struct dma_debug_entry *entry;

         if (unlikely(global_disable))
                 return;

         if (unlikely(dma_mapping_error(dev, dma_addr)))
                 return;

         entry = dma_entry_alloc();
         if (!entry)
                 return;

         entry->dev       = dev;
         entry->type      = dma_debug_page;
         entry->paddr     = page_to_phys(page) + offset;
         entry->dev_addr  = dma_addr;
         entry->size      = size;
         entry->direction = direction;

         if (map_single)
                 entry->type = dma_debug_single;

         if (!PageHighMem(page)) {
                 void *addr = ((char *)page_address(page)) + offset;
                 check_for_stack(dev, addr);
                 check_for_illegal_area(dev, addr, size);
         }

         add_dma_entry(entry);
 }
 EXPORT_SYMBOL(debug_dma_map_page);

 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                           size_t size, int direction, bool map_single)
 {
         struct dma_debug_entry ref = {
                 .type           = dma_debug_page,
                 .dev            = dev,
                 .dev_addr       = addr,
                 .size           = size,
                 .direction      = direction,
         };

         if (unlikely(global_disable))
                 return;

         if (map_single)
                 ref.type = dma_debug_single;

         check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_unmap_page);

 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                       int nents, int mapped_ents, int direction)
 {
         struct dma_debug_entry *entry;
         struct scatterlist *s;
         int i;

         if (unlikely(global_disable))
                 return;

         for_each_sg(sg, s, mapped_ents, i) {
                 entry = dma_entry_alloc();
                 if (!entry)
                         return;

                 entry->type           = dma_debug_sg;
                 entry->dev            = dev;
                 entry->paddr          = sg_phys(s);
                 entry->size           = sg_dma_len(s);
                 entry->dev_addr       = sg_dma_address(s);
                 entry->direction      = direction;
                 entry->sg_call_ents   = nents;
                 entry->sg_mapped_ents = mapped_ents;

                 if (!PageHighMem(sg_page(s))) {
                         check_for_stack(dev, sg_virt(s));
                         check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                 }

                 add_dma_entry(entry);
         }
 }
 EXPORT_SYMBOL(debug_dma_map_sg);

+static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+{
+        struct dma_debug_entry *entry;
+        struct hash_bucket *bucket;
+        unsigned long flags;
+        int mapped_ents = 0;
+        struct dma_debug_entry ref;
+
+        ref.dev = dev;
+        ref.dev_addr = sg_dma_address(s);
+        ref.size = sg_dma_len(s),
+
+        bucket = get_hash_bucket(&ref, &flags);
+        entry = hash_bucket_find(bucket, &ref);
+        if (entry)
+                mapped_ents = entry->sg_mapped_ents;
+        put_hash_bucket(bucket, &flags);
+
+        return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, int dir)
 {
-        struct dma_debug_entry *entry;
         struct scatterlist *s;
         int mapped_ents = 0, i;
-        unsigned long flags;

         if (unlikely(global_disable))
                 return;

         for_each_sg(sglist, s, nelems, i) {

                 struct dma_debug_entry ref = {
                         .type           = dma_debug_sg,
                         .dev            = dev,
                         .paddr          = sg_phys(s),
                         .dev_addr       = sg_dma_address(s),
                         .size           = sg_dma_len(s),
                         .direction      = dir,
                         .sg_call_ents   = 0,
                 };

                 if (mapped_ents && i >= mapped_ents)
                         break;

-                if (mapped_ents == 0) {
-                        struct hash_bucket *bucket;
+                if (!i) {
                         ref.sg_call_ents = nelems;
-                        bucket = get_hash_bucket(&ref, &flags);
-                        entry = hash_bucket_find(bucket, &ref);
-                        if (entry)
-                                mapped_ents = entry->sg_mapped_ents;
-                        put_hash_bucket(bucket, &flags);
+                        mapped_ents = get_nr_mapped_entries(dev, s);
                 }

                 check_unmap(&ref);
         }
 }
 EXPORT_SYMBOL(debug_dma_unmap_sg);

 void debug_dma_alloc_coherent(struct device *dev, size_t size,
                               dma_addr_t dma_addr, void *virt)
 {
         struct dma_debug_entry *entry;

         if (unlikely(global_disable))
                 return;

         if (unlikely(virt == NULL))
                 return;

         entry = dma_entry_alloc();
         if (!entry)
                 return;

         entry->type      = dma_debug_coherent;
         entry->dev       = dev;
         entry->paddr     = virt_to_phys(virt);
         entry->size      = size;
         entry->dev_addr  = dma_addr;
         entry->direction = DMA_BIDIRECTIONAL;

         add_dma_entry(entry);
 }
 EXPORT_SYMBOL(debug_dma_alloc_coherent);

 void debug_dma_free_coherent(struct device *dev, size_t size,
                              void *virt, dma_addr_t addr)
 {
         struct dma_debug_entry ref = {
                 .type           = dma_debug_coherent,
                 .dev            = dev,
                 .paddr          = virt_to_phys(virt),
                 .dev_addr       = addr,
                 .size           = size,
                 .direction      = DMA_BIDIRECTIONAL,
         };

         if (unlikely(global_disable))
                 return;

         check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_free_coherent);

 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                    size_t size, int direction)
 {
         if (unlikely(global_disable))
                 return;

         check_sync(dev, dma_handle, size, 0, direction, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

 void debug_dma_sync_single_for_device(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction)
 {
         if (unlikely(global_disable))
                 return;

         check_sync(dev, dma_handle, size, 0, direction, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);

 void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                          dma_addr_t dma_handle,
                                          unsigned long offset, size_t size,
                                          int direction)
 {
         if (unlikely(global_disable))
                 return;

         check_sync(dev, dma_handle, size, offset, direction, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

 void debug_dma_sync_single_range_for_device(struct device *dev,
                                             dma_addr_t dma_handle,
                                             unsigned long offset,
                                             size_t size, int direction)
 {
         if (unlikely(global_disable))
                 return;

         check_sync(dev, dma_handle, size, offset, direction, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                int nelems, int direction)
 {
         struct scatterlist *s;
-        int i;
+        int mapped_ents = 0, i;

         if (unlikely(global_disable))
                 return;

         for_each_sg(sg, s, nelems, i) {
+                if (!i)
+                        mapped_ents = get_nr_mapped_entries(dev, s);
+
+                if (i >= mapped_ents)
+                        break;
+
                 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
                            direction, true);
         }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                   int nelems, int direction)
 {
         struct scatterlist *s;
-        int i;
+        int mapped_ents = 0, i;

         if (unlikely(global_disable))
                 return;

         for_each_sg(sg, s, nelems, i) {
+                if (!i)
+                        mapped_ents = get_nr_mapped_entries(dev, s);
+
+                if (i >= mapped_ents)
+                        break;
+
                 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
                            direction, false);
         }
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
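
A sketch of the scenario the new checks address (all identifiers below
are illustrative, not from the commit): when an IOMMU coalesces a
four-entry scatterlist into two DMA segments, dma_map_sg() returns 2
while the driver correctly keeps syncing with nents=4. Before this
change the sync checks walked all four entries and warned that the
last two were never mapped; they now stop after the sg_mapped_ents
recorded at map time.

    /* Hypothetical illustration of the false positive this fixes. */
    static void example_tx(struct device *dev, struct scatterlist *sgl)
    {
            int nents = 4;
            /* Suppose the IOMMU merged the list: mapped == 2. */
            int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

            if (!mapped)
                    return;

            /*
             * Correct per DMA-mapping.txt: pass the original nents.
             * dma-debug formerly ran check_sync() on all four entries
             * here and flagged the last two as "not allocated"; it now
             * consults get_nr_mapped_entries() and stops after two.
             */
            dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);

            dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
    }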