Blame view
kernel/dma/debug.c
43.2 KB
450515395 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
f2f45e5f3 dma-debug: add he... |
2 3 4 5 |
/* * Copyright (C) 2008 Advanced Micro Devices, Inc. * * Author: Joerg Roedel <joerg.roedel@amd.com> |
f2f45e5f3 dma-debug: add he... |
6 |
*/ |
f737b095c dma-debug: Use pr... |
7 |
#define pr_fmt(fmt) "DMA-API: " fmt |
68db0cf10 sched/headers: Pr... |
8 |
#include <linux/sched/task_stack.h> |
972aa45ce dma-debug: add ad... |
9 |
#include <linux/scatterlist.h> |
2d62ece14 dma-debug: add co... |
10 |
#include <linux/dma-mapping.h> |
299300258 sched/headers: Pr... |
11 |
#include <linux/sched/task.h> |
6c132d1bc dma-debug: print ... |
12 |
#include <linux/stacktrace.h> |
f2f45e5f3 dma-debug: add he... |
13 |
#include <linux/dma-debug.h> |
30dfa90cc dma-debug: add ha... |
14 |
#include <linux/spinlock.h> |
b4a0f533e dma-api: Teach th... |
15 |
#include <linux/vmalloc.h> |
788dcfa6f dma-debug: add de... |
16 |
#include <linux/debugfs.h> |
8a6fc708b dma-debug: add de... |
17 |
#include <linux/uaccess.h> |
23a7bfae6 lib: dma-debug ne... |
18 |
#include <linux/export.h> |
2d62ece14 dma-debug: add co... |
19 |
#include <linux/device.h> |
f2f45e5f3 dma-debug: add he... |
20 |
#include <linux/types.h> |
2d62ece14 dma-debug: add co... |
21 |
#include <linux/sched.h> |
8a6fc708b dma-debug: add de... |
22 |
#include <linux/ctype.h> |
f2f45e5f3 dma-debug: add he... |
23 |
#include <linux/list.h> |
6bf078715 dma-debug: add in... |
24 |
#include <linux/slab.h> |
f2f45e5f3 dma-debug: add he... |
25 |
|
2e34bde18 dma-debug: add ch... |
26 |
#include <asm/sections.h> |
30dfa90cc dma-debug: add ha... |
27 28 29 |
#define HASH_SIZE 1024ULL #define HASH_FN_SHIFT 13 #define HASH_FN_MASK (HASH_SIZE - 1) |
15b28bbcd dma-debug: move i... |
30 |
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
2b9d9ac02 dma-debug: Dynami... |
31 |
/* If the pool runs out, add this many new entries at once */ |
ad78dee0b dma-debug: Batch ... |
32 |
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry)) |
15b28bbcd dma-debug: move i... |
33 |
|
f2f45e5f3 dma-debug: add he... |
34 35 |
enum { dma_debug_single, |
f2f45e5f3 dma-debug: add he... |
36 37 |
dma_debug_sg, dma_debug_coherent, |
0e74b34df dma-debug: add su... |
38 |
dma_debug_resource, |
f2f45e5f3 dma-debug: add he... |
39 |
}; |
6c9c6d630 dma-debug: New in... |
40 41 42 43 44 |
enum map_err_types { MAP_ERR_CHECK_NOT_APPLICABLE, MAP_ERR_NOT_CHECKED, MAP_ERR_CHECKED, }; |
6c132d1bc dma-debug: print ... |
45 |
#define DMA_DEBUG_STACKTRACE_ENTRIES 5 |
0abdd7a81 dma-debug: introd... |
46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
/** * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping * @list: node on pre-allocated free_entries list * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent * @type: single, page, sg, coherent * @pfn: page frame of the start address * @offset: offset of mapping relative to pfn * @size: length of the mapping * @direction: enum dma_data_direction * @sg_call_ents: 'nents' from dma_map_sg * @sg_mapped_ents: 'mapped_ents' from dma_map_sg * @map_err_type: track whether dma_mapping_error() was checked * @stacktrace: support backtraces when a violation is detected */ |
f2f45e5f3 dma-debug: add he... |
60 61 62 63 |
struct dma_debug_entry { struct list_head list; struct device *dev; int type; |
0abdd7a81 dma-debug: introd... |
64 65 |
unsigned long pfn; size_t offset; |
f2f45e5f3 dma-debug: add he... |
66 67 68 69 70 |
u64 dev_addr; u64 size; int direction; int sg_call_ents; int sg_mapped_ents; |
6c9c6d630 dma-debug: New in... |
71 |
enum map_err_types map_err_type; |
6c132d1bc dma-debug: print ... |
72 |
#ifdef CONFIG_STACKTRACE |
746017ed8 dma/debug: Simpli... |
73 74 |
unsigned int stack_len; unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; |
6c132d1bc dma-debug: print ... |
75 |
#endif |
f2f45e5f3 dma-debug: add he... |
76 |
}; |
c6a21d0b8 dma-debug: hash_b... |
77 |
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); |
30dfa90cc dma-debug: add ha... |
78 79 80 |
struct hash_bucket { struct list_head list; spinlock_t lock; |
2d62ece14 dma-debug: add co... |
81 |
} ____cacheline_aligned_in_smp; |
30dfa90cc dma-debug: add ha... |
82 83 84 |
/* Hash list to save the allocated dma addresses */ static struct hash_bucket dma_entry_hash[HASH_SIZE]; |
3b1e79ed7 dma-debug: add al... |
85 86 87 88 89 90 |
/* List of pre-allocated dma_debug_entry's */ static LIST_HEAD(free_entries); /* Lock for the list above */ static DEFINE_SPINLOCK(free_entries_lock); /* Global disable flag - will be set in case of an error */ |
621a5f7ad debugfs: Pass boo... |
91 |
static bool global_disable __read_mostly; |
3b1e79ed7 dma-debug: add al... |
92 |
|
2ce8e7ed0 dma-debug: preven... |
93 94 |
/* Early initialization disable flag, set at the end of dma_debug_init */ static bool dma_debug_initialized __read_mostly; |
01ce18b31 dma-debug: introd... |
95 96 |
static inline bool dma_debug_disabled(void) { |
2ce8e7ed0 dma-debug: preven... |
97 |
return global_disable || !dma_debug_initialized; |
01ce18b31 dma-debug: introd... |
98 |
} |
788dcfa6f dma-debug: add de... |
99 100 101 102 103 104 105 |
/* Global error count */ static u32 error_count; /* Global error show enable*/ static u32 show_all_errors __read_mostly; /* Number of errors to show */ static u32 show_num_errors = 1; |
3b1e79ed7 dma-debug: add al... |
106 107 |
static u32 num_free_entries; static u32 min_free_entries; |
e6a1a89d5 dma-debug: add dm... |
108 |
static u32 nr_total_entries; |
30dfa90cc dma-debug: add ha... |
109 |
|
59d3daafa dma-debug: add ke... |
110 |
/* number of preallocated entries requested by kernel cmdline */ |
bcebe324c dma-debug: simpli... |
111 |
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
59d3daafa dma-debug: add ke... |
112 |
|
2e507d849 dma-debug: add va... |
113 114 115 116 117 118 119 120 |
/* per-driver filter related state */ #define NAME_MAX_LEN 64 static char current_driver_name[NAME_MAX_LEN] __read_mostly; static struct device_driver *current_driver __read_mostly; static DEFINE_RWLOCK(driver_name_lock); |
788dcfa6f dma-debug: add de... |
121 |
|
6c9c6d630 dma-debug: New in... |
122 123 124 125 126 |
static const char *const maperr2str[] = { [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", [MAP_ERR_NOT_CHECKED] = "dma map error not checked", [MAP_ERR_CHECKED] = "dma map error checked", }; |
f09387468 dma-debug: fix di... |
127 128 129 130 131 132 |
static const char *type2name[] = { [dma_debug_single] = "single", [dma_debug_sg] = "scather-gather", [dma_debug_coherent] = "coherent", [dma_debug_resource] = "resource", }; |
2d62ece14 dma-debug: add co... |
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 |
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_NONE" }; /* * The access to some variables in this macro is racy. We can't use atomic_t * here because all these variables are exported to debugfs. Some of them even * writeable. This is also the reason why a lock won't help much. But anyway, * the races are no big deal. Here is why: * * error_count: the addition is racy, but the worst thing that can happen is * that we don't count some errors * show_num_errors: the subtraction is racy. Also no big deal because in * worst case this will result in one warning more in the * system log than the user configured. This variable is * writeable via debugfs. */ |
6c132d1bc dma-debug: print ... |
/*
 * Print the stack trace recorded at map time for @entry, if any.
 *
 * Only does something when CONFIG_STACKTRACE is enabled; otherwise it
 * compiles to a no-op.  A NULL entry (an error with no matching
 * mapping) is silently ignored.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		/* was missing the trailing newline, gluing the stack dump
		 * onto an unterminated printk line */
		pr_warning("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}
2e507d849 dma-debug: add va... |
160 161 |
static bool driver_filter(struct device *dev) { |
0bf841281 dma-debug: simpli... |
162 163 164 |
struct device_driver *drv; unsigned long flags; bool ret; |
2e507d849 dma-debug: add va... |
165 166 167 168 169 |
/* driver filter off */ if (likely(!current_driver_name[0])) return true; /* driver filter on and initialized */ |
ec9c96ef3 dma-debug: Fix ch... |
170 |
if (current_driver && dev && dev->driver == current_driver) |
2e507d849 dma-debug: add va... |
171 |
return true; |
ec9c96ef3 dma-debug: Fix ch... |
172 173 174 |
/* driver filter on, but we can't filter on a NULL device... */ if (!dev) return false; |
0bf841281 dma-debug: simpli... |
175 176 |
if (current_driver || !current_driver_name[0]) return false; |
2e507d849 dma-debug: add va... |
177 |
|
0bf841281 dma-debug: simpli... |
178 |
/* driver filter on but not yet initialized */ |
f3ff92470 Remove useless ge... |
179 |
drv = dev->driver; |
0bf841281 dma-debug: simpli... |
180 181 182 183 184 185 186 187 188 189 190 |
if (!drv) return false; /* lock to protect against change of current_driver_name */ read_lock_irqsave(&driver_name_lock, flags); ret = false; if (drv->name && strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { current_driver = drv; ret = true; |
2e507d849 dma-debug: add va... |
191 |
} |
0bf841281 dma-debug: simpli... |
192 |
read_unlock_irqrestore(&driver_name_lock, flags); |
0bf841281 dma-debug: simpli... |
193 194 |
return ret; |
2e507d849 dma-debug: add va... |
195 |
} |
ec9c96ef3 dma-debug: Fix ch... |
196 197 198 199 |
#define err_printk(dev, entry, format, arg...) do { \ error_count += 1; \ if (driver_filter(dev) && \ (show_all_errors || show_num_errors > 0)) { \ |
f737b095c dma-debug: Use pr... |
200 |
WARN(1, pr_fmt("%s %s: ") format, \ |
ec9c96ef3 dma-debug: Fix ch... |
201 202 203 204 205 206 |
dev ? dev_driver_string(dev) : "NULL", \ dev ? dev_name(dev) : "NULL", ## arg); \ dump_entry_trace(entry); \ } \ if (!show_all_errors && show_num_errors > 0) \ show_num_errors -= 1; \ |
2d62ece14 dma-debug: add co... |
207 |
} while (0); |
30dfa90cc dma-debug: add ha... |
208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 |
/* * Hash related functions * * Every DMA-API request is saved into a struct dma_debug_entry. To * have quick access to these structs they are stored into a hash. */ static int hash_fn(struct dma_debug_entry *entry) { /* * Hash function is based on the dma address. * We use bits 20-27 here as the index into the hash */ return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; } /* * Request exclusive access to a hash bucket for a given dma_debug_entry. */ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, unsigned long *flags) |
d5dfc80f8 dma-debug: track ... |
228 |
__acquires(&dma_entry_hash[idx].lock) |
30dfa90cc dma-debug: add ha... |
229 230 231 232 233 234 235 236 237 238 239 240 241 242 |
{ int idx = hash_fn(entry); unsigned long __flags; spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); *flags = __flags; return &dma_entry_hash[idx]; } /* * Give up exclusive access to the hash bucket */ static void put_hash_bucket(struct hash_bucket *bucket, unsigned long *flags) |
d5dfc80f8 dma-debug: track ... |
243 |
__releases(&bucket->lock) |
30dfa90cc dma-debug: add ha... |
244 245 246 247 248 |
{ unsigned long __flags = *flags; spin_unlock_irqrestore(&bucket->lock, __flags); } |
/*
 * Match two entries only when both device and bus address are equal.
 */
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	/* the '? true : false' on an already-boolean expression was
	 * redundant and has been dropped */
	return (a->dev_addr == b->dev_addr) && (a->dev == b->dev);
}

/*
 * Match when @b's mapping fully contains the address range described
 * by @a on the same device.
 */
static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	return (b->dev_addr <= a->dev_addr) &&
	       ((b->dev_addr + b->size) >= (a->dev_addr + a->size));
}
30dfa90cc dma-debug: add ha... |
267 268 269 |
/* * Search a given entry in the hash bucket list */ |
c6a21d0b8 dma-debug: hash_b... |
270 271 272 |
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, struct dma_debug_entry *ref, match_fn match) |
30dfa90cc dma-debug: add ha... |
273 |
{ |
7caf6a49b dma-debug: change... |
274 |
struct dma_debug_entry *entry, *ret = NULL; |
fe73fbe1c lib/dma-debug.c: ... |
275 |
int matches = 0, match_lvl, last_lvl = -1; |
30dfa90cc dma-debug: add ha... |
276 277 |
list_for_each_entry(entry, &bucket->list, list) { |
c6a21d0b8 dma-debug: hash_b... |
278 |
if (!match(ref, entry)) |
7caf6a49b dma-debug: change... |
279 280 281 282 283 284 285 |
continue; /* * Some drivers map the same physical address multiple * times. Without a hardware IOMMU this results in the * same device addresses being put into the dma-debug * hash multiple times too. This can result in false |
af901ca18 tree-wide: fix as... |
286 |
* positives being reported. Therefore we implement a |
7caf6a49b dma-debug: change... |
287 288 289 290 291 292 |
* best-fit algorithm here which returns the entry from * the hash which fits best to the reference value * instead of the first-fit. */ matches += 1; match_lvl = 0; |
e5e8c5b90 dma-debug: check ... |
293 294 295 296 |
entry->size == ref->size ? ++match_lvl : 0; entry->type == ref->type ? ++match_lvl : 0; entry->direction == ref->direction ? ++match_lvl : 0; entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; |
7caf6a49b dma-debug: change... |
297 |
|
e5e8c5b90 dma-debug: check ... |
298 |
if (match_lvl == 4) { |
7caf6a49b dma-debug: change... |
299 |
/* perfect-fit - return the result */ |
30dfa90cc dma-debug: add ha... |
300 |
return entry; |
7caf6a49b dma-debug: change... |
301 302 303 |
} else if (match_lvl > last_lvl) { /* * We found an entry that fits better then the |
fe73fbe1c lib/dma-debug.c: ... |
304 |
* previous one or it is the 1st match. |
7caf6a49b dma-debug: change... |
305 306 307 308 |
*/ last_lvl = match_lvl; ret = entry; } |
30dfa90cc dma-debug: add ha... |
309 |
} |
7caf6a49b dma-debug: change... |
310 311 312 313 314 315 316 |
/* * If we have multiple matches but no perfect-fit, just return * NULL. */ ret = (matches == 1) ? ret : NULL; return ret; |
30dfa90cc dma-debug: add ha... |
317 |
} |
c6a21d0b8 dma-debug: hash_b... |
318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 |
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, struct dma_debug_entry *ref) { return __hash_bucket_find(bucket, ref, exact_match); } static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, struct dma_debug_entry *ref, unsigned long *flags) { unsigned int max_range = dma_get_max_seg_size(ref->dev); struct dma_debug_entry *entry, index = *ref; unsigned int range = 0; while (range <= max_range) { |
a7a2c02a4 lib/dma-debug: fi... |
334 |
entry = __hash_bucket_find(*bucket, ref, containing_match); |
c6a21d0b8 dma-debug: hash_b... |
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 |
if (entry) return entry; /* * Nothing found, go back a hash bucket */ put_hash_bucket(*bucket, flags); range += (1 << HASH_FN_SHIFT); index.dev_addr -= (1 << HASH_FN_SHIFT); *bucket = get_hash_bucket(&index, flags); } return NULL; } |
30dfa90cc dma-debug: add ha... |
350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 |
/* * Add an entry to a hash bucket */ static void hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) { list_add_tail(&entry->list, &bucket->list); } /* * Remove entry from a hash bucket list */ static void hash_bucket_del(struct dma_debug_entry *entry) { list_del(&entry->list); } |
0abdd7a81 dma-debug: introd... |
366 367 |
static unsigned long long phys_addr(struct dma_debug_entry *entry) { |
0e74b34df dma-debug: add su... |
368 369 |
if (entry->type == dma_debug_resource) return __pfn_to_phys(entry->pfn) + entry->offset; |
0abdd7a81 dma-debug: introd... |
370 371 |
return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; } |
30dfa90cc dma-debug: add ha... |
372 |
/* |
ac26c18bd dma-debug: add fu... |
373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 |
* Dump mapping entries for debugging purposes */ void debug_dma_dump_mappings(struct device *dev) { int idx; for (idx = 0; idx < HASH_SIZE; idx++) { struct hash_bucket *bucket = &dma_entry_hash[idx]; struct dma_debug_entry *entry; unsigned long flags; spin_lock_irqsave(&bucket->lock, flags); list_for_each_entry(entry, &bucket->list, list) { if (!dev || dev == entry->dev) { dev_info(entry->dev, |
0abdd7a81 dma-debug: introd... |
389 390 |
"%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s ", |
ac26c18bd dma-debug: add fu... |
391 |
type2name[entry->type], idx, |
0abdd7a81 dma-debug: introd... |
392 |
phys_addr(entry), entry->pfn, |
ac26c18bd dma-debug: add fu... |
393 |
entry->dev_addr, entry->size, |
6c9c6d630 dma-debug: New in... |
394 395 |
dir2name[entry->direction], maperr2str[entry->map_err_type]); |
ac26c18bd dma-debug: add fu... |
396 397 398 399 |
} } spin_unlock_irqrestore(&bucket->lock, flags); |
34205ed59 dma-debug: add a ... |
400 |
cond_resched(); |
ac26c18bd dma-debug: add fu... |
401 402 |
} } |
ac26c18bd dma-debug: add fu... |
403 404 |
/* |
3b7a6418c dma debug: accoun... |
405 406 407 408 |
* For each mapping (initial cacheline in the case of * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a * scatterlist, or the cacheline specified in dma_map_single) insert * into this tree using the cacheline as the key. At |
0abdd7a81 dma-debug: introd... |
409 |
* dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If |
3b7a6418c dma debug: accoun... |
410 |
* the entry already exists at insertion time add a tag as a reference |
0abdd7a81 dma-debug: introd... |
411 |
* count for the overlapping mappings. For now, the overlap tracking |
3b7a6418c dma debug: accoun... |
412 413 414 |
* just ensures that 'unmaps' balance 'maps' before marking the * cacheline idle, but we should also be flagging overlaps as an API * violation. |
0abdd7a81 dma-debug: introd... |
415 416 417 |
* * Memory usage is mostly constrained by the maximum number of available * dma-debug entries in that we need a free dma_debug_entry before |
3b7a6418c dma debug: accoun... |
418 419 420 421 422 |
* inserting into the tree. In the case of dma_map_page and * dma_alloc_coherent there is only one dma_debug_entry and one * dma_active_cacheline entry to track per event. dma_map_sg(), on the * other hand, consumes a single dma_debug_entry, but inserts 'nents' * entries into the tree. |
0abdd7a81 dma-debug: introd... |
423 424 |
* * At any time debug_dma_assert_idle() can be called to trigger a |
3b7a6418c dma debug: accoun... |
425 |
* warning if any cachelines in the given page are in the active set. |
0abdd7a81 dma-debug: introd... |
426 |
*/ |
3b7a6418c dma debug: accoun... |
427 |
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); |
0abdd7a81 dma-debug: introd... |
428 |
static DEFINE_SPINLOCK(radix_lock); |
3b7a6418c dma debug: accoun... |
429 430 431 |
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) |
0abdd7a81 dma-debug: introd... |
432 |
|
3b7a6418c dma debug: accoun... |
433 434 435 436 437 438 439 |
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) { return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + (entry->offset >> L1_CACHE_SHIFT); } static int active_cacheline_read_overlap(phys_addr_t cln) |
0abdd7a81 dma-debug: introd... |
440 441 442 443 |
{ int overlap = 0, i; for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) |
3b7a6418c dma debug: accoun... |
444 |
if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) |
0abdd7a81 dma-debug: introd... |
445 446 447 |
overlap |= 1 << i; return overlap; } |
3b7a6418c dma debug: accoun... |
448 |
static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) |
0abdd7a81 dma-debug: introd... |
449 450 |
{ int i; |
3b7a6418c dma debug: accoun... |
451 |
if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) |
59f2e7df5 dma-debug: fix ov... |
452 |
return overlap; |
0abdd7a81 dma-debug: introd... |
453 454 455 |
for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) if (overlap & 1 << i) |
3b7a6418c dma debug: accoun... |
456 |
radix_tree_tag_set(&dma_active_cacheline, cln, i); |
0abdd7a81 dma-debug: introd... |
457 |
else |
3b7a6418c dma debug: accoun... |
458 |
radix_tree_tag_clear(&dma_active_cacheline, cln, i); |
0abdd7a81 dma-debug: introd... |
459 460 461 |
return overlap; } |
3b7a6418c dma debug: accoun... |
462 |
static void active_cacheline_inc_overlap(phys_addr_t cln) |
0abdd7a81 dma-debug: introd... |
463 |
{ |
3b7a6418c dma debug: accoun... |
464 |
int overlap = active_cacheline_read_overlap(cln); |
0abdd7a81 dma-debug: introd... |
465 |
|
3b7a6418c dma debug: accoun... |
466 |
overlap = active_cacheline_set_overlap(cln, ++overlap); |
0abdd7a81 dma-debug: introd... |
467 468 469 470 |
/* If we overflowed the overlap counter then we're potentially * leaking dma-mappings. Otherwise, if maps and unmaps are * balanced then this overflow may cause false negatives in |
3b7a6418c dma debug: accoun... |
471 |
* debug_dma_assert_idle() as the cacheline may be marked idle |
0abdd7a81 dma-debug: introd... |
472 473 |
* prematurely. */ |
3b7a6418c dma debug: accoun... |
474 |
WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, |
f737b095c dma-debug: Use pr... |
475 476 |
pr_fmt("exceeded %d overlapping mappings of cacheline %pa "), |
3b7a6418c dma debug: accoun... |
477 |
ACTIVE_CACHELINE_MAX_OVERLAP, &cln); |
0abdd7a81 dma-debug: introd... |
478 |
} |
3b7a6418c dma debug: accoun... |
479 |
static int active_cacheline_dec_overlap(phys_addr_t cln) |
0abdd7a81 dma-debug: introd... |
480 |
{ |
3b7a6418c dma debug: accoun... |
481 |
int overlap = active_cacheline_read_overlap(cln); |
0abdd7a81 dma-debug: introd... |
482 |
|
3b7a6418c dma debug: accoun... |
483 |
return active_cacheline_set_overlap(cln, --overlap); |
0abdd7a81 dma-debug: introd... |
484 |
} |
3b7a6418c dma debug: accoun... |
485 |
static int active_cacheline_insert(struct dma_debug_entry *entry) |
0abdd7a81 dma-debug: introd... |
486 |
{ |
3b7a6418c dma debug: accoun... |
487 |
phys_addr_t cln = to_cacheline_number(entry); |
0abdd7a81 dma-debug: introd... |
488 489 |
unsigned long flags; int rc; |
3b7a6418c dma debug: accoun... |
490 491 492 493 494 495 |
/* If the device is not writing memory then we don't have any * concerns about the cpu consuming stale data. This mitigates * legitimate usages of overlapping mappings. */ if (entry->direction == DMA_TO_DEVICE) return 0; |
0abdd7a81 dma-debug: introd... |
496 |
spin_lock_irqsave(&radix_lock, flags); |
3b7a6418c dma debug: accoun... |
497 |
rc = radix_tree_insert(&dma_active_cacheline, cln, entry); |
0abdd7a81 dma-debug: introd... |
498 |
if (rc == -EEXIST) |
3b7a6418c dma debug: accoun... |
499 |
active_cacheline_inc_overlap(cln); |
0abdd7a81 dma-debug: introd... |
500 501 502 503 |
spin_unlock_irqrestore(&radix_lock, flags); return rc; } |
3b7a6418c dma debug: accoun... |
504 |
static void active_cacheline_remove(struct dma_debug_entry *entry) |
0abdd7a81 dma-debug: introd... |
505 |
{ |
3b7a6418c dma debug: accoun... |
506 |
phys_addr_t cln = to_cacheline_number(entry); |
0abdd7a81 dma-debug: introd... |
507 |
unsigned long flags; |
3b7a6418c dma debug: accoun... |
508 509 510 |
/* ...mirror the insert case */ if (entry->direction == DMA_TO_DEVICE) return; |
0abdd7a81 dma-debug: introd... |
511 |
spin_lock_irqsave(&radix_lock, flags); |
59f2e7df5 dma-debug: fix ov... |
512 |
/* since we are counting overlaps the final put of the |
3b7a6418c dma debug: accoun... |
513 514 |
* cacheline will occur when the overlap count is 0. * active_cacheline_dec_overlap() returns -1 in that case |
59f2e7df5 dma-debug: fix ov... |
515 |
*/ |
3b7a6418c dma debug: accoun... |
516 517 |
if (active_cacheline_dec_overlap(cln) < 0) radix_tree_delete(&dma_active_cacheline, cln); |
0abdd7a81 dma-debug: introd... |
518 519 520 521 522 |
spin_unlock_irqrestore(&radix_lock, flags); } /** * debug_dma_assert_idle() - assert that a page is not undergoing dma |
3b7a6418c dma debug: accoun... |
523 |
* @page: page to lookup in the dma_active_cacheline tree |
0abdd7a81 dma-debug: introd... |
524 525 526 527 528 529 530 |
* * Place a call to this routine in cases where the cpu touching the page * before the dma completes (page is dma_unmapped) will lead to data * corruption. */ void debug_dma_assert_idle(struct page *page) { |
3b7a6418c dma debug: accoun... |
531 532 533 534 |
static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; struct dma_debug_entry *entry = NULL; void **results = (void **) &ents; unsigned int nents, i; |
0abdd7a81 dma-debug: introd... |
535 |
unsigned long flags; |
3b7a6418c dma debug: accoun... |
536 |
phys_addr_t cln; |
0abdd7a81 dma-debug: introd... |
537 |
|
c9d120b0b dma-debug: skip d... |
538 539 |
if (dma_debug_disabled()) return; |
0abdd7a81 dma-debug: introd... |
540 541 |
if (!page) return; |
3b7a6418c dma debug: accoun... |
542 |
cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; |
0abdd7a81 dma-debug: introd... |
543 |
spin_lock_irqsave(&radix_lock, flags); |
3b7a6418c dma debug: accoun... |
544 545 546 547 548 549 550 551 552 553 554 |
nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, CACHELINES_PER_PAGE); for (i = 0; i < nents; i++) { phys_addr_t ent_cln = to_cacheline_number(ents[i]); if (ent_cln == cln) { entry = ents[i]; break; } else if (ent_cln >= cln + CACHELINES_PER_PAGE) break; } |
0abdd7a81 dma-debug: introd... |
555 556 557 558 |
spin_unlock_irqrestore(&radix_lock, flags); if (!entry) return; |
3b7a6418c dma debug: accoun... |
559 |
cln = to_cacheline_number(entry); |
0abdd7a81 dma-debug: introd... |
560 |
err_printk(entry->dev, entry, |
f737b095c dma-debug: Use pr... |
561 562 |
"cpu touching an active dma mapped cacheline [cln=%pa] ", |
3b7a6418c dma debug: accoun... |
563 |
&cln); |
0abdd7a81 dma-debug: introd... |
564 565 566 |
} /* |
30dfa90cc dma-debug: add ha... |
567 568 569 570 571 572 573 |
* Wrapper function for adding an entry to the hash. * This function takes care of locking itself. */ static void add_dma_entry(struct dma_debug_entry *entry) { struct hash_bucket *bucket; unsigned long flags; |
0abdd7a81 dma-debug: introd... |
574 |
int rc; |
30dfa90cc dma-debug: add ha... |
575 576 577 578 |
bucket = get_hash_bucket(entry, &flags); hash_bucket_add(bucket, entry); put_hash_bucket(bucket, &flags); |
0abdd7a81 dma-debug: introd... |
579 |
|
3b7a6418c dma debug: accoun... |
580 |
rc = active_cacheline_insert(entry); |
0abdd7a81 dma-debug: introd... |
581 |
if (rc == -ENOMEM) { |
f737b095c dma-debug: Use pr... |
582 583 |
pr_err("cacheline tracking ENOMEM, dma-debug disabled "); |
0abdd7a81 dma-debug: introd... |
584 585 586 587 588 589 |
global_disable = true; } /* TODO: report -EEXIST errors here as overlapping mappings are * not supported by the DMA API */ |
30dfa90cc dma-debug: add ha... |
590 |
} |
ad78dee0b dma-debug: Batch ... |
591 |
static int dma_debug_create_entries(gfp_t gfp) |
2b9d9ac02 dma-debug: Dynami... |
592 |
{ |
ad78dee0b dma-debug: Batch ... |
593 |
struct dma_debug_entry *entry; |
2b9d9ac02 dma-debug: Dynami... |
594 |
int i; |
ad78dee0b dma-debug: Batch ... |
595 596 597 |
entry = (void *)get_zeroed_page(gfp); if (!entry) return -ENOMEM; |
2b9d9ac02 dma-debug: Dynami... |
598 |
|
ad78dee0b dma-debug: Batch ... |
599 600 |
for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++) list_add_tail(&entry[i].list, &free_entries); |
2b9d9ac02 dma-debug: Dynami... |
601 |
|
ad78dee0b dma-debug: Batch ... |
602 603 |
num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES; nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES; |
2b9d9ac02 dma-debug: Dynami... |
604 605 |
return 0; |
2b9d9ac02 dma-debug: Dynami... |
606 |
} |
e6a1a89d5 dma-debug: add dm... |
607 608 609 610 611 612 613 614 615 616 617 618 619 620 |
static struct dma_debug_entry *__dma_entry_alloc(void) { struct dma_debug_entry *entry; entry = list_entry(free_entries.next, struct dma_debug_entry, list); list_del(&entry->list); memset(entry, 0, sizeof(*entry)); num_free_entries -= 1; if (num_free_entries < min_free_entries) min_free_entries = num_free_entries; return entry; } |
ceb51173b dma-debug: Make l... |
621 622 623 624 625 626 627 628 629 630 631 632 |
void __dma_entry_alloc_check_leak(void) { u32 tmp = nr_total_entries % nr_prealloc_entries; /* Shout each time we tick over some multiple of the initial pool */ if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) { pr_info("dma_debug_entry pool grown to %u (%u00%%) ", nr_total_entries, (nr_total_entries / nr_prealloc_entries)); } } |
3b1e79ed7 dma-debug: add al... |
633 634 635 636 637 638 639 |
/* struct dma_entry allocator * * The next two functions implement the allocator for * struct dma_debug_entries. */ static struct dma_debug_entry *dma_entry_alloc(void) { |
29cdd4e4e dma-debug: releas... |
640 |
struct dma_debug_entry *entry; |
3b1e79ed7 dma-debug: add al... |
641 642 643 |
unsigned long flags; spin_lock_irqsave(&free_entries_lock, flags); |
2b9d9ac02 dma-debug: Dynami... |
644 |
if (num_free_entries == 0) { |
ad78dee0b dma-debug: Batch ... |
645 |
if (dma_debug_create_entries(GFP_ATOMIC)) { |
2b9d9ac02 dma-debug: Dynami... |
646 647 648 649 650 651 |
global_disable = true; spin_unlock_irqrestore(&free_entries_lock, flags); pr_err("debugging out of memory - disabling "); return NULL; } |
ceb51173b dma-debug: Make l... |
652 |
__dma_entry_alloc_check_leak(); |
3b1e79ed7 dma-debug: add al... |
653 |
} |
e6a1a89d5 dma-debug: add dm... |
654 |
entry = __dma_entry_alloc(); |
3b1e79ed7 dma-debug: add al... |
655 |
|
29cdd4e4e dma-debug: releas... |
656 |
spin_unlock_irqrestore(&free_entries_lock, flags); |
6c132d1bc dma-debug: print ... |
657 |
#ifdef CONFIG_STACKTRACE |
746017ed8 dma/debug: Simpli... |
658 659 660 |
entry->stack_len = stack_trace_save(entry->stack_entries, ARRAY_SIZE(entry->stack_entries), 1); |
6c132d1bc dma-debug: print ... |
661 |
#endif |
3b1e79ed7 dma-debug: add al... |
662 663 664 665 666 667 |
return entry; } static void dma_entry_free(struct dma_debug_entry *entry) { unsigned long flags; |
3b7a6418c dma debug: accoun... |
668 |
active_cacheline_remove(entry); |
0abdd7a81 dma-debug: introd... |
669 |
|
3b1e79ed7 dma-debug: add al... |
670 671 672 673 674 675 676 677 678 |
/* * add to beginning of the list - this way the entries are * more likely cache hot when they are reallocated. */ spin_lock_irqsave(&free_entries_lock, flags); list_add(&entry->list, &free_entries); num_free_entries += 1; spin_unlock_irqrestore(&free_entries_lock, flags); } |
6bf078715 dma-debug: add in... |
679 680 681 682 683 684 685 |
/* * DMA-API debugging init code * * The init code does two things: * 1. Initialize core data structures * 2. Preallocate a given number of dma_debug_entry structs */ |
/*
 * debugfs read handler for the driver filter: returns the currently
 * configured driver name (newline-terminated), or 0 bytes when no
 * filter is set.
 */
static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	/* format string had lost its newline */
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
8a6fc708b dma-debug: add de... |
712 |
char buf[NAME_MAX_LEN]; |
c17e2cf73 dma-debug: code s... |
713 714 |
unsigned long flags; size_t len; |
8a6fc708b dma-debug: add de... |
715 716 717 718 719 720 721 722 |
int i; /* * We can't copy from userspace directly. Access to * current_driver_name is protected with a write_lock with irqs * disabled. Since copy_from_user can fault and may sleep we * need to copy to temporary buffer first */ |
e7ed70eed dma-debug: use pr... |
723 |
len = min(count, (size_t)(NAME_MAX_LEN - 1)); |
8a6fc708b dma-debug: add de... |
724 725 726 727 728 729 |
if (copy_from_user(buf, userbuf, len)) return -EFAULT; buf[len] = 0; write_lock_irqsave(&driver_name_lock, flags); |
312325094 dma-debug: commen... |
730 731 |
/* * Now handle the string we got from userspace very carefully. |
8a6fc708b dma-debug: add de... |
732 733 734 735 736 737 738 739 740 |
* The rules are: * - only use the first token we got * - token delimiter is everything looking like a space * character (' ', ' ', '\t' ...) * */ if (!isalnum(buf[0])) { /* |
312325094 dma-debug: commen... |
741 |
* If the first character userspace gave us is not |
8a6fc708b dma-debug: add de... |
742 743 744 745 |
* alphanumerical then assume the filter should be * switched off. */ if (current_driver_name[0]) |
f737b095c dma-debug: Use pr... |
746 747 |
pr_info("switching off dma-debug driver filter "); |
8a6fc708b dma-debug: add de... |
748 749 750 751 752 753 754 755 756 |
current_driver_name[0] = 0; current_driver = NULL; goto out_unlock; } /* * Now parse out the first token and use it as the name for the * driver to filter for. */ |
39a37ce1c dma-debug: Cleanu... |
757 |
for (i = 0; i < NAME_MAX_LEN - 1; ++i) { |
8a6fc708b dma-debug: add de... |
758 759 760 761 762 763 |
current_driver_name[i] = buf[i]; if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) break; } current_driver_name[i] = 0; current_driver = NULL; |
f737b095c dma-debug: Use pr... |
764 765 |
pr_info("enable driver filter for driver [%s] ", |
e7ed70eed dma-debug: use pr... |
766 |
current_driver_name); |
8a6fc708b dma-debug: add de... |
767 768 769 770 771 772 |
out_unlock: write_unlock_irqrestore(&driver_name_lock, flags); return count; } |
aeb583d08 lib/dma-debug.c: ... |
773 |
static const struct file_operations filter_fops = { |
8a6fc708b dma-debug: add de... |
774 775 |
.read = filter_read, .write = filter_write, |
6038f373a llseek: automatic... |
776 |
.llseek = default_llseek, |
8a6fc708b dma-debug: add de... |
777 |
}; |
0a3b192c2 dma-debug: add du... |
778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 |
static int dump_show(struct seq_file *seq, void *v) { int idx; for (idx = 0; idx < HASH_SIZE; idx++) { struct hash_bucket *bucket = &dma_entry_hash[idx]; struct dma_debug_entry *entry; unsigned long flags; spin_lock_irqsave(&bucket->lock, flags); list_for_each_entry(entry, &bucket->list, list) { seq_printf(seq, "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s ", dev_name(entry->dev), dev_driver_string(entry->dev), type2name[entry->type], idx, phys_addr(entry), entry->pfn, entry->dev_addr, entry->size, dir2name[entry->direction], maperr2str[entry->map_err_type]); } spin_unlock_irqrestore(&bucket->lock, flags); } return 0; } DEFINE_SHOW_ATTRIBUTE(dump); |
8e4d81b98 dma: debug: no ne... |
805 |
static void dma_debug_fs_init(void) |
788dcfa6f dma-debug: add de... |
806 |
{ |
8e4d81b98 dma: debug: no ne... |
807 |
struct dentry *dentry = debugfs_create_dir("dma-api", NULL); |
788dcfa6f dma-debug: add de... |
808 |
|
8e4d81b98 dma: debug: no ne... |
809 810 811 812 813 814 815 816 |
debugfs_create_bool("disabled", 0444, dentry, &global_disable); debugfs_create_u32("error_count", 0444, dentry, &error_count); debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors); debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors); debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries); debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries); debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); |
0a3b192c2 dma-debug: add du... |
817 |
debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); |
788dcfa6f dma-debug: add de... |
818 |
} |
ba4b87ad5 dma-debug: print ... |
819 |
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) |
ed888aef4 dma-debug: re-add... |
820 821 822 823 824 825 |
{ struct dma_debug_entry *entry; unsigned long flags; int count = 0, i; for (i = 0; i < HASH_SIZE; ++i) { |
6a5cd60ba lib/dma-debug.c: ... |
826 |
spin_lock_irqsave(&dma_entry_hash[i].lock, flags); |
ed888aef4 dma-debug: re-add... |
827 |
list_for_each_entry(entry, &dma_entry_hash[i].list, list) { |
ba4b87ad5 dma-debug: print ... |
828 |
if (entry->dev == dev) { |
ed888aef4 dma-debug: re-add... |
829 |
count += 1; |
ba4b87ad5 dma-debug: print ... |
830 831 |
*out_entry = entry; } |
ed888aef4 dma-debug: re-add... |
832 |
} |
6a5cd60ba lib/dma-debug.c: ... |
833 |
spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); |
ed888aef4 dma-debug: re-add... |
834 835 836 837 |
} return count; } |
a8fe9ea20 dma-debug: Fix bu... |
838 |
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
ed888aef4 dma-debug: re-add... |
839 840 |
{ struct device *dev = data; |
ba4b87ad5 dma-debug: print ... |
841 |
struct dma_debug_entry *uninitialized_var(entry); |
ed888aef4 dma-debug: re-add... |
842 |
int count; |
01ce18b31 dma-debug: introd... |
843 |
if (dma_debug_disabled()) |
a8fe9ea20 dma-debug: Fix bu... |
844 |
return 0; |
ed888aef4 dma-debug: re-add... |
845 846 847 |
switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: |
ba4b87ad5 dma-debug: print ... |
848 |
count = device_dma_allocations(dev, &entry); |
ed888aef4 dma-debug: re-add... |
849 850 |
if (count == 0) break; |
f737b095c dma-debug: Use pr... |
851 |
err_printk(dev, entry, "device driver has pending " |
ed888aef4 dma-debug: re-add... |
852 |
"DMA allocations while released from device " |
ba4b87ad5 dma-debug: print ... |
853 854 855 856 857 858 859 860 |
"[count=%d] " "One of leaked entries details: " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [mapped as %s] ", count, entry->dev_addr, entry->size, dir2name[entry->direction], type2name[entry->type]); |
ed888aef4 dma-debug: re-add... |
861 862 863 864 865 866 867 |
break; default: break; } return 0; } |
41531c8f5 dma-debug: add a ... |
868 869 |
void dma_debug_add_bus(struct bus_type *bus) { |
ed888aef4 dma-debug: re-add... |
870 |
struct notifier_block *nb; |
01ce18b31 dma-debug: introd... |
871 |
if (dma_debug_disabled()) |
f797d9881 dma-debug: Do not... |
872 |
return; |
ed888aef4 dma-debug: re-add... |
873 874 |
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); if (nb == NULL) { |
e7ed70eed dma-debug: use pr... |
875 876 |
pr_err("dma_debug_add_bus: out of memory "); |
ed888aef4 dma-debug: re-add... |
877 878 879 880 881 882 |
return; } nb->notifier_call = dma_debug_device_change; bus_register_notifier(bus, nb); |
41531c8f5 dma-debug: add a ... |
883 |
} |
788dcfa6f dma-debug: add de... |
884 |
|
15b28bbcd dma-debug: move i... |
885 |
static int dma_debug_init(void) |
6bf078715 dma-debug: add in... |
886 |
{ |
ad78dee0b dma-debug: Batch ... |
887 |
int i, nr_pages; |
6bf078715 dma-debug: add in... |
888 |
|
2ce8e7ed0 dma-debug: preven... |
889 890 891 892 |
/* Do not use dma_debug_initialized here, since we really want to be * called to set dma_debug_initialized */ if (global_disable) |
15b28bbcd dma-debug: move i... |
893 |
return 0; |
6bf078715 dma-debug: add in... |
894 895 896 |
for (i = 0; i < HASH_SIZE; ++i) { INIT_LIST_HEAD(&dma_entry_hash[i].list); |
b0a5b83ee dma-debug: Put al... |
897 |
spin_lock_init(&dma_entry_hash[i].lock); |
6bf078715 dma-debug: add in... |
898 |
} |
8e4d81b98 dma: debug: no ne... |
899 |
dma_debug_fs_init(); |
788dcfa6f dma-debug: add de... |
900 |
|
ad78dee0b dma-debug: Batch ... |
901 902 903 904 905 906 907 908 909 910 911 |
nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); for (i = 0; i < nr_pages; ++i) dma_debug_create_entries(GFP_KERNEL); if (num_free_entries >= nr_prealloc_entries) { pr_info("preallocated %d debug entries ", nr_total_entries); } else if (num_free_entries > 0) { pr_warn("%d debug entries requested but only %d allocated ", nr_prealloc_entries, nr_total_entries); } else { |
f737b095c dma-debug: Use pr... |
912 913 |
pr_err("debugging out of memory error - disabled "); |
6bf078715 dma-debug: add in... |
914 |
global_disable = true; |
15b28bbcd dma-debug: move i... |
915 |
return 0; |
6bf078715 dma-debug: add in... |
916 |
} |
2b9d9ac02 dma-debug: Dynami... |
917 |
min_free_entries = num_free_entries; |
e6a1a89d5 dma-debug: add dm... |
918 |
|
2ce8e7ed0 dma-debug: preven... |
919 |
dma_debug_initialized = true; |
f737b095c dma-debug: Use pr... |
920 921 |
pr_info("debugging enabled by kernel config "); |
15b28bbcd dma-debug: move i... |
922 |
return 0; |
6bf078715 dma-debug: add in... |
923 |
} |
15b28bbcd dma-debug: move i... |
924 |
core_initcall(dma_debug_init); |
6bf078715 dma-debug: add in... |
925 |
|
59d3daafa dma-debug: add ke... |
926 927 928 929 930 931 |
static __init int dma_debug_cmdline(char *str) { if (!str) return -EINVAL; if (strncmp(str, "off", 3) == 0) { |
f737b095c dma-debug: Use pr... |
932 933 |
pr_info("debugging disabled on kernel command line "); |
59d3daafa dma-debug: add ke... |
934 935 936 937 938 939 940 941 |
global_disable = true; } return 0; } static __init int dma_debug_entries_cmdline(char *str) { |
59d3daafa dma-debug: add ke... |
942 943 |
if (!str) return -EINVAL; |
bcebe324c dma-debug: simpli... |
944 945 |
if (!get_option(&str, &nr_prealloc_entries)) nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
59d3daafa dma-debug: add ke... |
946 947 948 949 950 |
return 0; } __setup("dma_debug=", dma_debug_cmdline); __setup("dma_debug_entries=", dma_debug_entries_cmdline); |
2d62ece14 dma-debug: add co... |
951 952 953 954 955 |
static void check_unmap(struct dma_debug_entry *ref) { struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; |
2d62ece14 dma-debug: add co... |
956 |
bucket = get_hash_bucket(ref, &flags); |
c6a21d0b8 dma-debug: hash_b... |
957 |
entry = bucket_find_exact(bucket, ref); |
2d62ece14 dma-debug: add co... |
958 959 |
if (!entry) { |
8d640a51e dma-debug: fix lo... |
960 961 |
/* must drop lock before calling dma_mapping_error */ put_hash_bucket(bucket, &flags); |
bfe0fb0f1 dma-debug: fix to... |
962 963 |
if (dma_mapping_error(ref->dev, ref->dev_addr)) { err_printk(ref->dev, NULL, |
f737b095c dma-debug: Use pr... |
964 |
"device driver tries to free an " |
8d640a51e dma-debug: fix lo... |
965 966 967 968 |
"invalid DMA memory address "); } else { err_printk(ref->dev, NULL, |
f737b095c dma-debug: Use pr... |
969 |
"device driver tries to free DMA " |
8d640a51e dma-debug: fix lo... |
970 971 972 973 |
"memory it has not allocated [device " "address=0x%016llx] [size=%llu bytes] ", ref->dev_addr, ref->size); |
bfe0fb0f1 dma-debug: fix to... |
974 |
} |
8d640a51e dma-debug: fix lo... |
975 |
return; |
2d62ece14 dma-debug: add co... |
976 977 978 |
} if (ref->size != entry->size) { |
f737b095c dma-debug: Use pr... |
979 |
err_printk(ref->dev, entry, "device driver frees " |
2d62ece14 dma-debug: add co... |
980 981 982 983 984 985 986 987 |
"DMA memory with different size " "[device address=0x%016llx] [map size=%llu bytes] " "[unmap size=%llu bytes] ", ref->dev_addr, entry->size, ref->size); } if (ref->type != entry->type) { |
f737b095c dma-debug: Use pr... |
988 |
err_printk(ref->dev, entry, "device driver frees " |
2d62ece14 dma-debug: add co... |
989 990 991 992 993 994 995 |
"DMA memory with wrong function " "[device address=0x%016llx] [size=%llu bytes] " "[mapped as %s] [unmapped as %s] ", ref->dev_addr, ref->size, type2name[entry->type], type2name[ref->type]); } else if ((entry->type == dma_debug_coherent) && |
0abdd7a81 dma-debug: introd... |
996 |
(phys_addr(ref) != phys_addr(entry))) { |
f737b095c dma-debug: Use pr... |
997 |
err_printk(ref->dev, entry, "device driver frees " |
2d62ece14 dma-debug: add co... |
998 999 |
"DMA memory with different CPU address " "[device address=0x%016llx] [size=%llu bytes] " |
59a40e704 dma-debug: Fix co... |
1000 1001 |
"[cpu alloc address=0x%016llx] " "[cpu free address=0x%016llx]", |
2d62ece14 dma-debug: add co... |
1002 |
ref->dev_addr, ref->size, |
0abdd7a81 dma-debug: introd... |
1003 1004 |
phys_addr(entry), phys_addr(ref)); |
2d62ece14 dma-debug: add co... |
1005 1006 1007 1008 |
} if (ref->sg_call_ents && ref->type == dma_debug_sg && ref->sg_call_ents != entry->sg_call_ents) { |
f737b095c dma-debug: Use pr... |
1009 |
err_printk(ref->dev, entry, "device driver frees " |
2d62ece14 dma-debug: add co... |
1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 |
"DMA sg list with different entry count " "[map count=%d] [unmap count=%d] ", entry->sg_call_ents, ref->sg_call_ents); } /* * This may be no bug in reality - but most implementations of the * DMA API don't handle this properly, so check for it here */ if (ref->direction != entry->direction) { |
f737b095c dma-debug: Use pr... |
1021 |
err_printk(ref->dev, entry, "device driver frees " |
2d62ece14 dma-debug: add co... |
1022 1023 1024 1025 1026 1027 1028 1029 |
"DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [unmapped with %s] ", ref->dev_addr, ref->size, dir2name[entry->direction], dir2name[ref->direction]); } |
a5759b2bf dma-debug: add co... |
1030 1031 1032 1033 1034 |
/* * Drivers should use dma_mapping_error() to check the returned * addresses of dma_map_single() and dma_map_page(). * If not, print this warning message. See Documentation/DMA-API.txt. */ |
6c9c6d630 dma-debug: New in... |
1035 1036 |
if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { err_printk(ref->dev, entry, |
f737b095c dma-debug: Use pr... |
1037 |
"device driver failed to check map error" |
6c9c6d630 dma-debug: New in... |
1038 1039 1040 1041 1042 |
"[device address=0x%016llx] [size=%llu bytes] " "[mapped as %s]", ref->dev_addr, ref->size, type2name[entry->type]); } |
2d62ece14 dma-debug: add co... |
1043 1044 |
hash_bucket_del(entry); dma_entry_free(entry); |
2d62ece14 dma-debug: add co... |
1045 1046 |
put_hash_bucket(bucket, &flags); } |
b4a0f533e dma-api: Teach th... |
1047 1048 |
static void check_for_stack(struct device *dev, struct page *page, size_t offset) |
2d62ece14 dma-debug: add co... |
1049 |
{ |
b4a0f533e dma-api: Teach th... |
1050 1051 1052 1053 1054 1055 1056 1057 1058 |
void *addr; struct vm_struct *stack_vm_area = task_stack_vm_area(current); if (!stack_vm_area) { /* Stack is direct-mapped. */ if (PageHighMem(page)) return; addr = page_address(page) + offset; if (object_is_on_stack(addr)) |
f737b095c dma-debug: Use pr... |
1059 1060 |
err_printk(dev, NULL, "device driver maps memory from stack [addr=%p] ", addr); |
b4a0f533e dma-api: Teach th... |
1061 1062 1063 1064 1065 1066 1067 1068 1069 |
} else { /* Stack is vmalloced. */ int i; for (i = 0; i < stack_vm_area->nr_pages; i++) { if (page != stack_vm_area->pages[i]) continue; addr = (u8 *)current->stack + i * PAGE_SIZE + offset; |
f737b095c dma-debug: Use pr... |
1070 1071 |
err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p] ", addr); |
b4a0f533e dma-api: Teach th... |
1072 1073 1074 |
break; } } |
2d62ece14 dma-debug: add co... |
1075 |
} |
/* True if [addr, addr+len) intersects the half-open range [start, end). */
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}
f39d1b979 dma-debug: Fix th... |
1085 |
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) |
2e34bde18 dma-debug: add ch... |
1086 |
{ |
ea535e418 dma-debug: switch... |
1087 |
if (overlap(addr, len, _stext, _etext) || |
f39d1b979 dma-debug: Fix th... |
1088 |
overlap(addr, len, __start_rodata, __end_rodata)) |
f737b095c dma-debug: Use pr... |
1089 1090 |
err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu] ", addr, len); |
2e34bde18 dma-debug: add ch... |
1091 |
} |
aa010efb7 dma-debug: be mor... |
1092 1093 1094 |
static void check_sync(struct device *dev, struct dma_debug_entry *ref, bool to_cpu) |
2d62ece14 dma-debug: add co... |
1095 |
{ |
2d62ece14 dma-debug: add co... |
1096 1097 1098 |
struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; |
aa010efb7 dma-debug: be mor... |
1099 |
bucket = get_hash_bucket(ref, &flags); |
2d62ece14 dma-debug: add co... |
1100 |
|
c6a21d0b8 dma-debug: hash_b... |
1101 |
entry = bucket_find_contain(&bucket, ref, &flags); |
2d62ece14 dma-debug: add co... |
1102 1103 |
if (!entry) { |
f737b095c dma-debug: Use pr... |
1104 |
err_printk(dev, NULL, "device driver tries " |
2d62ece14 dma-debug: add co... |
1105 1106 1107 |
"to sync DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes] ", |
aa010efb7 dma-debug: be mor... |
1108 |
(unsigned long long)ref->dev_addr, ref->size); |
2d62ece14 dma-debug: add co... |
1109 1110 |
goto out; } |
aa010efb7 dma-debug: be mor... |
1111 |
if (ref->size > entry->size) { |
f737b095c dma-debug: Use pr... |
1112 |
err_printk(dev, entry, "device driver syncs" |
2d62ece14 dma-debug: add co... |
1113 1114 |
" DMA memory outside allocated range " "[device address=0x%016llx] " |
aa010efb7 dma-debug: be mor... |
1115 1116 1117 1118 1119 |
"[allocation size=%llu bytes] " "[sync offset+size=%llu] ", entry->dev_addr, entry->size, ref->size); |
2d62ece14 dma-debug: add co... |
1120 |
} |
42d53b4ff dma-debug: allow ... |
1121 1122 |
if (entry->direction == DMA_BIDIRECTIONAL) goto out; |
aa010efb7 dma-debug: be mor... |
1123 |
if (ref->direction != entry->direction) { |
f737b095c dma-debug: Use pr... |
1124 |
err_printk(dev, entry, "device driver syncs " |
2d62ece14 dma-debug: add co... |
1125 1126 1127 1128 |
"DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s] ", |
aa010efb7 dma-debug: be mor... |
1129 |
(unsigned long long)ref->dev_addr, entry->size, |
2d62ece14 dma-debug: add co... |
1130 |
dir2name[entry->direction], |
aa010efb7 dma-debug: be mor... |
1131 |
dir2name[ref->direction]); |
2d62ece14 dma-debug: add co... |
1132 |
} |
2d62ece14 dma-debug: add co... |
1133 |
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && |
aa010efb7 dma-debug: be mor... |
1134 |
!(ref->direction == DMA_TO_DEVICE)) |
f737b095c dma-debug: Use pr... |
1135 |
err_printk(dev, entry, "device driver syncs " |
2d62ece14 dma-debug: add co... |
1136 1137 1138 1139 |
"device read-only DMA memory for cpu " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s] ", |
aa010efb7 dma-debug: be mor... |
1140 |
(unsigned long long)ref->dev_addr, entry->size, |
2d62ece14 dma-debug: add co... |
1141 |
dir2name[entry->direction], |
aa010efb7 dma-debug: be mor... |
1142 |
dir2name[ref->direction]); |
2d62ece14 dma-debug: add co... |
1143 1144 |
if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && |
aa010efb7 dma-debug: be mor... |
1145 |
!(ref->direction == DMA_FROM_DEVICE)) |
f737b095c dma-debug: Use pr... |
1146 |
err_printk(dev, entry, "device driver syncs " |
2d62ece14 dma-debug: add co... |
1147 1148 1149 1150 |
"device write-only DMA memory to device " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s] ", |
aa010efb7 dma-debug: be mor... |
1151 |
(unsigned long long)ref->dev_addr, entry->size, |
2d62ece14 dma-debug: add co... |
1152 |
dir2name[entry->direction], |
aa010efb7 dma-debug: be mor... |
1153 |
dir2name[ref->direction]); |
2d62ece14 dma-debug: add co... |
1154 |
|
7f8306429 dma-debug: check ... |
1155 1156 |
if (ref->sg_call_ents && ref->type == dma_debug_sg && ref->sg_call_ents != entry->sg_call_ents) { |
f737b095c dma-debug: Use pr... |
1157 |
err_printk(ref->dev, entry, "device driver syncs " |
7f8306429 dma-debug: check ... |
1158 1159 1160 1161 1162 |
"DMA sg list with different entry count " "[map count=%d] [sync count=%d] ", entry->sg_call_ents, ref->sg_call_ents); } |
2d62ece14 dma-debug: add co... |
1163 1164 |
out: put_hash_bucket(bucket, &flags); |
2d62ece14 dma-debug: add co... |
1165 |
} |
/*
 * Optional (CONFIG_DMA_API_DEBUG_SG) sanity checks on one sg segment:
 * it must not exceed the device's max segment size nor straddle the
 * device's segment boundary mask.
 */
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
99c65fa7c dma-debug: Check ... |
1193 1194 1195 1196 1197 1198 1199 |
void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len) { if (unlikely(dma_debug_disabled())) return; if (!virt_addr_valid(addr)) |
f737b095c dma-debug: Use pr... |
1200 1201 |
err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu] ", |
99c65fa7c dma-debug: Check ... |
1202 1203 1204 |
addr, len); if (is_vmalloc_addr(addr)) |
f737b095c dma-debug: Use pr... |
1205 1206 |
err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu] ", |
99c65fa7c dma-debug: Check ... |
1207 1208 1209 |
addr, len); } EXPORT_SYMBOL(debug_dma_map_single); |
f62bc980e dma-debug: add ch... |
1210 |
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, |
2e05ea5cd dma-mapping: impl... |
1211 |
size_t size, int direction, dma_addr_t dma_addr) |
f62bc980e dma-debug: add ch... |
1212 1213 |
{ struct dma_debug_entry *entry; |
01ce18b31 dma-debug: introd... |
1214 |
if (unlikely(dma_debug_disabled())) |
f62bc980e dma-debug: add ch... |
1215 |
return; |
bfe0fb0f1 dma-debug: fix to... |
1216 |
if (dma_mapping_error(dev, dma_addr)) |
f62bc980e dma-debug: add ch... |
1217 1218 1219 1220 1221 1222 1223 |
return; entry = dma_entry_alloc(); if (!entry) return; entry->dev = dev; |
2e05ea5cd dma-mapping: impl... |
1224 |
entry->type = dma_debug_single; |
0abdd7a81 dma-debug: introd... |
1225 1226 |
entry->pfn = page_to_pfn(page); entry->offset = offset, |
f62bc980e dma-debug: add ch... |
1227 1228 1229 |
entry->dev_addr = dma_addr; entry->size = size; entry->direction = direction; |
6c9c6d630 dma-debug: New in... |
1230 |
entry->map_err_type = MAP_ERR_NOT_CHECKED; |
f62bc980e dma-debug: add ch... |
1231 |
|
b4a0f533e dma-api: Teach th... |
1232 |
check_for_stack(dev, page, offset); |
9537a48ed dma-debug: make m... |
1233 |
if (!PageHighMem(page)) { |
f39d1b979 dma-debug: Fix th... |
1234 |
void *addr = page_address(page) + offset; |
2e34bde18 dma-debug: add ch... |
1235 |
check_for_illegal_area(dev, addr, size); |
f62bc980e dma-debug: add ch... |
1236 1237 1238 1239 1240 |
} add_dma_entry(entry); } EXPORT_SYMBOL(debug_dma_map_page); |
6c9c6d630 dma-debug: New in... |
1241 1242 1243 1244 1245 1246 |
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_debug_entry ref; struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; |
01ce18b31 dma-debug: introd... |
1247 |
if (unlikely(dma_debug_disabled())) |
6c9c6d630 dma-debug: New in... |
1248 1249 1250 1251 1252 |
return; ref.dev = dev; ref.dev_addr = dma_addr; bucket = get_hash_bucket(&ref, &flags); |
6c9c6d630 dma-debug: New in... |
1253 |
|
96e7d7a1e dma-debug: update... |
1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 |
list_for_each_entry(entry, &bucket->list, list) { if (!exact_match(&ref, entry)) continue; /* * The same physical address can be mapped multiple * times. Without a hardware IOMMU this results in the * same device addresses being put into the dma-debug * hash multiple times too. This can result in false * positives being reported. Therefore we implement a * best-fit algorithm here which updates the first entry * from the hash which fits the reference value and is * not currently listed as being checked. */ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { entry->map_err_type = MAP_ERR_CHECKED; break; } } |
6c9c6d630 dma-debug: New in... |
1273 |
|
6c9c6d630 dma-debug: New in... |
1274 1275 1276 |
put_hash_bucket(bucket, &flags); } EXPORT_SYMBOL(debug_dma_mapping_error); |
f62bc980e dma-debug: add ch... |
1277 |
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, |
2e05ea5cd dma-mapping: impl... |
1278 |
size_t size, int direction) |
f62bc980e dma-debug: add ch... |
1279 1280 |
{ struct dma_debug_entry ref = { |
2e05ea5cd dma-mapping: impl... |
1281 |
.type = dma_debug_single, |
f62bc980e dma-debug: add ch... |
1282 1283 1284 1285 1286 |
.dev = dev, .dev_addr = addr, .size = size, .direction = direction, }; |
01ce18b31 dma-debug: introd... |
1287 |
if (unlikely(dma_debug_disabled())) |
f62bc980e dma-debug: add ch... |
1288 |
return; |
f62bc980e dma-debug: add ch... |
1289 1290 1291 |
check_unmap(&ref); } EXPORT_SYMBOL(debug_dma_unmap_page); |
972aa45ce dma-debug: add ad... |
1292 1293 1294 1295 1296 1297 |
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int mapped_ents, int direction) { struct dma_debug_entry *entry; struct scatterlist *s; int i; |
01ce18b31 dma-debug: introd... |
1298 |
if (unlikely(dma_debug_disabled())) |
972aa45ce dma-debug: add ad... |
1299 1300 1301 1302 1303 1304 1305 1306 1307 |
return; for_each_sg(sg, s, mapped_ents, i) { entry = dma_entry_alloc(); if (!entry) return; entry->type = dma_debug_sg; entry->dev = dev; |
0abdd7a81 dma-debug: introd... |
1308 1309 |
entry->pfn = page_to_pfn(sg_page(s)); entry->offset = s->offset, |
884d05970 dma-debug: use sg... |
1310 |
entry->size = sg_dma_len(s); |
15aedea43 dma-debug: use sg... |
1311 |
entry->dev_addr = sg_dma_address(s); |
972aa45ce dma-debug: add ad... |
1312 1313 1314 |
entry->direction = direction; entry->sg_call_ents = nents; entry->sg_mapped_ents = mapped_ents; |
b4a0f533e dma-api: Teach th... |
1315 |
check_for_stack(dev, sg_page(s), s->offset); |
9537a48ed dma-debug: make m... |
1316 |
if (!PageHighMem(sg_page(s))) { |
884d05970 dma-debug: use sg... |
1317 |
check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); |
9537a48ed dma-debug: make m... |
1318 |
} |
972aa45ce dma-debug: add ad... |
1319 |
|
78c47830a dma-debug: check ... |
1320 |
check_sg_segment(dev, s); |
972aa45ce dma-debug: add ad... |
1321 1322 1323 1324 |
add_dma_entry(entry); } } EXPORT_SYMBOL(debug_dma_map_sg); |
aa010efb7 dma-debug: be mor... |
1325 1326 |
static int get_nr_mapped_entries(struct device *dev, struct dma_debug_entry *ref) |
88f3907f6 dma-debug: fix de... |
1327 |
{ |
aa010efb7 dma-debug: be mor... |
1328 |
struct dma_debug_entry *entry; |
88f3907f6 dma-debug: fix de... |
1329 1330 |
struct hash_bucket *bucket; unsigned long flags; |
c17e2cf73 dma-debug: code s... |
1331 |
int mapped_ents; |
88f3907f6 dma-debug: fix de... |
1332 |
|
aa010efb7 dma-debug: be mor... |
1333 |
bucket = get_hash_bucket(ref, &flags); |
c6a21d0b8 dma-debug: hash_b... |
1334 |
entry = bucket_find_exact(bucket, ref); |
c17e2cf73 dma-debug: code s... |
1335 |
mapped_ents = 0; |
88f3907f6 dma-debug: fix de... |
1336 |
|
88f3907f6 dma-debug: fix de... |
1337 1338 1339 1340 1341 1342 |
if (entry) mapped_ents = entry->sg_mapped_ents; put_hash_bucket(bucket, &flags); return mapped_ents; } |
972aa45ce dma-debug: add ad... |
1343 1344 1345 |
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, int dir) { |
972aa45ce dma-debug: add ad... |
1346 1347 |
struct scatterlist *s; int mapped_ents = 0, i; |
972aa45ce dma-debug: add ad... |
1348 |
|
01ce18b31 dma-debug: introd... |
1349 |
if (unlikely(dma_debug_disabled())) |
972aa45ce dma-debug: add ad... |
1350 1351 1352 1353 1354 1355 1356 |
return; for_each_sg(sglist, s, nelems, i) { struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, |
0abdd7a81 dma-debug: introd... |
1357 1358 |
.pfn = page_to_pfn(sg_page(s)), .offset = s->offset, |
15aedea43 dma-debug: use sg... |
1359 |
.dev_addr = sg_dma_address(s), |
884d05970 dma-debug: use sg... |
1360 |
.size = sg_dma_len(s), |
972aa45ce dma-debug: add ad... |
1361 |
.direction = dir, |
e5e8c5b90 dma-debug: check ... |
1362 |
.sg_call_ents = nelems, |
972aa45ce dma-debug: add ad... |
1363 1364 1365 1366 |
}; if (mapped_ents && i >= mapped_ents) break; |
e5e8c5b90 dma-debug: check ... |
1367 |
if (!i) |
aa010efb7 dma-debug: be mor... |
1368 |
mapped_ents = get_nr_mapped_entries(dev, &ref); |
972aa45ce dma-debug: add ad... |
1369 1370 1371 1372 1373 |
check_unmap(&ref); } } EXPORT_SYMBOL(debug_dma_unmap_sg); |
6bfd44987 dma-debug: add ch... |
1374 1375 1376 1377 |
void debug_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t dma_addr, void *virt) { struct dma_debug_entry *entry; |
01ce18b31 dma-debug: introd... |
1378 |
if (unlikely(dma_debug_disabled())) |
6bfd44987 dma-debug: add ch... |
1379 1380 1381 1382 |
return; if (unlikely(virt == NULL)) return; |
af1da6868 dma-debug: fix me... |
1383 1384 |
/* handle vmalloc and linear addresses */ if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
6bfd44987 dma-debug: add ch... |
1385 |
return; |
af1da6868 dma-debug: fix me... |
1386 1387 |
entry = dma_entry_alloc(); if (!entry) |
3aaabbf1c lib/dma-debug.c: ... |
1388 |
return; |
6bfd44987 dma-debug: add ch... |
1389 1390 |
entry->type = dma_debug_coherent; entry->dev = dev; |
e57d05520 dma-debug: use of... |
1391 |
entry->offset = offset_in_page(virt); |
6bfd44987 dma-debug: add ch... |
1392 1393 1394 |
entry->size = size; entry->dev_addr = dma_addr; entry->direction = DMA_BIDIRECTIONAL; |
3aaabbf1c lib/dma-debug.c: ... |
1395 1396 1397 1398 |
if (is_vmalloc_addr(virt)) entry->pfn = vmalloc_to_pfn(virt); else entry->pfn = page_to_pfn(virt_to_page(virt)); |
6bfd44987 dma-debug: add ch... |
1399 1400 |
add_dma_entry(entry); } |
6bfd44987 dma-debug: add ch... |
1401 1402 1403 1404 1405 1406 1407 |
void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr) { struct dma_debug_entry ref = { .type = dma_debug_coherent, .dev = dev, |
e57d05520 dma-debug: use of... |
1408 |
.offset = offset_in_page(virt), |
6bfd44987 dma-debug: add ch... |
1409 1410 1411 1412 |
.dev_addr = addr, .size = size, .direction = DMA_BIDIRECTIONAL, }; |
3aaabbf1c lib/dma-debug.c: ... |
1413 |
/* handle vmalloc and linear addresses */ |
af1da6868 dma-debug: fix me... |
1414 |
if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
3aaabbf1c lib/dma-debug.c: ... |
1415 1416 1417 1418 1419 1420 |
return; if (is_vmalloc_addr(virt)) ref.pfn = vmalloc_to_pfn(virt); else ref.pfn = page_to_pfn(virt_to_page(virt)); |
01ce18b31 dma-debug: introd... |
1421 |
if (unlikely(dma_debug_disabled())) |
6bfd44987 dma-debug: add ch... |
1422 1423 1424 1425 |
return; check_unmap(&ref); } |
6bfd44987 dma-debug: add ch... |
1426 |
|
0e74b34df dma-debug: add su... |
1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 |
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, int direction, dma_addr_t dma_addr) { struct dma_debug_entry *entry; if (unlikely(dma_debug_disabled())) return; entry = dma_entry_alloc(); if (!entry) return; entry->type = dma_debug_resource; entry->dev = dev; |
2e0cc304e dma-debug: fix ia... |
1441 |
entry->pfn = PHYS_PFN(addr); |
0e74b34df dma-debug: add su... |
1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 |
entry->offset = offset_in_page(addr); entry->size = size; entry->dev_addr = dma_addr; entry->direction = direction; entry->map_err_type = MAP_ERR_NOT_CHECKED; add_dma_entry(entry); } EXPORT_SYMBOL(debug_dma_map_resource); void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, size_t size, int direction) { struct dma_debug_entry ref = { .type = dma_debug_resource, .dev = dev, .dev_addr = dma_addr, .size = size, .direction = direction, }; if (unlikely(dma_debug_disabled())) return; check_unmap(&ref); } EXPORT_SYMBOL(debug_dma_unmap_resource); |
/*
 * Check a dma_sync_single_for_cpu() call against the tracked mapping
 * for @dma_handle.
 */
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type		= dma_debug_single,
		.dev		= dev,
		.dev_addr	= dma_handle,
		.size		= size,
		.direction	= direction,
		.sg_call_ents	= 0,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

/*
 * Check a dma_sync_single_for_device() call against the tracked mapping
 * for @dma_handle.
 */
void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref = {
		.type		= dma_debug_single,
		.dev		= dev,
		.dev_addr	= dma_handle,
		.size		= size,
		.direction	= direction,
		.sg_call_ents	= 0,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
/*
 * Check a dma_sync_sg_for_cpu() call: every scatterlist entry that was
 * actually mapped is validated against the tracked sg mapping.
 */
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type		= dma_debug_sg,
			.dev		= dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr	= sg_dma_address(s),
			.size		= sg_dma_len(s),
			.direction	= direction,
			.sg_call_ents	= nelems,
		};

		/*
		 * The number of entries actually mapped can be smaller
		 * than nelems; look it up once on the first iteration and
		 * stop syncing past it.
		 */
		if (i == 0)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

/*
 * Check a dma_sync_sg_for_device() call: every scatterlist entry that
 * was actually mapped is validated against the tracked sg mapping.
 */
void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type		= dma_debug_sg,
			.dev		= dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr	= sg_dma_address(s),
			.size		= sg_dma_len(s),
			.direction	= direction,
			.sg_call_ents	= nelems,
		};

		/* see debug_dma_sync_sg_for_cpu(): bound by mapped count */
		if (i == 0)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1745de5e5 dma-debug: add dm... |
1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 |
static int __init dma_debug_driver_setup(char *str) { int i; for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { current_driver_name[i] = *str; if (*str == 0) break; } if (current_driver_name[0]) |
f737b095c dma-debug: Use pr... |
1575 1576 |
pr_info("enable driver filter for driver [%s] ", |
e7ed70eed dma-debug: use pr... |
1577 |
current_driver_name); |
1745de5e5 dma-debug: add dm... |
1578 1579 1580 1581 1582 |
return 1; } __setup("dma_debug_driver=", dma_debug_driver_setup); |