Commit a31fba5d68cebf8f5fefd03e079dab94875e25f5

Authored by Joerg Roedel
1 parent 948408ba3e

dma-debug: add checks for sync_single_sg_*

Impact: add debug callbacks for dma_sync_sg_* functions

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

Showing 2 changed files with 52 additions and 0 deletions

include/linux/dma-debug.h
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __DMA_DEBUG_H
#define __DMA_DEBUG_H

#include <linux/types.h>

struct device;
struct scatterlist;

#ifdef CONFIG_DMA_API_DEBUG

extern void dma_debug_init(u32 num_entries);

extern void debug_dma_map_page(struct device *dev, struct page *page,
                               size_t offset, size_t size,
                               int direction, dma_addr_t dma_addr,
                               bool map_single);

extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                 size_t size, int direction, bool map_single);

extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, int mapped_ents, int direction);

extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, int dir);

extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
                                     dma_addr_t dma_addr, void *virt);

extern void debug_dma_free_coherent(struct device *dev, size_t size,
                                    void *virt, dma_addr_t addr);

extern void debug_dma_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          int direction);

extern void debug_dma_sync_single_for_device(struct device *dev,
                                             dma_addr_t dma_handle,
                                             size_t size, int direction);

extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                                dma_addr_t dma_handle,
                                                unsigned long offset,
                                                size_t size,
                                                int direction);

extern void debug_dma_sync_single_range_for_device(struct device *dev,
                                                   dma_addr_t dma_handle,
                                                   unsigned long offset,
                                                   size_t size, int direction);

+extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+                                      struct scatterlist *sg,
+                                      int nelems, int direction);
+
+extern void debug_dma_sync_sg_for_device(struct device *dev,
+                                         struct scatterlist *sg,
+                                         int nelems, int direction);
+
#else /* CONFIG_DMA_API_DEBUG */

static inline void dma_debug_init(u32 num_entries)
{
}

static inline void debug_dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction, dma_addr_t dma_addr,
                                      bool map_single)
{
}

static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                        size_t size, int direction,
                                        bool map_single)
{
}

static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                                    int nents, int mapped_ents, int direction)
{
}

static inline void debug_dma_unmap_sg(struct device *dev,
                                      struct scatterlist *sglist,
                                      int nelems, int dir)
{
}

static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t dma_addr, void *virt)
{
}

static inline void debug_dma_free_coherent(struct device *dev, size_t size,
                                           void *virt, dma_addr_t addr)
{
}

static inline void debug_dma_sync_single_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 size_t size, int direction)
{
}

static inline void debug_dma_sync_single_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    size_t size, int direction)
{
}

static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                                       dma_addr_t dma_handle,
                                                       unsigned long offset,
                                                       size_t size,
                                                       int direction)
{
}

static inline void debug_dma_sync_single_range_for_device(struct device *dev,
                                                          dma_addr_t dma_handle,
                                                          unsigned long offset,
                                                          size_t size,
                                                          int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
+                                             struct scatterlist *sg,
+                                             int nelems, int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_device(struct device *dev,
+                                                struct scatterlist *sg,
+                                                int nelems, int direction)
{
}

#endif /* CONFIG_DMA_API_DEBUG */

#endif /* __DMA_DEBUG_H */
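Note that these hooks only report anything if the architecture's DMA API wrappers actually call them. As a rough sketch of the intended wiring (not part of this commit; the dma_mapping_ops structure and get_dma_ops() helper names are assumptions modeled on how the other debug_dma_* hooks are invoked):

/* Hypothetical wrapper in an arch's asm/dma-mapping.h, for illustration. */
static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg,
                                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, direction);

        /* new callback from this patch: validates the sync request */
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, direction);
}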
lib/dma-debug.c
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable*/
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scather-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them even
 * writeable. This is also the reason why a lock won't help much. But anyway,
 * the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
#define err_printk(dev, format, arg...) do {                    \
                error_count += 1;                               \
                if (show_all_errors || show_num_errors > 0) {   \
                        WARN(1, "%s %s: " format,               \
                             dev_driver_string(dev),            \
                             dev_name(dev) , ## arg);           \
                }                                               \
                if (!show_all_errors && show_num_errors > 0)    \
                        show_num_errors -= 1;                   \
        } while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 20-27 here as the index into the hash
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
                        num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
                        (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                        dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                        dma_debug_dent,
                        &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                        dma_debug_dent,
                        &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                        dma_debug_dent,
                        &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                        dma_debug_dent,
                        &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}


/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr))
                return;

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, "DMA-API: device driver maps memory from stack"
                                " [addr=%p]\n", addr);
}

static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, "DMA-API: device driver tries "
                                "to sync DMA memory it has not allocated "
                                "[device address=0x%016llx] [size=%llu bytes]\n",
                                addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] [sync offset=%llu] "
                                "[sync size=%llu]\n", entry->dev_addr, entry->size,
                                offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);

}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single) {
                entry->type = dma_debug_single;
                check_for_stack(dev, page_address(page) + offset);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                check_for_stack(dev, sg_virt(s));

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .paddr          = sg_phys(s),
                        .dev_addr       = s->dma_address,
                        .size           = s->length,
                        .direction      = dir,
                        .sg_call_ents   = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                         void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .paddr          = virt_to_phys(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
+
+void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                              int nelems, int direction)
+{
+       struct scatterlist *s;
+       int i;
+
+       if (unlikely(global_disable))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+               check_sync(dev, s->dma_address, s->dma_length, 0,
+                          direction, true);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
+
+void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                                 int nelems, int direction)
+{
+       struct scatterlist *s;
+       int i;
+
+       if (unlikely(global_disable))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+               check_sync(dev, s->dma_address, s->dma_length, 0,
+                          direction, false);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
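With the callbacks wired into the generic wrappers, dma-debug can flag scatter-gather sync bugs at runtime. A hypothetical driver fragment (for illustration only, not from this commit) that the new checks would warn about:

#include <linux/dma-mapping.h>

/* Hypothetical driver bug: the list is mapped DMA_TO_DEVICE but synced
 * for the CPU as DMA_FROM_DEVICE. For each element, check_sync() warns
 * about the direction mismatch and about syncing device read-only
 * memory for the cpu (subject to the num_errors throttle). */
static void broken_sg_sync(struct device *dev,
                           struct scatterlist *sglist, int nelems)
{
        int nents = dma_map_sg(dev, sglist, nelems, DMA_TO_DEVICE);

        if (!nents)
                return;

        /* ... device reads the buffers ... */

        dma_sync_sg_for_cpu(dev, sglist, nelems, DMA_FROM_DEVICE);

        dma_unmap_sg(dev, sglist, nelems, DMA_TO_DEVICE);
}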