Commit ea05c8444e451f1cfbf78c68733e717ad7b8602b

Authored by Dima Zavin
Committed by Linus Torvalds
1 parent d65bfacb04

mm: add a might_sleep_if() to dma_pool_alloc()

Buggy drivers (e.g. fsl_udc) could call dma_pool_alloc() from atomic
context with GFP_KERNEL.  In most instances, the first pool_alloc_page()
call would succeed and the sleeping functions would never be called; this
allowed the buggy drivers to slip through the cracks.

Add a might_sleep_if() check for __GFP_WAIT in flags.

Signed-off-by: Dima Zavin <dima@android.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 2 additions and 0 deletions Inline Diff

1 /* 1 /*
2 * DMA Pool allocator 2 * DMA Pool allocator
3 * 3 *
4 * Copyright 2001 David Brownell 4 * Copyright 2001 David Brownell
5 * Copyright 2007 Intel Corporation 5 * Copyright 2007 Intel Corporation
6 * Author: Matthew Wilcox <willy@linux.intel.com> 6 * Author: Matthew Wilcox <willy@linux.intel.com>
7 * 7 *
8 * This software may be redistributed and/or modified under the terms of 8 * This software may be redistributed and/or modified under the terms of
9 * the GNU General Public License ("GPL") version 2 as published by the 9 * the GNU General Public License ("GPL") version 2 as published by the
10 * Free Software Foundation. 10 * Free Software Foundation.
11 * 11 *
12 * This allocator returns small blocks of a given size which are DMA-able by 12 * This allocator returns small blocks of a given size which are DMA-able by
13 * the given device. It uses the dma_alloc_coherent page allocator to get 13 * the given device. It uses the dma_alloc_coherent page allocator to get
14 * new pages, then splits them up into blocks of the required size. 14 * new pages, then splits them up into blocks of the required size.
15 * Many older drivers still have their own code to do this. 15 * Many older drivers still have their own code to do this.
16 * 16 *
17 * The current design of this allocator is fairly simple. The pool is 17 * The current design of this allocator is fairly simple. The pool is
18 * represented by the 'struct dma_pool' which keeps a doubly-linked list of 18 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
19 * allocated pages. Each page in the page_list is split into blocks of at 19 * allocated pages. Each page in the page_list is split into blocks of at
20 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked 20 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
21 * list of free blocks within the page. Used blocks aren't tracked, but we 21 * list of free blocks within the page. Used blocks aren't tracked, but we
22 * keep a count of how many are currently allocated from each page. 22 * keep a count of how many are currently allocated from each page.
23 */ 23 */
24 24
25 #include <linux/device.h> 25 #include <linux/device.h>
26 #include <linux/dma-mapping.h> 26 #include <linux/dma-mapping.h>
27 #include <linux/dmapool.h> 27 #include <linux/dmapool.h>
28 #include <linux/kernel.h> 28 #include <linux/kernel.h>
29 #include <linux/list.h> 29 #include <linux/list.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/mutex.h> 31 #include <linux/mutex.h>
32 #include <linux/poison.h> 32 #include <linux/poison.h>
33 #include <linux/sched.h> 33 #include <linux/sched.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <linux/string.h> 36 #include <linux/string.h>
37 #include <linux/types.h> 37 #include <linux/types.h>
38 #include <linux/wait.h> 38 #include <linux/wait.h>
39 39
40 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON) 40 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
41 #define DMAPOOL_DEBUG 1 41 #define DMAPOOL_DEBUG 1
42 #endif 42 #endif
43 43
struct dma_pool {		/* the pool */
	struct list_head page_list;	/* all dma_page allocations owned by this pool */
	spinlock_t lock;		/* protects page_list and per-page free lists */
	size_t size;			/* block size, padded/aligned at create time */
	struct device *dev;		/* device doing the DMA; may be NULL */
	size_t allocation;		/* bytes per dma_alloc_coherent() allocation */
	size_t boundary;		/* power-of-two boundary blocks never cross */
	char name[32];			/* pool name, for diagnostics only */
	wait_queue_head_t waitq;	/* __GFP_WAIT allocators sleep here when empty */
	struct list_head pools;		/* node on dev->dma_pools */
};
55 55
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* node on pool->page_list */
	void *vaddr;			/* kernel virtual address of the coherent area */
	dma_addr_t dma;			/* DMA (bus) address of the coherent area */
	unsigned int in_use;		/* count of blocks currently allocated */
	unsigned int offset;		/* first free block, or >= allocation if full */
};
63 63
/* how long a sleeping (__GFP_WAIT) allocator waits before retrying: ~100 ms */
#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

/* serializes pool creation/destruction and the per-device sysfs 'pools' file */
static DEFINE_MUTEX(pools_lock);
68 static ssize_t 68 static ssize_t
69 show_pools(struct device *dev, struct device_attribute *attr, char *buf) 69 show_pools(struct device *dev, struct device_attribute *attr, char *buf)
70 { 70 {
71 unsigned temp; 71 unsigned temp;
72 unsigned size; 72 unsigned size;
73 char *next; 73 char *next;
74 struct dma_page *page; 74 struct dma_page *page;
75 struct dma_pool *pool; 75 struct dma_pool *pool;
76 76
77 next = buf; 77 next = buf;
78 size = PAGE_SIZE; 78 size = PAGE_SIZE;
79 79
80 temp = scnprintf(next, size, "poolinfo - 0.1\n"); 80 temp = scnprintf(next, size, "poolinfo - 0.1\n");
81 size -= temp; 81 size -= temp;
82 next += temp; 82 next += temp;
83 83
84 mutex_lock(&pools_lock); 84 mutex_lock(&pools_lock);
85 list_for_each_entry(pool, &dev->dma_pools, pools) { 85 list_for_each_entry(pool, &dev->dma_pools, pools) {
86 unsigned pages = 0; 86 unsigned pages = 0;
87 unsigned blocks = 0; 87 unsigned blocks = 0;
88 88
89 spin_lock_irq(&pool->lock); 89 spin_lock_irq(&pool->lock);
90 list_for_each_entry(page, &pool->page_list, page_list) { 90 list_for_each_entry(page, &pool->page_list, page_list) {
91 pages++; 91 pages++;
92 blocks += page->in_use; 92 blocks += page->in_use;
93 } 93 }
94 spin_unlock_irq(&pool->lock); 94 spin_unlock_irq(&pool->lock);
95 95
96 /* per-pool info, no real statistics yet */ 96 /* per-pool info, no real statistics yet */
97 temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", 97 temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
98 pool->name, blocks, 98 pool->name, blocks,
99 pages * (pool->allocation / pool->size), 99 pages * (pool->allocation / pool->size),
100 pool->size, pages); 100 pool->size, pages);
101 size -= temp; 101 size -= temp;
102 next += temp; 102 next += temp;
103 } 103 }
104 mutex_unlock(&pools_lock); 104 mutex_unlock(&pools_lock);
105 105
106 return PAGE_SIZE - size; 106 return PAGE_SIZE - size;
107 } 107 }
108 108
109 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); 109 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
110 110
111 /** 111 /**
112 * dma_pool_create - Creates a pool of consistent memory blocks, for dma. 112 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
113 * @name: name of pool, for diagnostics 113 * @name: name of pool, for diagnostics
114 * @dev: device that will be doing the DMA 114 * @dev: device that will be doing the DMA
115 * @size: size of the blocks in this pool. 115 * @size: size of the blocks in this pool.
116 * @align: alignment requirement for blocks; must be a power of two 116 * @align: alignment requirement for blocks; must be a power of two
117 * @boundary: returned blocks won't cross this power of two boundary 117 * @boundary: returned blocks won't cross this power of two boundary
118 * Context: !in_interrupt() 118 * Context: !in_interrupt()
119 * 119 *
120 * Returns a dma allocation pool with the requested characteristics, or 120 * Returns a dma allocation pool with the requested characteristics, or
121 * null if one can't be created. Given one of these pools, dma_pool_alloc() 121 * null if one can't be created. Given one of these pools, dma_pool_alloc()
122 * may be used to allocate memory. Such memory will all have "consistent" 122 * may be used to allocate memory. Such memory will all have "consistent"
123 * DMA mappings, accessible by the device and its driver without using 123 * DMA mappings, accessible by the device and its driver without using
124 * cache flushing primitives. The actual size of blocks allocated may be 124 * cache flushing primitives. The actual size of blocks allocated may be
125 * larger than requested because of alignment. 125 * larger than requested because of alignment.
126 * 126 *
127 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't 127 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
128 * cross that size boundary. This is useful for devices which have 128 * cross that size boundary. This is useful for devices which have
129 * addressing restrictions on individual DMA transfers, such as not crossing 129 * addressing restrictions on individual DMA transfers, such as not crossing
130 * boundaries of 4KBytes. 130 * boundaries of 4KBytes.
131 */ 131 */
132 struct dma_pool *dma_pool_create(const char *name, struct device *dev, 132 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
133 size_t size, size_t align, size_t boundary) 133 size_t size, size_t align, size_t boundary)
134 { 134 {
135 struct dma_pool *retval; 135 struct dma_pool *retval;
136 size_t allocation; 136 size_t allocation;
137 137
138 if (align == 0) { 138 if (align == 0) {
139 align = 1; 139 align = 1;
140 } else if (align & (align - 1)) { 140 } else if (align & (align - 1)) {
141 return NULL; 141 return NULL;
142 } 142 }
143 143
144 if (size == 0) { 144 if (size == 0) {
145 return NULL; 145 return NULL;
146 } else if (size < 4) { 146 } else if (size < 4) {
147 size = 4; 147 size = 4;
148 } 148 }
149 149
150 if ((size % align) != 0) 150 if ((size % align) != 0)
151 size = ALIGN(size, align); 151 size = ALIGN(size, align);
152 152
153 allocation = max_t(size_t, size, PAGE_SIZE); 153 allocation = max_t(size_t, size, PAGE_SIZE);
154 154
155 if (!boundary) { 155 if (!boundary) {
156 boundary = allocation; 156 boundary = allocation;
157 } else if ((boundary < size) || (boundary & (boundary - 1))) { 157 } else if ((boundary < size) || (boundary & (boundary - 1))) {
158 return NULL; 158 return NULL;
159 } 159 }
160 160
161 retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev)); 161 retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
162 if (!retval) 162 if (!retval)
163 return retval; 163 return retval;
164 164
165 strlcpy(retval->name, name, sizeof(retval->name)); 165 strlcpy(retval->name, name, sizeof(retval->name));
166 166
167 retval->dev = dev; 167 retval->dev = dev;
168 168
169 INIT_LIST_HEAD(&retval->page_list); 169 INIT_LIST_HEAD(&retval->page_list);
170 spin_lock_init(&retval->lock); 170 spin_lock_init(&retval->lock);
171 retval->size = size; 171 retval->size = size;
172 retval->boundary = boundary; 172 retval->boundary = boundary;
173 retval->allocation = allocation; 173 retval->allocation = allocation;
174 init_waitqueue_head(&retval->waitq); 174 init_waitqueue_head(&retval->waitq);
175 175
176 if (dev) { 176 if (dev) {
177 int ret; 177 int ret;
178 178
179 mutex_lock(&pools_lock); 179 mutex_lock(&pools_lock);
180 if (list_empty(&dev->dma_pools)) 180 if (list_empty(&dev->dma_pools))
181 ret = device_create_file(dev, &dev_attr_pools); 181 ret = device_create_file(dev, &dev_attr_pools);
182 else 182 else
183 ret = 0; 183 ret = 0;
184 /* note: not currently insisting "name" be unique */ 184 /* note: not currently insisting "name" be unique */
185 if (!ret) 185 if (!ret)
186 list_add(&retval->pools, &dev->dma_pools); 186 list_add(&retval->pools, &dev->dma_pools);
187 else { 187 else {
188 kfree(retval); 188 kfree(retval);
189 retval = NULL; 189 retval = NULL;
190 } 190 }
191 mutex_unlock(&pools_lock); 191 mutex_unlock(&pools_lock);
192 } else 192 } else
193 INIT_LIST_HEAD(&retval->pools); 193 INIT_LIST_HEAD(&retval->pools);
194 194
195 return retval; 195 return retval;
196 } 196 }
197 EXPORT_SYMBOL(dma_pool_create); 197 EXPORT_SYMBOL(dma_pool_create);
198 198
199 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) 199 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
200 { 200 {
201 unsigned int offset = 0; 201 unsigned int offset = 0;
202 unsigned int next_boundary = pool->boundary; 202 unsigned int next_boundary = pool->boundary;
203 203
204 do { 204 do {
205 unsigned int next = offset + pool->size; 205 unsigned int next = offset + pool->size;
206 if (unlikely((next + pool->size) >= next_boundary)) { 206 if (unlikely((next + pool->size) >= next_boundary)) {
207 next = next_boundary; 207 next = next_boundary;
208 next_boundary += pool->boundary; 208 next_boundary += pool->boundary;
209 } 209 }
210 *(int *)(page->vaddr + offset) = next; 210 *(int *)(page->vaddr + offset) = next;
211 offset = next; 211 offset = next;
212 } while (offset < pool->allocation); 212 } while (offset < pool->allocation);
213 } 213 }
214 214
215 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) 215 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
216 { 216 {
217 struct dma_page *page; 217 struct dma_page *page;
218 218
219 page = kmalloc(sizeof(*page), mem_flags); 219 page = kmalloc(sizeof(*page), mem_flags);
220 if (!page) 220 if (!page)
221 return NULL; 221 return NULL;
222 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, 222 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
223 &page->dma, mem_flags); 223 &page->dma, mem_flags);
224 if (page->vaddr) { 224 if (page->vaddr) {
225 #ifdef DMAPOOL_DEBUG 225 #ifdef DMAPOOL_DEBUG
226 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); 226 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
227 #endif 227 #endif
228 pool_initialise_page(pool, page); 228 pool_initialise_page(pool, page);
229 list_add(&page->page_list, &pool->page_list); 229 list_add(&page->page_list, &pool->page_list);
230 page->in_use = 0; 230 page->in_use = 0;
231 page->offset = 0; 231 page->offset = 0;
232 } else { 232 } else {
233 kfree(page); 233 kfree(page);
234 page = NULL; 234 page = NULL;
235 } 235 }
236 return page; 236 return page;
237 } 237 }
238 238
239 static inline int is_page_busy(struct dma_page *page) 239 static inline int is_page_busy(struct dma_page *page)
240 { 240 {
241 return page->in_use != 0; 241 return page->in_use != 0;
242 } 242 }
243 243
244 static void pool_free_page(struct dma_pool *pool, struct dma_page *page) 244 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
245 { 245 {
246 dma_addr_t dma = page->dma; 246 dma_addr_t dma = page->dma;
247 247
248 #ifdef DMAPOOL_DEBUG 248 #ifdef DMAPOOL_DEBUG
249 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); 249 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
250 #endif 250 #endif
251 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); 251 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
252 list_del(&page->page_list); 252 list_del(&page->page_list);
253 kfree(page); 253 kfree(page);
254 } 254 }
255 255
256 /** 256 /**
257 * dma_pool_destroy - destroys a pool of dma memory blocks. 257 * dma_pool_destroy - destroys a pool of dma memory blocks.
258 * @pool: dma pool that will be destroyed 258 * @pool: dma pool that will be destroyed
259 * Context: !in_interrupt() 259 * Context: !in_interrupt()
260 * 260 *
261 * Caller guarantees that no more memory from the pool is in use, 261 * Caller guarantees that no more memory from the pool is in use,
262 * and that nothing will try to use the pool after this call. 262 * and that nothing will try to use the pool after this call.
263 */ 263 */
264 void dma_pool_destroy(struct dma_pool *pool) 264 void dma_pool_destroy(struct dma_pool *pool)
265 { 265 {
266 mutex_lock(&pools_lock); 266 mutex_lock(&pools_lock);
267 list_del(&pool->pools); 267 list_del(&pool->pools);
268 if (pool->dev && list_empty(&pool->dev->dma_pools)) 268 if (pool->dev && list_empty(&pool->dev->dma_pools))
269 device_remove_file(pool->dev, &dev_attr_pools); 269 device_remove_file(pool->dev, &dev_attr_pools);
270 mutex_unlock(&pools_lock); 270 mutex_unlock(&pools_lock);
271 271
272 while (!list_empty(&pool->page_list)) { 272 while (!list_empty(&pool->page_list)) {
273 struct dma_page *page; 273 struct dma_page *page;
274 page = list_entry(pool->page_list.next, 274 page = list_entry(pool->page_list.next,
275 struct dma_page, page_list); 275 struct dma_page, page_list);
276 if (is_page_busy(page)) { 276 if (is_page_busy(page)) {
277 if (pool->dev) 277 if (pool->dev)
278 dev_err(pool->dev, 278 dev_err(pool->dev,
279 "dma_pool_destroy %s, %p busy\n", 279 "dma_pool_destroy %s, %p busy\n",
280 pool->name, page->vaddr); 280 pool->name, page->vaddr);
281 else 281 else
282 printk(KERN_ERR 282 printk(KERN_ERR
283 "dma_pool_destroy %s, %p busy\n", 283 "dma_pool_destroy %s, %p busy\n",
284 pool->name, page->vaddr); 284 pool->name, page->vaddr);
285 /* leak the still-in-use consistent memory */ 285 /* leak the still-in-use consistent memory */
286 list_del(&page->page_list); 286 list_del(&page->page_list);
287 kfree(page); 287 kfree(page);
288 } else 288 } else
289 pool_free_page(pool, page); 289 pool_free_page(pool, page);
290 } 290 }
291 291
292 kfree(pool); 292 kfree(pool);
293 } 293 }
294 EXPORT_SYMBOL(dma_pool_destroy); 294 EXPORT_SYMBOL(dma_pool_destroy);
295 295
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	/*
	 * Catch buggy callers that pass a sleeping gfp mask from atomic
	 * context; without this they only trip up when the fast path below
	 * fails and the wait-queue path would actually sleep.
	 */
	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	/* fast path: find any page with a free block */
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	/* no free block: try to grow (GFP_ATOMIC — pool->lock is held) */
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			/* caller may sleep: wait for dma_pool_free to wake us */
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	/* pop the block: its first int holds the next free offset */
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
353 355
354 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) 356 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
355 { 357 {
356 unsigned long flags; 358 unsigned long flags;
357 struct dma_page *page; 359 struct dma_page *page;
358 360
359 spin_lock_irqsave(&pool->lock, flags); 361 spin_lock_irqsave(&pool->lock, flags);
360 list_for_each_entry(page, &pool->page_list, page_list) { 362 list_for_each_entry(page, &pool->page_list, page_list) {
361 if (dma < page->dma) 363 if (dma < page->dma)
362 continue; 364 continue;
363 if (dma < (page->dma + pool->allocation)) 365 if (dma < (page->dma + pool->allocation))
364 goto done; 366 goto done;
365 } 367 }
366 page = NULL; 368 page = NULL;
367 done: 369 done:
368 spin_unlock_irqrestore(&pool->lock, flags); 370 spin_unlock_irqrestore(&pool->lock, flags);
369 return page; 371 return page;
370 } 372 }
371 373
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		/* @dma is not inside any page of this pool: caller bug */
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	/* paranoia: @vaddr and @dma must name the same block */
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		/* walk the free list: finding @offset there means double free */
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	/* push the block onto the head of the page's free list */
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/* a __GFP_WAIT allocator may be sleeping for exactly this block */
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
447 449
/*
 * Managed DMA pool
 */

/* devres release callback: destroy the pool recorded in @res */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool **pool_ptr = res;

	dma_pool_destroy(*pool_ptr);
}
457 459
/* devres match callback: does @res record the pool in @match_data? */
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	struct dma_pool **pool_ptr = res;

	return *pool_ptr == match_data;
}
462 464
463 /** 465 /**
464 * dmam_pool_create - Managed dma_pool_create() 466 * dmam_pool_create - Managed dma_pool_create()
465 * @name: name of pool, for diagnostics 467 * @name: name of pool, for diagnostics
466 * @dev: device that will be doing the DMA 468 * @dev: device that will be doing the DMA
467 * @size: size of the blocks in this pool. 469 * @size: size of the blocks in this pool.
468 * @align: alignment requirement for blocks; must be a power of two 470 * @align: alignment requirement for blocks; must be a power of two
469 * @allocation: returned blocks won't cross this boundary (or zero) 471 * @allocation: returned blocks won't cross this boundary (or zero)
470 * 472 *
471 * Managed dma_pool_create(). DMA pool created with this function is 473 * Managed dma_pool_create(). DMA pool created with this function is
472 * automatically destroyed on driver detach. 474 * automatically destroyed on driver detach.
473 */ 475 */
474 struct dma_pool *dmam_pool_create(const char *name, struct device *dev, 476 struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
475 size_t size, size_t align, size_t allocation) 477 size_t size, size_t align, size_t allocation)
476 { 478 {
477 struct dma_pool **ptr, *pool; 479 struct dma_pool **ptr, *pool;
478 480
479 ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL); 481 ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
480 if (!ptr) 482 if (!ptr)
481 return NULL; 483 return NULL;
482 484
483 pool = *ptr = dma_pool_create(name, dev, size, align, allocation); 485 pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
484 if (pool) 486 if (pool)
485 devres_add(dev, ptr); 487 devres_add(dev, ptr);
486 else 488 else
487 devres_free(ptr); 489 devres_free(ptr);
488 490
489 return pool; 491 return pool;
490 } 492 }
491 EXPORT_SYMBOL(dmam_pool_create); 493 EXPORT_SYMBOL(dmam_pool_create);
492 494
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	/* capture dev first: dma_pool_destroy() frees @pool */
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	/* drop the devres entry too; WARN if it was never registered */
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
507 509