Commit 84bc227d7fde049a568cd58a5610613feedc0dff

Authored by Rolf Eike Beer
Committed by Linus Torvalds
1 parent 43506fad21

mm/dmapool.c: take lock only once in dma_pool_free()

dma_pool_free() scans the pool's page list for the page to free while holding
the pool lock.  It then releases the lock only to acquire it again almost
immediately.  Modify the code to take the lock only once.

This will do some additional loops and computations with the lock held if
memory debugging is activated.  If it is not activated, the only new
operations done with the lock held are one if and one subtraction.

Signed-off-by: Rolf Eike Beer <eike-kernel@sf-tec.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
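For orientation before reading the diff, the change boils down to the following before/after pattern inside dma_pool_free(). This is a condensed sketch only: the error reporting, the DMAPOOL_DEBUG checks and the free-list bookkeeping are reduced to comments, so it is not the verbatim kernel code.

        /* Before: pool_find_page() took and released pool->lock internally,
         * and dma_pool_free() then acquired the same lock a second time. */
        page = pool_find_page(pool, dma);       /* locks and unlocks pool->lock */
        if (!page)
                return;                         /* error reporting elided */
        /* ... debug checks run without the lock ... */
        spin_lock_irqsave(&pool->lock, flags);  /* lock taken again */
        /* put the block back on the page's free list */
        spin_unlock_irqrestore(&pool->lock, flags);

        /* After: dma_pool_free() holds the lock across the lookup, the
         * (optional) debug checks and the free-list update; every error
         * path drops the lock before returning. */
        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);       /* caller already holds pool->lock */
        if (!page) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return;                         /* error reporting elided */
        }
        /* ... debug checks now run with the lock held ... */
        /* put the block back on the page's free list */
        spin_unlock_irqrestore(&pool->lock, flags);

pool_find_page() consequently loses its own locking and simply returns the matching page (or NULL) while the caller holds pool->lock.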

Showing 1 changed file with 6 additions and 8 deletions

--- a/mm/dmapool.c
+++ b/mm/dmapool.c
 /*
  * DMA Pool allocator
  *
  * Copyright 2001 David Brownell
  * Copyright 2007 Intel Corporation
  * Author: Matthew Wilcox <willy@linux.intel.com>
  *
  * This software may be redistributed and/or modified under the terms of
  * the GNU General Public License ("GPL") version 2 as published by the
  * Free Software Foundation.
  *
  * This allocator returns small blocks of a given size which are DMA-able by
  * the given device. It uses the dma_alloc_coherent page allocator to get
  * new pages, then splits them up into blocks of the required size.
  * Many older drivers still have their own code to do this.
  *
  * The current design of this allocator is fairly simple. The pool is
  * represented by the 'struct dma_pool' which keeps a doubly-linked list of
  * allocated pages. Each page in the page_list is split into blocks of at
  * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
  * list of free blocks within the page. Used blocks aren't tracked, but we
  * keep a count of how many are currently allocated from each page.
  */

 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/poison.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/wait.h>

 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 #define DMAPOOL_DEBUG 1
 #endif

 struct dma_pool {               /* the pool */
         struct list_head page_list;
         spinlock_t lock;
         size_t size;
         struct device *dev;
         size_t allocation;
         size_t boundary;
         char name[32];
         wait_queue_head_t waitq;
         struct list_head pools;
 };

 struct dma_page {               /* cacheable header for 'allocation' bytes */
         struct list_head page_list;
         void *vaddr;
         dma_addr_t dma;
         unsigned int in_use;
         unsigned int offset;
 };

 #define POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)

 static DEFINE_MUTEX(pools_lock);

 static ssize_t
 show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 {
         unsigned temp;
         unsigned size;
         char *next;
         struct dma_page *page;
         struct dma_pool *pool;

         next = buf;
         size = PAGE_SIZE;

         temp = scnprintf(next, size, "poolinfo - 0.1\n");
         size -= temp;
         next += temp;

         mutex_lock(&pools_lock);
         list_for_each_entry(pool, &dev->dma_pools, pools) {
                 unsigned pages = 0;
                 unsigned blocks = 0;

                 spin_lock_irq(&pool->lock);
                 list_for_each_entry(page, &pool->page_list, page_list) {
                         pages++;
                         blocks += page->in_use;
                 }
                 spin_unlock_irq(&pool->lock);

                 /* per-pool info, no real statistics yet */
                 temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
                                  pool->name, blocks,
                                  pages * (pool->allocation / pool->size),
                                  pool->size, pages);
                 size -= temp;
                 next += temp;
         }
         mutex_unlock(&pools_lock);

         return PAGE_SIZE - size;
 }

 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
  * @name: name of pool, for diagnostics
  * @dev: device that will be doing the DMA
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
  * @boundary: returned blocks won't cross this power of two boundary
  * Context: !in_interrupt()
  *
  * Returns a dma allocation pool with the requested characteristics, or
  * null if one can't be created. Given one of these pools, dma_pool_alloc()
  * may be used to allocate memory. Such memory will all have "consistent"
  * DMA mappings, accessible by the device and its driver without using
  * cache flushing primitives. The actual size of blocks allocated may be
  * larger than requested because of alignment.
  *
  * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
  * cross that size boundary. This is useful for devices which have
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
  */
 struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                  size_t size, size_t align, size_t boundary)
 {
         struct dma_pool *retval;
         size_t allocation;

         if (align == 0) {
                 align = 1;
         } else if (align & (align - 1)) {
                 return NULL;
         }

         if (size == 0) {
                 return NULL;
         } else if (size < 4) {
                 size = 4;
         }

         if ((size % align) != 0)
                 size = ALIGN(size, align);

         allocation = max_t(size_t, size, PAGE_SIZE);

         if (!boundary) {
                 boundary = allocation;
         } else if ((boundary < size) || (boundary & (boundary - 1))) {
                 return NULL;
         }

         retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
         if (!retval)
                 return retval;

         strlcpy(retval->name, name, sizeof(retval->name));

         retval->dev = dev;

         INIT_LIST_HEAD(&retval->page_list);
         spin_lock_init(&retval->lock);
         retval->size = size;
         retval->boundary = boundary;
         retval->allocation = allocation;
         init_waitqueue_head(&retval->waitq);

         if (dev) {
                 int ret;

                 mutex_lock(&pools_lock);
                 if (list_empty(&dev->dma_pools))
                         ret = device_create_file(dev, &dev_attr_pools);
                 else
                         ret = 0;
                 /* note: not currently insisting "name" be unique */
                 if (!ret)
                         list_add(&retval->pools, &dev->dma_pools);
                 else {
                         kfree(retval);
                         retval = NULL;
                 }
                 mutex_unlock(&pools_lock);
         } else
                 INIT_LIST_HEAD(&retval->pools);

         return retval;
 }
 EXPORT_SYMBOL(dma_pool_create);

 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
         unsigned int offset = 0;
         unsigned int next_boundary = pool->boundary;

         do {
                 unsigned int next = offset + pool->size;
                 if (unlikely((next + pool->size) >= next_boundary)) {
                         next = next_boundary;
                         next_boundary += pool->boundary;
                 }
                 *(int *)(page->vaddr + offset) = next;
                 offset = next;
         } while (offset < pool->allocation);
 }

 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 {
         struct dma_page *page;

         page = kmalloc(sizeof(*page), mem_flags);
         if (!page)
                 return NULL;
         page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                          &page->dma, mem_flags);
         if (page->vaddr) {
 #ifdef DMAPOOL_DEBUG
                 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
                 pool_initialise_page(pool, page);
                 list_add(&page->page_list, &pool->page_list);
                 page->in_use = 0;
                 page->offset = 0;
         } else {
                 kfree(page);
                 page = NULL;
         }
         return page;
 }

 static inline int is_page_busy(struct dma_page *page)
 {
         return page->in_use != 0;
 }

 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
         dma_addr_t dma = page->dma;

 #ifdef DMAPOOL_DEBUG
         memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
         dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
         list_del(&page->page_list);
         kfree(page);
 }

 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
  * Context: !in_interrupt()
  *
  * Caller guarantees that no more memory from the pool is in use,
  * and that nothing will try to use the pool after this call.
  */
 void dma_pool_destroy(struct dma_pool *pool)
 {
         mutex_lock(&pools_lock);
         list_del(&pool->pools);
         if (pool->dev && list_empty(&pool->dev->dma_pools))
                 device_remove_file(pool->dev, &dev_attr_pools);
         mutex_unlock(&pools_lock);

         while (!list_empty(&pool->page_list)) {
                 struct dma_page *page;
                 page = list_entry(pool->page_list.next,
                                   struct dma_page, page_list);
                 if (is_page_busy(page)) {
                         if (pool->dev)
                                 dev_err(pool->dev,
                                         "dma_pool_destroy %s, %p busy\n",
                                         pool->name, page->vaddr);
                         else
                                 printk(KERN_ERR
                                        "dma_pool_destroy %s, %p busy\n",
                                        pool->name, page->vaddr);
                         /* leak the still-in-use consistent memory */
                         list_del(&page->page_list);
                         kfree(page);
                 } else
                         pool_free_page(pool, page);
         }

         kfree(pool);
 }
 EXPORT_SYMBOL(dma_pool_destroy);

 /**
  * dma_pool_alloc - get a block of consistent memory
  * @pool: dma pool that will produce the block
  * @mem_flags: GFP_* bitmask
  * @handle: pointer to dma address of block
  *
  * This returns the kernel virtual address of a currently unused block,
  * and reports its dma address through the handle.
  * If such a memory block can't be allocated, %NULL is returned.
  */
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                      dma_addr_t *handle)
 {
         unsigned long flags;
         struct dma_page *page;
         size_t offset;
         void *retval;

         might_sleep_if(mem_flags & __GFP_WAIT);

         spin_lock_irqsave(&pool->lock, flags);
  restart:
         list_for_each_entry(page, &pool->page_list, page_list) {
                 if (page->offset < pool->allocation)
                         goto ready;
         }
         page = pool_alloc_page(pool, GFP_ATOMIC);
         if (!page) {
                 if (mem_flags & __GFP_WAIT) {
                         DECLARE_WAITQUEUE(wait, current);

                         __set_current_state(TASK_INTERRUPTIBLE);
                         __add_wait_queue(&pool->waitq, &wait);
                         spin_unlock_irqrestore(&pool->lock, flags);

                         schedule_timeout(POOL_TIMEOUT_JIFFIES);

                         spin_lock_irqsave(&pool->lock, flags);
                         __remove_wait_queue(&pool->waitq, &wait);
                         goto restart;
                 }
                 retval = NULL;
                 goto done;
         }

  ready:
         page->in_use++;
         offset = page->offset;
         page->offset = *(int *)(page->vaddr + offset);
         retval = offset + page->vaddr;
         *handle = offset + page->dma;
 #ifdef DMAPOOL_DEBUG
         memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
  done:
         spin_unlock_irqrestore(&pool->lock, flags);
         return retval;
 }
 EXPORT_SYMBOL(dma_pool_alloc);

 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
-        unsigned long flags;
         struct dma_page *page;

-        spin_lock_irqsave(&pool->lock, flags);
         list_for_each_entry(page, &pool->page_list, page_list) {
                 if (dma < page->dma)
                         continue;
                 if (dma < (page->dma + pool->allocation))
-                        goto done;
+                        return page;
         }
-        page = NULL;
- done:
-        spin_unlock_irqrestore(&pool->lock, flags);
-        return page;
+        return NULL;
 }

 /**
  * dma_pool_free - put block back into dma pool
  * @pool: the dma pool holding the block
  * @vaddr: virtual address of block
  * @dma: dma address of block
  *
  * Caller promises neither device nor driver will again touch this block
  * unless it is first re-allocated.
  */
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
         struct dma_page *page;
         unsigned long flags;
         unsigned int offset;

+        spin_lock_irqsave(&pool->lock, flags);
         page = pool_find_page(pool, dma);
         if (!page) {
+                spin_unlock_irqrestore(&pool->lock, flags);
                 if (pool->dev)
                         dev_err(pool->dev,
                                 "dma_pool_free %s, %p/%lx (bad dma)\n",
                                 pool->name, vaddr, (unsigned long)dma);
                 else
                         printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
                                pool->name, vaddr, (unsigned long)dma);
                 return;
         }

         offset = vaddr - page->vaddr;
 #ifdef DMAPOOL_DEBUG
         if ((dma - page->dma) != offset) {
+                spin_unlock_irqrestore(&pool->lock, flags);
                 if (pool->dev)
                         dev_err(pool->dev,
                                 "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                 pool->name, vaddr, (unsigned long long)dma);
                 else
                         printk(KERN_ERR
                                "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                pool->name, vaddr, (unsigned long long)dma);
                 return;
         }
         {
                 unsigned int chain = page->offset;
                 while (chain < pool->allocation) {
                         if (chain != offset) {
                                 chain = *(int *)(page->vaddr + chain);
                                 continue;
                         }
+                        spin_unlock_irqrestore(&pool->lock, flags);
                         if (pool->dev)
                                 dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
                                         "already free\n", pool->name,
                                         (unsigned long long)dma);
                         else
                                 printk(KERN_ERR "dma_pool_free %s, dma %Lx "
                                        "already free\n", pool->name,
                                        (unsigned long long)dma);
                         return;
                 }
         }
         memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif

-        spin_lock_irqsave(&pool->lock, flags);
         page->in_use--;
         *(int *)vaddr = page->offset;
         page->offset = offset;
         if (waitqueue_active(&pool->waitq))
                 wake_up_locked(&pool->waitq);
         /*
          * Resist a temptation to do
          *    if (!is_page_busy(page)) pool_free_page(pool, page);
          * Better have a few empty pages hang around.
          */
         spin_unlock_irqrestore(&pool->lock, flags);
 }
 EXPORT_SYMBOL(dma_pool_free);

 /*
  * Managed DMA pool
  */
 static void dmam_pool_release(struct device *dev, void *res)
 {
         struct dma_pool *pool = *(struct dma_pool **)res;

         dma_pool_destroy(pool);
 }

 static int dmam_pool_match(struct device *dev, void *res, void *match_data)
 {
         return *(struct dma_pool **)res == match_data;
 }

 /**
  * dmam_pool_create - Managed dma_pool_create()
  * @name: name of pool, for diagnostics
  * @dev: device that will be doing the DMA
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
  * @allocation: returned blocks won't cross this boundary (or zero)
  *
  * Managed dma_pool_create(). DMA pool created with this function is
  * automatically destroyed on driver detach.
  */
 struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                   size_t size, size_t align, size_t allocation)
 {
         struct dma_pool **ptr, *pool;

         ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
         if (!ptr)
                 return NULL;

         pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
         if (pool)
                 devres_add(dev, ptr);
         else
                 devres_free(ptr);

         return pool;
 }
 EXPORT_SYMBOL(dmam_pool_create);

 /**
  * dmam_pool_destroy - Managed dma_pool_destroy()
  * @pool: dma pool that will be destroyed
  *
  * Managed dma_pool_destroy().
  */
 void dmam_pool_destroy(struct dma_pool *pool)
 {
         struct device *dev = pool->dev;

         dma_pool_destroy(pool);