// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *  Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
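
/*
 * Illustrative layout (a sketch added for clarity, not from the original
 * file): with 'size' = 64 and 'allocation' = PAGE_SIZE, each dma_page's
 * memory is carved into PAGE_SIZE/64 blocks, and the first 4 bytes of
 * every free block store the offset of the next free block:
 *
 *      [off 0 | next=64][off 64 | next=128][off 128 | next=192] ...
 *
 * A block is handed out by popping the head offset (page->offset) and
 * returned by pushing its offset back onto this chain.
 */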

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {               /* the pool */
        struct list_head page_list;
        spinlock_t lock;
        size_t size;
        struct device *dev;
        size_t allocation;
        size_t boundary;
        char name[32];
        struct list_head pools;
};

struct dma_page {               /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
        unsigned int in_use;
        unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
        unsigned temp;
        unsigned size;
        char *next;
        struct dma_page *page;
        struct dma_pool *pool;

        next = buf;
        size = PAGE_SIZE;

        temp = scnprintf(next, size, "poolinfo - 0.1\n");
        size -= temp;
        next += temp;

        mutex_lock(&pools_lock);
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                unsigned pages = 0;
                unsigned blocks = 0;

                spin_lock_irq(&pool->lock);
                list_for_each_entry(page, &pool->page_list, page_list) {
                        pages++;
                        blocks += page->in_use;
                }
                spin_unlock_irq(&pool->lock);

                /* per-pool info, no real statistics yet */
                temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
                                 pool->name, blocks,
                                 pages * (pool->allocation / pool->size),
                                 pool->size, pages);
                size -= temp;
                next += temp;
        }
        mutex_unlock(&pools_lock);

        return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);
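
/*
 * For illustration only (sample added for clarity; the pool names and
 * counts are hypothetical): with the scnprintf format above, reading the
 * "pools" attribute from sysfs might produce
 *
 *      poolinfo - 0.1
 *      buffer-2048         3    4 2048  2
 *      buffer-512          3    8  512  1
 *
 * Columns: pool name, blocks in use, total blocks, block size, pages.
 */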

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct dma_pool *retval;
        size_t allocation;
        bool empty = false;

        if (align == 0)
                align = 1;
        else if (align & (align - 1))
                return NULL;

        if (size == 0)
                return NULL;
        else if (size < 4)
                size = 4;

        size = ALIGN(size, align);
        allocation = max_t(size_t, size, PAGE_SIZE);
        if (!boundary)
                boundary = allocation;
        else if ((boundary < size) || (boundary & (boundary - 1)))
                return NULL;

        retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
        if (!retval)
                return retval;

        strlcpy(retval->name, name, sizeof(retval->name));

        retval->dev = dev;

        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;

        INIT_LIST_HEAD(&retval->pools);

        /*
         * pools_lock ensures that the ->dma_pools list does not get corrupted.
         * pools_reg_lock ensures that there is not a race between
         * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
         * when the first invocation of dma_pool_create() failed on
         * device_create_file() and the second assumes that it has been done (I
         * know it is a short window).
         */
        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        if (list_empty(&dev->dma_pools))
                empty = true;
        list_add(&retval->pools, &dev->dma_pools);
        mutex_unlock(&pools_lock);
        if (empty) {
                int err;

                err = device_create_file(dev, &dev_attr_pools);
                if (err) {
                        mutex_lock(&pools_lock);
                        list_del(&retval->pools);
                        mutex_unlock(&pools_lock);
                        mutex_unlock(&pools_reg_lock);
                        kfree(retval);
                        return NULL;
                }
        }
        mutex_unlock(&pools_reg_lock);

        return retval;
}
EXPORT_SYMBOL(dma_pool_create);

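/*
 * Illustrative lifecycle (a sketch added for clarity; "mydev_desc", the
 * sizes and the surrounding driver context are hypothetical, not part of
 * this file):
 *
 *      struct dma_pool *pool;
 *      dma_addr_t dma;
 *      void *vaddr;
 *
 *      pool = dma_pool_create("mydev_desc", dev, 64, 8, 0);
 *      if (!pool)
 *              return -ENOMEM;
 *
 *      vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *      if (!vaddr) {
 *              dma_pool_destroy(pool);
 *              return -ENOMEM;
 *      }
 *
 *      ... hand 'dma' to the device, use 'vaddr' from the CPU ...
 *
 *      dma_pool_free(pool, vaddr, dma);
 *      dma_pool_destroy(pool);
 */
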
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
        unsigned int offset = 0;
        unsigned int next_boundary = pool->boundary;

        do {
                unsigned int next = offset + pool->size;
                if (unlikely((next + pool->size) >= next_boundary)) {
                        next = next_boundary;
                        next_boundary += pool->boundary;
                }
                *(int *)(page->vaddr + offset) = next;
                offset = next;
        } while (offset < pool->allocation);
}
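
/*
 * Worked example (derived from the loop above; the numbers are
 * illustrative, not from the original file): with size = 256,
 * boundary = 1024 and allocation = 4096, the chain written into the
 * page is
 *
 *      0 -> 256 -> 512 -> 1024 -> 1280 -> 1536 -> 2048 -> ...
 *
 * The candidate block at offset 768 would end exactly on the 1024
 * boundary, so the '>=' test above skips the chain ahead to the
 * boundary itself rather than linking that block in.
 */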

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;

        page = kmalloc(sizeof(*page), mem_flags);
        if (!page)
                return NULL;

        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                pool_initialise_page(pool, page);
                page->in_use = 0;
                page->offset = 0;
        } else {
                kfree(page);
                page = NULL;
        }
        return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
        return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
        dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
        list_del(&page->page_list);
        kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
        struct dma_page *page, *tmp;
        bool empty = false;

        if (unlikely(!pool))
                return;

        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        list_del(&pool->pools);
        if (pool->dev && list_empty(&pool->dev->dma_pools))
                empty = true;
        mutex_unlock(&pools_lock);
        if (empty)
                device_remove_file(pool->dev, &dev_attr_pools);
        mutex_unlock(&pools_reg_lock);

        list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
                if (is_page_busy(page)) {
                        if (pool->dev)
                                dev_err(pool->dev, "%s %s, %p busy\n", __func__,
                                        pool->name, page->vaddr);
                        else
                                pr_err("%s %s, %p busy\n", __func__,
                                       pool->name, page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del(&page->page_list);
                        kfree(page);
                } else
                        pool_free_page(pool, page);
        }

        kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
        unsigned long flags;
        struct dma_page *page;
        size_t offset;
        void *retval;
        might_sleep_if(gfpflags_allow_blocking(mem_flags));

        spin_lock_irqsave(&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }

        /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
        spin_unlock_irqrestore(&pool->lock, flags);

        page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
        if (!page)
                return NULL;

        spin_lock_irqsave(&pool->lock, flags);

        list_add(&page->page_list, &pool->page_list);
 ready:
        page->in_use++;
        offset = page->offset;
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
        {
                int i;
                u8 *data = retval;
                /* page->offset is stored in first 4 bytes */
                for (i = sizeof(page->offset); i < pool->size; i++) {
                        if (data[i] == POOL_POISON_FREED)
                                continue;
                        if (pool->dev)
                                dev_err(pool->dev, "%s %s, %p (corrupted)\n",
                                        __func__, pool->name, retval);
                        else
                                pr_err("%s %s, %p (corrupted)\n",
                                       __func__, pool->name, retval);

                        /*
                         * Dump the first 4 bytes even if they are not
                         * POOL_POISON_FREED
                         */
                        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
                                        data, pool->size, 1);
                        break;
                }
        }
        if (!(mem_flags & __GFP_ZERO))
                memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
        spin_unlock_irqrestore(&pool->lock, flags);

        if (want_init_on_alloc(mem_flags))
                memset(retval, 0, pool->size);
        return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
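
/*
 * Illustrative call (a sketch; the descriptor struct, its fields and the
 * GFP choice are hypothetical): the returned CPU pointer is for the
 * driver, while the address written through @handle is what the device
 * sees.
 *
 *      dma_addr_t td_dma;
 *      struct mydev_td *td;
 *
 *      td = dma_pool_alloc(td_pool, GFP_ATOMIC, &td_dma);
 *      if (td)
 *              td->self = cpu_to_le32(td_dma);   (device-visible address)
 */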

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_page *page;
        unsigned long flags;
        unsigned int offset;

        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);
        if (!page) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
                                __func__, pool->name, vaddr, &dma);
                else
                        pr_err("%s %s, %p/%pad (bad dma)\n",
                               __func__, pool->name, vaddr, &dma);
                return;
        }

        offset = vaddr - page->vaddr;
        if (want_init_on_free())
                memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
        if ((dma - page->dma) != offset) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
                                __func__, pool->name, vaddr, &dma);
                else
                        pr_err("%s %s, %p (bad vaddr)/%pad\n",
                               __func__, pool->name, vaddr, &dma);
                return;
        }
        {
                unsigned int chain = page->offset;
                while (chain < pool->allocation) {
                        if (chain != offset) {
                                chain = *(int *)(page->vaddr + chain);
                                continue;
                        }
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (pool->dev)
                                dev_err(pool->dev, "%s %s, dma %pad already free\n",
                                        __func__, pool->name, &dma);
                        else
                                pr_err("%s %s, dma %pad already free\n",
                                       __func__, pool->name, &dma);
                        return;
                }
        }
        memset(vaddr, POOL_POISON_FREED, pool->size);
#endif
        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
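
/*
 * Note (illustrative, not from the original file): the @vaddr/@dma pair
 * passed here must be exactly the pair produced by one dma_pool_alloc()
 * call on the same pool:
 *
 *      void *v = dma_pool_alloc(pool, GFP_KERNEL, &d);
 *      ...
 *      dma_pool_free(pool, v, d);
 */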

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                  size_t size, size_t align, size_t allocation)
{
        struct dma_pool **ptr, *pool;

        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
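
/*
 * Illustrative use in a probe routine (a sketch; "mydev_bufs", the sizes
 * and the platform_device context are hypothetical): no explicit cleanup
 * is needed, devres destroys the pool on driver detach.
 *
 *      static int mydev_probe(struct platform_device *pdev)
 *      {
 *              struct dma_pool *pool;
 *
 *              pool = dmam_pool_create("mydev_bufs", &pdev->dev, 128, 8, 0);
 *              if (!pool)
 *                      return -ENOMEM;
 *              ...
 *      }
 */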

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
        struct device *dev = pool->dev;

        WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);