Blame view

mm/mempool.c 13.9 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
  /*
   *  linux/mm/mempool.c
   *
   *  memory buffer pool support. Such pools are mostly used
   *  for guaranteed, deadlock-free memory allocations during
   *  extreme VM load.
   *
   *  started by Ingo Molnar, Copyright (C) 2001
bdfedb76f   David Rientjes   mm, mempool: pois...
9
   *  debugging by David Rientjes, Copyright (C) 2015
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
10
11
12
13
   */
  
  #include <linux/mm.h>
  #include <linux/slab.h>
bdfedb76f   David Rientjes   mm, mempool: pois...
14
  #include <linux/highmem.h>
923936157   Andrey Ryabinin   mm/mempool.c: kas...
15
  #include <linux/kasan.h>
174119628   Catalin Marinas   mm/mempool.c: upd...
16
  #include <linux/kmemleak.h>
b95f1b31b   Paul Gortmaker   mm: Map most file...
17
  #include <linux/export.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
18
19
20
  #include <linux/mempool.h>
  #include <linux/blkdev.h>
  #include <linux/writeback.h>
e244c9e66   David Rientjes   mm, mempool: disa...
21
  #include "slab.h"
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
22

bdfedb76f   David Rientjes   mm, mempool: pois...
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
  #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
  static void poison_error(mempool_t *pool, void *element, size_t size,
  			 size_t byte)
  {
  	const int nr = pool->curr_nr;
  	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
  	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
  	int i;
  
  	pr_err("BUG: mempool element poison mismatch
  ");
  	pr_err("Mempool %p size %zu
  ", pool, size);
  	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
  	for (i = start; i < end; i++)
  		pr_cont("%x ", *(u8 *)(element + i));
  	pr_cont("%s
  ", end < size ? "..." : "");
  	dump_stack();
  }
  
  static void __check_element(mempool_t *pool, void *element, size_t size)
  {
  	u8 *obj = element;
  	size_t i;
  
  	for (i = 0; i < size; i++) {
  		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
  
  		if (obj[i] != exp) {
  			poison_error(pool, element, size, i);
  			return;
  		}
  	}
  	memset(obj, POISON_INUSE, size);
  }
  
/*
 * Check the poison pattern on an element that is leaving the pool.
 * Only pools whose free callback identifies a known backing allocator
 * (slab/kmalloc or whole pages) can be checked; anything else is left
 * alone because the element size is unknown here.
 */
static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		/* element is a struct page *; map it so the bytes can be read */
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
  
  static void __poison_element(void *element, size_t size)
  {
  	u8 *obj = element;
  
  	memset(obj, POISON_FREE, size - 1);
  	obj[size - 1] = POISON_END;
  }
  
/*
 * Poison an element as it enters the pool so that later writes to the
 * supposedly-idle buffer can be detected by check_element().  Only
 * elements from recognized backing allocators are poisoned.
 */
static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		/* element is a struct page *; map it so the bytes can be written */
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
  #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
/* Poison checking compiles away entirely without slab debugging. */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
  #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
923936157   Andrey Ryabinin   mm/mempool.c: kas...
107
108
  static void kasan_poison_element(mempool_t *pool, void *element)
  {
9b75a867c   Andrey Ryabinin   mm: mempool: kasa...
109
110
  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
  		kasan_poison_kfree(element);
923936157   Andrey Ryabinin   mm/mempool.c: kas...
111
112
113
  	if (pool->alloc == mempool_alloc_pages)
  		kasan_free_pages(element, (unsigned long)pool->pool_data);
  }
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
114
  static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
923936157   Andrey Ryabinin   mm/mempool.c: kas...
115
  {
9b75a867c   Andrey Ryabinin   mm: mempool: kasa...
116
117
  	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
  		kasan_unpoison_slab(element);
923936157   Andrey Ryabinin   mm/mempool.c: kas...
118
119
120
  	if (pool->alloc == mempool_alloc_pages)
  		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
121
122
123
  static void add_element(mempool_t *pool, void *element)
  {
  	BUG_ON(pool->curr_nr >= pool->min_nr);
bdfedb76f   David Rientjes   mm, mempool: pois...
124
  	poison_element(pool, element);
923936157   Andrey Ryabinin   mm/mempool.c: kas...
125
  	kasan_poison_element(pool, element);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
126
127
  	pool->elements[pool->curr_nr++] = element;
  }
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
128
  static void *remove_element(mempool_t *pool, gfp_t flags)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
129
  {
bdfedb76f   David Rientjes   mm, mempool: pois...
130
131
132
  	void *element = pool->elements[--pool->curr_nr];
  
  	BUG_ON(pool->curr_nr < 0);
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
133
  	kasan_unpoison_element(pool, element, flags);
764013103   Matthew Dawson   mm/mempool: avoid...
134
  	check_element(pool, element);
bdfedb76f   David Rientjes   mm, mempool: pois...
135
  	return element;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
136
  }
0565d3177   Tejun Heo   mempool: drop unn...
137
138
139
140
141
142
143
144
145
  /**
   * mempool_destroy - deallocate a memory pool
   * @pool:      pointer to the memory pool which was allocated via
   *             mempool_create().
   *
   * Free all reserved elements in @pool and @pool itself.  This function
   * only sleeps if the free_fn() function sleeps.
   */
  void mempool_destroy(mempool_t *pool)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
146
  {
4e3ca3e03   Sergey Senozhatsky   mm/mempool: allow...
147
148
  	if (unlikely(!pool))
  		return;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
149
  	while (pool->curr_nr) {
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
150
  		void *element = remove_element(pool, GFP_KERNEL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
151
152
153
154
155
  		pool->free(element, pool->pool_data);
  	}
  	kfree(pool->elements);
  	kfree(pool);
  }
0565d3177   Tejun Heo   mempool: drop unn...
156
  EXPORT_SYMBOL(mempool_destroy);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
157
158
159
160
161
162
163
164
165
166
  
  /**
   * mempool_create - create a memory pool
   * @min_nr:    the minimum number of elements guaranteed to be
   *             allocated for this pool.
   * @alloc_fn:  user-defined element-allocation function.
   * @free_fn:   user-defined element-freeing function.
   * @pool_data: optional private data available to the user-defined functions.
   *
   * this function creates and allocates a guaranteed size, preallocated
72fd4a35a   Robert P. J. Day   [PATCH] Numerous ...
167
   * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
168
   * functions. This function might sleep. Both the alloc_fn() and the free_fn()
72fd4a35a   Robert P. J. Day   [PATCH] Numerous ...
169
   * functions might sleep - as long as the mempool_alloc() function is not called
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
170
171
   * from IRQ contexts.
   */
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
172
  mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
173
174
  				mempool_free_t *free_fn, void *pool_data)
  {
a91a5ac68   Tejun Heo   mempool: add @gfp...
175
176
  	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
  				   GFP_KERNEL, NUMA_NO_NODE);
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
177
178
  }
  EXPORT_SYMBOL(mempool_create);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
179

1946089a1   Christoph Lameter   [PATCH] NUMA awar...
180
  mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
a91a5ac68   Tejun Heo   mempool: add @gfp...
181
182
  			       mempool_free_t *free_fn, void *pool_data,
  			       gfp_t gfp_mask, int node_id)
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
183
184
  {
  	mempool_t *pool;
7b5219db0   Joe Perches   mm/mempool.c: con...
185
  	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
186
187
  	if (!pool)
  		return NULL;
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
188
  	pool->elements = kmalloc_node(min_nr * sizeof(void *),
a91a5ac68   Tejun Heo   mempool: add @gfp...
189
  				      gfp_mask, node_id);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
  	if (!pool->elements) {
  		kfree(pool);
  		return NULL;
  	}
  	spin_lock_init(&pool->lock);
  	pool->min_nr = min_nr;
  	pool->pool_data = pool_data;
  	init_waitqueue_head(&pool->wait);
  	pool->alloc = alloc_fn;
  	pool->free = free_fn;
  
  	/*
  	 * First pre-allocate the guaranteed number of buffers.
  	 */
  	while (pool->curr_nr < pool->min_nr) {
  		void *element;
a91a5ac68   Tejun Heo   mempool: add @gfp...
206
  		element = pool->alloc(gfp_mask, pool->pool_data);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
207
  		if (unlikely(!element)) {
0565d3177   Tejun Heo   mempool: drop unn...
208
  			mempool_destroy(pool);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
209
210
211
212
213
214
  			return NULL;
  		}
  		add_element(pool, element);
  	}
  	return pool;
  }
1946089a1   Christoph Lameter   [PATCH] NUMA awar...
215
  EXPORT_SYMBOL(mempool_create_node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
216
217
218
219
220
221
222
  
  /**
   * mempool_resize - resize an existing memory pool
   * @pool:       pointer to the memory pool which was allocated via
   *              mempool_create().
   * @new_min_nr: the new minimum number of elements guaranteed to be
   *              allocated for this pool.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
223
224
225
226
   *
   * This function shrinks/grows the pool. In the case of growing,
   * it cannot be guaranteed that the pool will be grown to the new
   * size immediately, but new mempool_free() calls will refill it.
11d833604   David Rientjes   mm, mempool: do n...
227
   * This function may sleep.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
228
229
230
231
232
   *
   * Note, the caller must guarantee that no mempool_destroy is called
   * while this function is running. mempool_alloc() & mempool_free()
   * might be called (eg. from IRQ contexts) while this function executes.
   */
11d833604   David Rientjes   mm, mempool: do n...
233
  int mempool_resize(mempool_t *pool, int new_min_nr)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
234
235
236
237
238
239
  {
  	void *element;
  	void **new_elements;
  	unsigned long flags;
  
  	BUG_ON(new_min_nr <= 0);
11d833604   David Rientjes   mm, mempool: do n...
240
  	might_sleep();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
241
242
243
244
  
  	spin_lock_irqsave(&pool->lock, flags);
  	if (new_min_nr <= pool->min_nr) {
  		while (new_min_nr < pool->curr_nr) {
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
245
  			element = remove_element(pool, GFP_KERNEL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
246
247
248
249
250
251
252
253
254
255
  			spin_unlock_irqrestore(&pool->lock, flags);
  			pool->free(element, pool->pool_data);
  			spin_lock_irqsave(&pool->lock, flags);
  		}
  		pool->min_nr = new_min_nr;
  		goto out_unlock;
  	}
  	spin_unlock_irqrestore(&pool->lock, flags);
  
  	/* Grow the pool */
11d833604   David Rientjes   mm, mempool: do n...
256
257
  	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
  				     GFP_KERNEL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
  	if (!new_elements)
  		return -ENOMEM;
  
  	spin_lock_irqsave(&pool->lock, flags);
  	if (unlikely(new_min_nr <= pool->min_nr)) {
  		/* Raced, other resize will do our work */
  		spin_unlock_irqrestore(&pool->lock, flags);
  		kfree(new_elements);
  		goto out;
  	}
  	memcpy(new_elements, pool->elements,
  			pool->curr_nr * sizeof(*new_elements));
  	kfree(pool->elements);
  	pool->elements = new_elements;
  	pool->min_nr = new_min_nr;
  
  	while (pool->curr_nr < pool->min_nr) {
  		spin_unlock_irqrestore(&pool->lock, flags);
11d833604   David Rientjes   mm, mempool: do n...
276
  		element = pool->alloc(GFP_KERNEL, pool->pool_data);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
  		if (!element)
  			goto out;
  		spin_lock_irqsave(&pool->lock, flags);
  		if (pool->curr_nr < pool->min_nr) {
  			add_element(pool, element);
  		} else {
  			spin_unlock_irqrestore(&pool->lock, flags);
  			pool->free(element, pool->pool_data);	/* Raced */
  			goto out;
  		}
  	}
  out_unlock:
  	spin_unlock_irqrestore(&pool->lock, flags);
  out:
  	return 0;
  }
  EXPORT_SYMBOL(mempool_resize);
  
  /**
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
296
297
298
299
300
   * mempool_alloc - allocate an element from a specific memory pool
   * @pool:      pointer to the memory pool which was allocated via
   *             mempool_create().
   * @gfp_mask:  the usual allocation bitmask.
   *
72fd4a35a   Robert P. J. Day   [PATCH] Numerous ...
301
   * this function only sleeps if the alloc_fn() function sleeps or
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
302
303
304
   * returns NULL. Note that due to preallocation, this function
   * *never* fails when called from process contexts. (it might
   * fail if called from an IRQ context.)
4e390b2b2   Michal Hocko   Revert "mm, mempo...
305
   * Note: using __GFP_ZERO is not supported.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
306
   */
f9054c70d   David Rientjes   mm, mempool: only...
307
  void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
308
309
310
  {
  	void *element;
  	unsigned long flags;
01890a4c1   Benjamin LaHaise   [PATCH] mempool -...
311
  	wait_queue_t wait;
6daa0e286   Al Viro   [PATCH] gfp_t: mm...
312
  	gfp_t gfp_temp;
20a77776c   Nick Piggin   [PATCH] mempool: ...
313

8bf8fcb07   Sebastian Ott   mm/mempool: warn ...
314
  	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
d0164adc8   Mel Gorman   mm, page_alloc: d...
315
  	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
b84a35be0   Nick Piggin   [PATCH] mempool: ...
316

4e390b2b2   Michal Hocko   Revert "mm, mempo...
317
  	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
b84a35be0   Nick Piggin   [PATCH] mempool: ...
318
319
  	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
  	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
320

d0164adc8   Mel Gorman   mm, page_alloc: d...
321
  	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
20a77776c   Nick Piggin   [PATCH] mempool: ...
322

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
323
  repeat_alloc:
20a77776c   Nick Piggin   [PATCH] mempool: ...
324
325
  
  	element = pool->alloc(gfp_temp, pool->pool_data);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
326
327
  	if (likely(element != NULL))
  		return element;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
328
329
  	spin_lock_irqsave(&pool->lock, flags);
  	if (likely(pool->curr_nr)) {
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
330
  		element = remove_element(pool, gfp_temp);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
331
  		spin_unlock_irqrestore(&pool->lock, flags);
5b990546e   Tejun Heo   mempool: fix and ...
332
333
  		/* paired with rmb in mempool_free(), read comment there */
  		smp_wmb();
174119628   Catalin Marinas   mm/mempool.c: upd...
334
335
336
337
338
  		/*
  		 * Update the allocation stack trace as this is more useful
  		 * for debugging.
  		 */
  		kmemleak_update_trace(element);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
339
340
  		return element;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
341

1ebb7044c   Tejun Heo   mempool: fix firs...
342
  	/*
d0164adc8   Mel Gorman   mm, page_alloc: d...
343
  	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
1ebb7044c   Tejun Heo   mempool: fix firs...
344
345
  	 * alloc failed with that and @pool was empty, retry immediately.
  	 */
4e390b2b2   Michal Hocko   Revert "mm, mempo...
346
  	if (gfp_temp != gfp_mask) {
1ebb7044c   Tejun Heo   mempool: fix firs...
347
348
349
350
  		spin_unlock_irqrestore(&pool->lock, flags);
  		gfp_temp = gfp_mask;
  		goto repeat_alloc;
  	}
d0164adc8   Mel Gorman   mm, page_alloc: d...
351
352
  	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
  	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
5b990546e   Tejun Heo   mempool: fix and ...
353
  		spin_unlock_irqrestore(&pool->lock, flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
354
  		return NULL;
5b990546e   Tejun Heo   mempool: fix and ...
355
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
356

5b990546e   Tejun Heo   mempool: fix and ...
357
  	/* Let's wait for someone else to return an element to @pool */
01890a4c1   Benjamin LaHaise   [PATCH] mempool -...
358
  	init_wait(&wait);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
359
  	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
360

5b990546e   Tejun Heo   mempool: fix and ...
361
362
363
364
365
366
367
368
369
  	spin_unlock_irqrestore(&pool->lock, flags);
  
  	/*
  	 * FIXME: this should be io_schedule().  The timeout is there as a
  	 * workaround for some DM problems in 2.6.18.
  	 */
  	io_schedule_timeout(5*HZ);
  
  	finish_wait(&pool->wait, &wait);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
  	goto repeat_alloc;
  }
  EXPORT_SYMBOL(mempool_alloc);
  
  /**
   * mempool_free - return an element to the pool.
   * @element:   pool element pointer.
   * @pool:      pointer to the memory pool which was allocated via
   *             mempool_create().
   *
   * this function only sleeps if the free_fn() function sleeps.
   */
  void mempool_free(void *element, mempool_t *pool)
  {
  	unsigned long flags;
c80e7a826   Rusty Russell   permit mempool_fr...
385
386
  	if (unlikely(element == NULL))
  		return;
5b990546e   Tejun Heo   mempool: fix and ...
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
  	/*
  	 * Paired with the wmb in mempool_alloc().  The preceding read is
  	 * for @element and the following @pool->curr_nr.  This ensures
  	 * that the visible value of @pool->curr_nr is from after the
  	 * allocation of @element.  This is necessary for fringe cases
  	 * where @element was passed to this task without going through
  	 * barriers.
  	 *
  	 * For example, assume @p is %NULL at the beginning and one task
  	 * performs "p = mempool_alloc(...);" while another task is doing
  	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
  	 * may end up using curr_nr value which is from before allocation
  	 * of @p without the following rmb.
  	 */
  	smp_rmb();
  
  	/*
  	 * For correctness, we need a test which is guaranteed to trigger
  	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
  	 * without locking achieves that and refilling as soon as possible
  	 * is desirable.
  	 *
  	 * Because curr_nr visible here is always a value after the
  	 * allocation of @element, any task which decremented curr_nr below
  	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
  	 * incremented to min_nr afterwards.  If curr_nr gets incremented
  	 * to min_nr after the allocation of @element, the elements
  	 * allocated after that are subject to the same guarantee.
  	 *
  	 * Waiters happen iff curr_nr is 0 and the above guarantee also
  	 * ensures that there will be frees which return elements to the
  	 * pool waking up the waiters.
  	 */
eb9a3c62a   Mikulas Patocka   mempool: add unli...
420
  	if (unlikely(pool->curr_nr < pool->min_nr)) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
421
  		spin_lock_irqsave(&pool->lock, flags);
eb9a3c62a   Mikulas Patocka   mempool: add unli...
422
  		if (likely(pool->curr_nr < pool->min_nr)) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
423
424
425
426
427
428
429
430
431
432
433
434
435
436
  			add_element(pool, element);
  			spin_unlock_irqrestore(&pool->lock, flags);
  			wake_up(&pool->wait);
  			return;
  		}
  		spin_unlock_irqrestore(&pool->lock, flags);
  	}
  	pool->free(element, pool->pool_data);
  }
  EXPORT_SYMBOL(mempool_free);
  
  /*
   * A commonly used alloc and free fn.
   */
dd0fc66fb   Al Viro   [PATCH] gfp flags...
437
  void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
438
  {
fcc234f88   Pekka Enberg   [PATCH] mm: kill ...
439
  	struct kmem_cache *mem = pool_data;
e244c9e66   David Rientjes   mm, mempool: disa...
440
  	VM_BUG_ON(mem->ctor);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
441
442
443
444
445
446
  	return kmem_cache_alloc(mem, gfp_mask);
  }
  EXPORT_SYMBOL(mempool_alloc_slab);
  
  void mempool_free_slab(void *element, void *pool_data)
  {
fcc234f88   Pekka Enberg   [PATCH] mm: kill ...
447
  	struct kmem_cache *mem = pool_data;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
448
449
450
  	kmem_cache_free(mem, element);
  }
  EXPORT_SYMBOL(mempool_free_slab);
6e0678f39   Matthew Dobson   [PATCH] mempool: ...
451
452
  
  /*
53184082b   Matthew Dobson   [PATCH] mempool: ...
453
   * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
183ff22bb   Simon Arlott   spelling fixes: mm/
454
   * specified by pool_data
53184082b   Matthew Dobson   [PATCH] mempool: ...
455
456
457
   */
  void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
  {
5e2f89b5d   Figo.zhang   mempool.c: clean ...
458
  	size_t size = (size_t)pool_data;
53184082b   Matthew Dobson   [PATCH] mempool: ...
459
460
461
462
463
464
465
466
467
468
469
  	return kmalloc(size, gfp_mask);
  }
  EXPORT_SYMBOL(mempool_kmalloc);
  
  void mempool_kfree(void *element, void *pool_data)
  {
  	kfree(element);
  }
  EXPORT_SYMBOL(mempool_kfree);
  
  /*
6e0678f39   Matthew Dobson   [PATCH] mempool: ...
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
   * A simple mempool-backed page allocator that allocates pages
   * of the order specified by pool_data.
   */
  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
  {
  	int order = (int)(long)pool_data;
  	return alloc_pages(gfp_mask, order);
  }
  EXPORT_SYMBOL(mempool_alloc_pages);
  
  void mempool_free_pages(void *element, void *pool_data)
  {
  	int order = (int)(long)pool_data;
  	__free_pages(element, order);
  }
  EXPORT_SYMBOL(mempool_free_pages);