
include/linux/slab.h 21.1 KB
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
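/*
 * A minimal usage sketch for SLAB_TYPESAFE_BY_RCU (illustrative only;
 * "struct conn" and its fields are hypothetical, while the flag and
 * kmem_cache_create() are as declared in this header):
 *
 *	struct conn {
 *		spinlock_t	lock;	// stays type-stable across reuse
 *		refcount_t	ref;	// used by the validation pass above
 *		unsigned long	key;
 *	};
 *
 *	conn_cache = kmem_cache_create("conn", sizeof(struct conn), 0,
 *				       SLAB_TYPESAFE_BY_RCU, NULL);
 *
 * Lookups must still run under rcu_read_lock() and revalidate the object
 * (key and refcount) after finding it, exactly as the pattern above shows.
 */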
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif
/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
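/*
 * Typical cache lifecycle, sketched with a hypothetical "struct foo"
 * (only the functions declared in this header are real):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */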

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
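/*
 * Illustrative use of the two macros above, assuming a hypothetical
 * "struct widget" with a "payload" field that must be copied to/from
 * user space:
 *
 *	widget_cache = KMEM_CACHE(widget, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 *	widget_cache = KMEM_CACHE_USERCOPY(widget, SLAB_HWCACHE_ALIGN, payload);
 *
 * The second form whitelists only the named field for hardened usercopy
 * checks; the rest of the object stays off-limits to copy_to/from_user().
 */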

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
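/*
 * A hedged sketch of growing a kmalloc'ed buffer with the helpers above
 * ("buf" and "new_len" are hypothetical):
 *
 *	p = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;	// "buf" is still valid and must be freed
 *	buf = p;
 *
 * ksize(buf) reports the usable size actually reserved for the object,
 * which may be larger than what was originally requested.
 */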
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
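/*
 * Worked example (not normative): on a configuration with 4 KiB pages
 * (PAGE_SHIFT == 12), MAX_ORDER == 11 and SLUB, the definitions above
 * give KMALLOC_SHIFT_HIGH == 13 and KMALLOC_SHIFT_MAX == 22, i.e.
 * KMALLOC_MAX_CACHE_SIZE == 8 KiB, KMALLOC_MAX_SIZE == 4 MiB and
 * KMALLOC_MAX_ORDER == 10.
 */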

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif
/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)
/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};
#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
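/*
 * Illustrative mappings implied by kmalloc_type() above (assuming
 * CONFIG_ZONE_DMA=y):
 *
 *	kmalloc(64, GFP_KERNEL)			    -> KMALLOC_NORMAL caches
 *	kmalloc(64, GFP_KERNEL | __GFP_RECLAIMABLE) -> KMALLOC_RECLAIM caches
 *	kmalloc(64, GFP_ATOMIC | GFP_DMA)	    -> KMALLOC_DMA caches
 */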
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
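/*
 * A few worked examples (assuming KMALLOC_MIN_SIZE == 8):
 *
 *	kmalloc_index(0)   == 0	(zero alloc, served as ZERO_SIZE_PTR)
 *	kmalloc_index(8)   == 3	(kmalloc-8)
 *	kmalloc_index(96)  == 1	(the special kmalloc-96 cache)
 *	kmalloc_index(100) == 7	(rounded up to kmalloc-128)
 */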
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator-specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
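/*
 * Bulk API sketch (hedged; "cache" and the "objs" array are hypothetical):
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
 *
 * kmem_cache_alloc_bulk() returns 0 on failure; remember that interrupts
 * must be enabled around both calls.
 */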
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);
	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
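/*
 * Typical kmalloc() usage (sketch; "struct foo" is hypothetical):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * In atomic context use GFP_ATOMIC (or GFP_NOWAIT) instead of GFP_KERNEL,
 * since GFP_KERNEL may sleep.
 */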
/*
 * Determine size used for the nth kmalloc cache.
 * Return size or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;
		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
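/*
 * Array allocation sketch ("nr" and "struct item" are hypothetical):
 *
 *	struct item *tbl = kmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *	struct item *ztbl = kcalloc(nr, sizeof(*ztbl), GFP_KERNEL);
 *
 * Prefer these over an open-coded kmalloc(nr * sizeof(*tbl), ...): the
 * multiplication is overflow-checked and NULL is returned if it would wrap.
 */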
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
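/*
 * Sketch of a wrapper that charges allocations to its caller (the wrapper
 * name "my_subsys_alloc" is hypothetical; the macro is the one above):
 *
 *	void *my_subsys_alloc(size_t len, gfp_t gfp)
 *	{
 *		return kmalloc_track_caller(len, gfp);
 *	}
 *
 * Leak reports and slab debugging then point at my_subsys_alloc()'s caller
 * rather than at the wrapper itself.
 */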

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
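/*
 * Zeroed-allocation shortcuts in use (sketch; "struct foo" and "foo_cache"
 * are hypothetical):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	struct foo *c = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *
 * Both are equivalent to the plain allocation followed by a memset() to
 * zero, but express the intent directly via __GFP_ZERO.
 */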
/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */