include/linux/slab.h
  /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
   *
   * (C) SGI 2006, Christoph Lameter
   * 	Cleaned up and restructured to ease the addition of alternative
   * 	implementations of SLAB allocators.
   * (C) Linux Foundation 2008-2013
   *      Unified interface for all slab allocators
   */
  
  #ifndef _LINUX_SLAB_H
  #define	_LINUX_SLAB_H
  #include <linux/gfp.h>
  #include <linux/types.h>
  #include <linux/workqueue.h>

  /*
   * Flags to pass to kmem_cache_create().
   * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   */
  #define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
  #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
  #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
  #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
  #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
  #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
  #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
  /*
   * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
   *
   * This delays freeing the SLAB page by a grace period; it does _NOT_
   * delay object freeing. This means that if you do kmem_cache_free()
   * that memory location is free to be reused at any time. Thus it may
   * be possible to see another object there in the same RCU grace period.
   *
   * This feature only ensures the memory location backing the object
   * stays valid, the trick to using this is relying on an independent
   * object validation pass. Something like:
   *
   *  rcu_read_lock()
   * again:
   *  obj = lockless_lookup(key);
   *  if (obj) {
   *    if (!try_get_ref(obj)) // might fail for free objects
   *      goto again;
   *
   *    if (obj->key != key) { // not the object we expected
   *      put_ref(obj);
   *      goto again;
   *    }
   *  }
   *  rcu_read_unlock();
   *
   * This is useful if we need to approach a kernel structure obliquely,
   * from its address obtained without the usual locking. We can lock
   * the structure to stabilize it and check it's still at the given address,
   * only if we can be sure that the memory has not been meanwhile reused
   * for some other kind of object (which our subsystem's lock might corrupt).
   *
   * rcu_read_lock before reading the address, then rcu_read_unlock after
   * taking the spinlock within the structure expected at that address.
   *
   * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
   */
  #define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
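  /*
   * Illustrative sketch, not part of this header: creating a cache whose
   * objects can be looked up locklessly as described above. The names
   * my_obj and my_obj_cache, and the kref based validation (try_get_ref()
   * above roughly maps to kref_get_unless_zero()), are hypothetical.
   *
   *  struct my_obj {
   *  	spinlock_t lock;
   *  	struct kref ref;
   *  	unsigned long key;
   *  };
   *
   *  my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
   *  				     SLAB_TYPESAFE_BY_RCU, NULL);
   *
   * Objects freed to such a cache stay type-stable for readers inside an RCU
   * read-side critical section; only whole slab pages go back to the page
   * allocator after a grace period.
   */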
  #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
  #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

  /* Flag to prevent checks on free */
  #ifdef CONFIG_DEBUG_OBJECTS
  # define SLAB_DEBUG_OBJECTS	0x00400000UL
  #else
  # define SLAB_DEBUG_OBJECTS	0x00000000UL
  #endif
  #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
  #ifdef CONFIG_FAILSLAB
  # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
  #else
  # define SLAB_FAILSLAB		0x00000000UL
  #endif
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  # define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
  #else
  # define SLAB_ACCOUNT		0x00000000UL
  #endif

  #ifdef CONFIG_KASAN
  #define SLAB_KASAN		0x08000000UL
  #else
  #define SLAB_KASAN		0x00000000UL
  #endif
  /* The following flags affect the page allocator grouping pages by mobility */
  #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
  #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
  /*
   * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
   *
   * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
   *
   * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
   * Both make kfree a no-op.
   */
  #define ZERO_SIZE_PTR ((void *)16)
  #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  				(unsigned long)ZERO_SIZE_PTR)
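  /*
   * Minimal usage sketch of the convention above (illustrative only; buf and
   * count are hypothetical). kmalloc() returns ZERO_SIZE_PTR when count is 0,
   * NULL only on a real allocation failure, and kfree() accepts both:
   *
   *  buf = kmalloc(count, GFP_KERNEL);
   *  if (!buf)
   *  	return -ENOMEM;
   *  ...
   *  kfree(buf);
   *
   * Use ZERO_OR_NULL_PTR() where a zero sized buffer must never be
   * dereferenced.
   */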
  #include <linux/kmemleak.h>
  #include <linux/kasan.h>

  struct mem_cgroup;
  /*
   * struct kmem_cache related prototypes
   */
  void __init kmem_cache_init(void);
  bool slab_is_available(void);

  struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  			unsigned long,
  			void (*)(void *));
  void kmem_cache_destroy(struct kmem_cache *);
  int kmem_cache_shrink(struct kmem_cache *);
  
  void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  void memcg_destroy_kmem_caches(struct mem_cgroup *);

  /*
   * Please use this macro to create slab caches. Simply specify the
   * name of the structure and maybe some flags that are listed above.
   *
   * The alignment of the struct determines object alignment. If you,
   * for example, add ____cacheline_aligned_in_smp to the struct declaration,
   * then the objects will be properly aligned in SMP configurations.
   */
  #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  		sizeof(struct __struct), __alignof__(struct __struct),\
  		(__flags), NULL)
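  /*
   * Usage sketch for the macro above (illustrative only; struct foo and
   * foo_cache are hypothetical):
   *
   *  static struct kmem_cache *foo_cache;
   *
   *  foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
   *
   *  struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
   *  ...
   *  kmem_cache_free(foo_cache, f);
   *  kmem_cache_destroy(foo_cache);
   */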

  /*
   * Common kmalloc functions provided by all allocators
   */
  void * __must_check __krealloc(const void *, size_t, gfp_t);
  void * __must_check krealloc(const void *, size_t, gfp_t);
  void kfree(const void *);
  void kzfree(const void *);
  size_t ksize(const void *);
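  /*
   * Growth sketch for krealloc() (illustrative only; buf, new_buf and
   * new_size are hypothetical). On failure krealloc() returns NULL and the
   * original buffer stays valid and still owned by the caller:
   *
   *  new_buf = krealloc(buf, new_size, GFP_KERNEL);
   *  if (!new_buf)
   *  	return -ENOMEM;
   *  buf = new_buf;
   */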
  #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
  const char *__check_heap_object(const void *ptr, unsigned long n,
  				struct page *page);
  #else
  static inline const char *__check_heap_object(const void *ptr,
  					      unsigned long n,
  					      struct page *page)
  {
  	return NULL;
  }
  #endif
  /*
   * Some archs want to perform DMA into kmalloc caches and need a guaranteed
   * alignment larger than the alignment of a 64-bit integer.
   * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
   */
  #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  #else
  #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  #endif
  /*
   * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
   * Intended for arches that get misalignment faults even for 64 bit integer
   * aligned buffers.
   */
  #ifndef ARCH_SLAB_MINALIGN
  #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  #endif
  
  /*
   * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
   * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
   * aligned pointers.
   */
  #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
  #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
  #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
  
  /*
   * Kmalloc array related definitions
   */
  
  #ifdef CONFIG_SLAB
  /*
   * The largest kmalloc size supported by the SLAB allocators is
   * 32 megabytes (2^25), or the size corresponding to the maximum
   * allocatable page order if that is less than 32 MB.
   *
   * WARNING: It's not easy to increase this value since the allocators have
   * to do various tricks to work around compiler limitations in order to
   * ensure proper constant folding.
   */
  #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  #ifndef KMALLOC_SHIFT_LOW
  #define KMALLOC_SHIFT_LOW	5
  #endif
  #endif
  
  #ifdef CONFIG_SLUB
  /*
   * SLUB directly allocates requests fitting into an order-1 page
   * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
   */
  #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  #ifndef KMALLOC_SHIFT_LOW
  #define KMALLOC_SHIFT_LOW	3
  #endif
  #endif

  #ifdef CONFIG_SLOB
  /*
   * SLOB passes all requests larger than one page to the page allocator.
   * No kmalloc array is necessary since objects of different sizes can
   * be allocated from the same page.
   */
  #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
  #ifndef KMALLOC_SHIFT_LOW
  #define KMALLOC_SHIFT_LOW	3
  #endif
  #endif
  /* Maximum allocatable size */
  #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  /* Maximum size for which we actually use a slab cache */
  #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
  /* Maximum order allocatable via the slab allocator */
  #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

  /*
   * Kmalloc subsystem.
   */
  #ifndef KMALLOC_MIN_SIZE
  #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  #endif
  /*
   * This restriction comes from the byte sized index implementation.
   * Page size is normally 2^12 bytes and, in this case, if we want to use
   * a byte sized index which can represent 2^8 entries, the size of an object
   * must be equal to or greater than 2^12 / 2^8 = 2^4 = 16 bytes.
   * If the minimum kmalloc size is less than 16, we use it as the minimum
   * object size and give up on using a byte sized index.
   */
  #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                                 (KMALLOC_MIN_SIZE) : 16)
  #ifndef CONFIG_SLOB
  extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  #ifdef CONFIG_ZONE_DMA
  extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  #endif
  /*
   * Figure out which kmalloc slab an allocation of a certain size
   * belongs to.
   * 0 = zero alloc
   * 1 =  65 .. 96 bytes
   * 2 = 129 .. 192 bytes
   * n = 2^(n-1)+1 .. 2^n
   */
  static __always_inline int kmalloc_index(size_t size)
  {
  	if (!size)
  		return 0;
  
  	if (size <= KMALLOC_MIN_SIZE)
  		return KMALLOC_SHIFT_LOW;
  
  	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  		return 1;
  	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  		return 2;
  	if (size <=          8) return 3;
  	if (size <=         16) return 4;
  	if (size <=         32) return 5;
  	if (size <=         64) return 6;
  	if (size <=        128) return 7;
  	if (size <=        256) return 8;
  	if (size <=        512) return 9;
  	if (size <=       1024) return 10;
  	if (size <=   2 * 1024) return 11;
  	if (size <=   4 * 1024) return 12;
  	if (size <=   8 * 1024) return 13;
  	if (size <=  16 * 1024) return 14;
  	if (size <=  32 * 1024) return 15;
  	if (size <=  64 * 1024) return 16;
  	if (size <= 128 * 1024) return 17;
  	if (size <= 256 * 1024) return 18;
  	if (size <= 512 * 1024) return 19;
  	if (size <= 1024 * 1024) return 20;
  	if (size <=  2 * 1024 * 1024) return 21;
  	if (size <=  4 * 1024 * 1024) return 22;
  	if (size <=  8 * 1024 * 1024) return 23;
  	if (size <=  16 * 1024 * 1024) return 24;
  	if (size <=  32 * 1024 * 1024) return 25;
  	if (size <=  64 * 1024 * 1024) return 26;
  	BUG();
  
  	/* Will never be reached. Needed because the compiler may complain */
  	return -1;
  }
  #endif /* !CONFIG_SLOB */

  void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
  void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
  void kmem_cache_free(struct kmem_cache *, void *);

  /*
   * Bulk allocation and freeing operations. These are accelerated in an
   * allocator specific way to avoid taking locks repeatedly or building
   * metadata structures unnecessarily.
   *
   * Note that interrupts must be enabled when calling these functions.
   */
  void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

  /*
   * Caller must not use kfree_bulk() on memory not originally allocated
   * by kmalloc(), because the SLOB allocator cannot handle this.
   */
  static __always_inline void kfree_bulk(size_t size, void **p)
  {
  	kmem_cache_free_bulk(NULL, size, p);
  }
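  /*
   * Bulk API sketch (illustrative only; cachep, objs and nr are
   * hypothetical). As noted above, interrupts must be enabled; a return of
   * 0 from kmem_cache_alloc_bulk() indicates failure:
   *
   *  void *objs[16];
   *  int nr = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, 16, objs);
   *
   *  if (!nr)
   *  	return -ENOMEM;
   *  ...
   *  kmem_cache_free_bulk(cachep, nr, objs);
   */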
  #ifdef CONFIG_NUMA
  void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
  void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
  #else
  static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  {
  	return __kmalloc(size, flags);
  }
  
  static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  {
  	return kmem_cache_alloc(s, flags);
  }
  #endif
  
  #ifdef CONFIG_TRACING
  extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
  
  #ifdef CONFIG_NUMA
  extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  					   gfp_t gfpflags,
  					   int node, size_t size) __assume_slab_alignment __malloc;
  #else
  static __always_inline void *
  kmem_cache_alloc_node_trace(struct kmem_cache *s,
  			      gfp_t gfpflags,
  			      int node, size_t size)
  {
  	return kmem_cache_alloc_trace(s, gfpflags, size);
  }
  #endif /* CONFIG_NUMA */
  
  #else /* CONFIG_TRACING */
  static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  		gfp_t flags, size_t size)
  {
  	void *ret = kmem_cache_alloc(s, flags);
  	kasan_kmalloc(s, ret, size, flags);
  	return ret;
  }
  
  static __always_inline void *
  kmem_cache_alloc_node_trace(struct kmem_cache *s,
  			      gfp_t gfpflags,
  			      int node, size_t size)
  {
  	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  	kasan_kmalloc(s, ret, size, gfpflags);
  	return ret;
  }
  #endif /* CONFIG_TRACING */
  extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  
  #ifdef CONFIG_TRACING
  extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
  #else
  static __always_inline void *
  kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  {
  	return kmalloc_order(size, flags, order);
  }
  #endif
  static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  {
  	unsigned int order = get_order(size);
  	return kmalloc_order_trace(size, flags, order);
  }
  
  /**
   * kmalloc - allocate memory
   * @size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * kmalloc is the normal method of allocating memory
   * for objects smaller than page size in the kernel.
   *
   * The @flags argument may be one of:
   *
   * %GFP_USER - Allocate memory on behalf of user.  May sleep.
   *
   * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
   *
   * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
   *   For example, use this inside interrupt handlers.
   *
   * %GFP_HIGHUSER - Allocate pages from high memory.
   *
   * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
   *
   * %GFP_NOFS - Do not make any fs calls while trying to get memory.
   *
   * %GFP_NOWAIT - Allocation will not sleep.
   *
   * %__GFP_THISNODE - Allocate node-local memory only.
   *
   * %GFP_DMA - Allocation suitable for DMA.
   *   Should only be used for kmalloc() caches. Otherwise, use a
   *   slab created with SLAB_CACHE_DMA.
   *
   * Also it is possible to set different flags by OR'ing
   * in one or more of the following additional @flags:
   *
   * %__GFP_COLD - Request cache-cold pages instead of
   *   trying to return cache-warm pages.
   *
   * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
   *
   * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
   *   (think twice before using).
   *
   * %__GFP_NORETRY - If memory is not immediately available,
   *   then give up at once.
   *
   * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
   *
   * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
   *   eventually.
   *
   * There are other flags available as well, but these are not intended
   * for general use, and so are not documented here. For a full list of
   * potential flags, always refer to linux/gfp.h.
   */
  static __always_inline void *kmalloc(size_t size, gfp_t flags)
  {
  	if (__builtin_constant_p(size)) {
  		if (size > KMALLOC_MAX_CACHE_SIZE)
  			return kmalloc_large(size, flags);
  #ifndef CONFIG_SLOB
  		if (!(flags & GFP_DMA)) {
  			int index = kmalloc_index(size);
  
  			if (!index)
  				return ZERO_SIZE_PTR;
  
  			return kmem_cache_alloc_trace(kmalloc_caches[index],
  					flags, size);
  		}
  #endif
  	}
  	return __kmalloc(size, flags);
  }
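  /*
   * Typical usage sketch for kmalloc() (illustrative only; struct ctx is
   * hypothetical). In atomic context, e.g. an interrupt handler, GFP_ATOMIC
   * would replace GFP_KERNEL as described in the flag list above:
   *
   *  struct ctx *c = kmalloc(sizeof(*c), GFP_KERNEL);
   *
   *  if (!c)
   *  	return -ENOMEM;
   *  ...
   *  kfree(c);
   */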
  /*
   * Determine size used for the nth kmalloc cache.
   * Returns the size, or 0 if a kmalloc cache for that
   * size does not exist.
   */
  static __always_inline int kmalloc_size(int n)
  {
  #ifndef CONFIG_SLOB
  	if (n > 2)
  		return 1 << n;
  
  	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  		return 96;
  
  	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  		return 192;
  #endif
  	return 0;
  }

  static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  {
  #ifndef CONFIG_SLOB
  	if (__builtin_constant_p(size) &&
  		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  		int i = kmalloc_index(size);
  
  		if (!i)
  			return ZERO_SIZE_PTR;
  
  		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  						flags, node, size);
  	}
  #endif
  	return __kmalloc_node(size, flags, node);
  }
  struct memcg_cache_array {
  	struct rcu_head rcu;
  	struct kmem_cache *entries[0];
  };
  /*
   * This is the main placeholder for memcg-related information in kmem caches.
   * Both the root cache and the child caches will have it. For the root cache,
   * this will hold a dynamically allocated array large enough to hold
   * information about the currently limited memcgs in the system. To allow the
   * array to be accessed without taking any locks, on relocation we free the old
   * version only after a grace period.
   *
   * Root and child caches hold different metadata.
   *
   * @root_cache:	Common to root and child caches.  NULL for root, pointer to
   *		the root cache for children.
   *
   * The following fields are specific to root caches.
   *
   * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
   *		used to index child caches during allocation and cleared
   *		early during shutdown.
   *
   * @root_caches_node: List node for slab_root_caches list.
   *
   * @children:	List of all child caches.  While the child caches are also
   *		reachable through @memcg_caches, a child cache remains on
   *		this list until it is actually destroyed.
   *
   * The following fields are specific to child caches.
   *
   * @memcg:	Pointer to the memcg this cache belongs to.
   *
   * @children_node: List node for @root_cache->children list.
   *
   * @kmem_caches_node: List node for @memcg->kmem_caches list.
   */
  struct memcg_cache_params {
  	struct kmem_cache *root_cache;
  	union {
  		struct {
  			struct memcg_cache_array __rcu *memcg_caches;
  			struct list_head __root_caches_node;
  			struct list_head children;
  		};
  		struct {
  			struct mem_cgroup *memcg;
  			struct list_head children_node;
  			struct list_head kmem_caches_node;
  
  			void (*deact_fn)(struct kmem_cache *);
  			union {
  				struct rcu_head deact_rcu_head;
  				struct work_struct deact_work;
  			};
  		};
  	};
  };
  int memcg_update_all_caches(int num_memcgs);
  /**
   * kmalloc_array - allocate memory for an array.
   * @n: number of elements.
   * @size: element size.
   * @flags: the type of memory to allocate (see kmalloc).
   */
  static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  {
  	if (size != 0 && n > SIZE_MAX / size)
  		return NULL;
  	if (__builtin_constant_p(n) && __builtin_constant_p(size))
  		return kmalloc(n * size, flags);
  	return __kmalloc(n * size, flags);
  }
  
  /**
   * kcalloc - allocate memory for an array. The memory is set to zero.
   * @n: number of elements.
   * @size: element size.
   * @flags: the type of memory to allocate (see kmalloc).
   */
  static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  {
  	return kmalloc_array(n, size, flags | __GFP_ZERO);
  }
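  /*
   * Array allocation sketch (illustrative only; tbl and nr_items are
   * hypothetical). Both helpers return NULL if nr_items * sizeof(*tbl)
   * would overflow; kcalloc() additionally zeroes the memory:
   *
   *  u32 *tbl = kcalloc(nr_items, sizeof(*tbl), GFP_KERNEL);
   *
   *  if (!tbl)
   *  	return -ENOMEM;
   *  ...
   *  kfree(tbl);
   */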
  /*
   * kmalloc_track_caller is a special version of kmalloc that, for slab leak
   * tracking, records the caller of the routine invoking it rather than the
   * routine itself.
   * It's useful when the call to kmalloc comes from a widely-used standard
   * allocator where we care about the real place the memory allocation
   * request comes from.
   */
  extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  #define kmalloc_track_caller(size, flags) \
  	__kmalloc_track_caller(size, flags, _RET_IP_)
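  /*
   * Sketch of a wrapper that preserves its caller for leak tracking
   * (illustrative only; my_strdup() is hypothetical, though the in-tree
   * kstrdup() works along these lines):
   *
   *  char *my_strdup(const char *s, gfp_t gfp)
   *  {
   *  	size_t len = strlen(s) + 1;
   *  	char *p = kmalloc_track_caller(len, gfp);
   *
   *  	if (p)
   *  		memcpy(p, s, len);
   *  	return p;
   *  }
   */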

  #ifdef CONFIG_NUMA
  extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  #define kmalloc_node_track_caller(size, flags, node) \
  	__kmalloc_node_track_caller(size, flags, node, \
  			_RET_IP_)

  #else /* CONFIG_NUMA */
  
  #define kmalloc_node_track_caller(size, flags, node) \
  	kmalloc_track_caller(size, flags)

  #endif /* CONFIG_NUMA */

  /*
   * Shortcuts
   */
  static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  {
  	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  }
  
  /**
   * kzalloc - allocate memory. The memory is set to zero.
   * @size: how many bytes of memory are required.
   * @flags: the type of memory to allocate (see kmalloc).
   */
  static inline void *kzalloc(size_t size, gfp_t flags)
  {
  	return kmalloc(size, flags | __GFP_ZERO);
  }
  /**
   * kzalloc_node - allocate zeroed memory from a particular memory node.
   * @size: how many bytes of memory are required.
   * @flags: the type of memory to allocate (see kmalloc).
   * @node: memory node from which to allocate
   */
  static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  {
  	return kmalloc_node(size, flags | __GFP_ZERO, node);
  }
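  /*
   * Sketch contrasting the zeroing shortcuts above (illustrative only;
   * foo_cache, buf_len and nid are hypothetical):
   *
   *  struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
   *  void *buf = kzalloc_node(buf_len, GFP_KERNEL, nid);
   */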
  unsigned int kmem_cache_size(struct kmem_cache *s);
  void __init kmem_cache_init_late(void);
  #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
  int slab_prepare_cpu(unsigned int cpu);
  int slab_dead_cpu(unsigned int cpu);
  #else
  #define slab_prepare_cpu	NULL
  #define slab_dead_cpu		NULL
  #endif
  #endif	/* _LINUX_SLAB_H */