mm/slab.h

  #ifndef MM_SLAB_H
  #define MM_SLAB_H
  /*
   * Internal slab definitions
   */
  #ifdef CONFIG_SLOB
  /*
   * Common fields provided in kmem_cache by all slab allocators
   * This struct is either used directly by the allocator (SLOB)
   * or the allocator must include definitions for all fields
   * provided in kmem_cache_common in its definition of kmem_cache.
   *
   * Once we can do anonymous structs (C11 standard) we could put an
   * anonymous struct definition in these allocators so that the
   * separate allocations in the kmem_cache structure of SLAB and
   * SLUB are no longer needed.
   */
  struct kmem_cache {
  	unsigned int object_size;/* The original size of the object */
  	unsigned int size;	/* The aligned/padded/added on size  */
  	unsigned int align;	/* Alignment as calculated */
  	unsigned long flags;	/* Active flags on the slab */
  	const char *name;	/* Slab name for sysfs */
  	int refcount;		/* Use counter */
  	void (*ctor)(void *);	/* Called on object slot creation */
  	struct list_head list;	/* List of all slab caches on the system */
  };
  
  #endif /* CONFIG_SLOB */
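
  /*
   * Illustrative sketch only, not part of the original header: one way the
   * anonymous-struct idea from the comment above could look. The macro name
   * KMEM_CACHE_COMMON and the layout below are hypothetical.
   */
  #if 0
  #define KMEM_CACHE_COMMON			\
  	struct {				\
  		unsigned int object_size;	\
  		unsigned int size;		\
  		unsigned int align;		\
  		unsigned long flags;		\
  		const char *name;		\
  		int refcount;			\
  		void (*ctor)(void *);		\
  		struct list_head list;		\
  	}

  struct kmem_cache {
  	KMEM_CACHE_COMMON;	/* C11 anonymous member: s->size etc. keep working */
  	/* allocator-specific fields would follow here */
  };
  #endif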
  
  #ifdef CONFIG_SLAB
  #include <linux/slab_def.h>
  #endif
  
  #ifdef CONFIG_SLUB
  #include <linux/slub_def.h>
  #endif
  
  #include <linux/memcontrol.h>
  #include <linux/fault-inject.h>
  #include <linux/kmemcheck.h>
  #include <linux/kasan.h>
  #include <linux/kmemleak.h>
  #include <linux/random.h>

  /*
   * State of the slab allocator.
   *
   * This is used to describe the states of the allocator during bootup.
   * Allocators use this to gradually bootstrap themselves. Most allocators
   * have the problem that the structures used for managing slab caches are
   * allocated from slab caches themselves.
   */
  enum slab_state {
  	DOWN,			/* No slab functionality yet */
  	PARTIAL,		/* SLUB: kmem_cache_node available */
  	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
  	UP,			/* Slab caches usable but not all extras yet */
  	FULL			/* Everything is working */
  };
  
  extern enum slab_state slab_state;
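
  /*
   * Illustrative sketch only, not part of the original header: allocator code
   * typically gates early-boot paths on slab_state. Hypothetical example:
   */
  #if 0
  static bool example_slab_usable(void)
  {
  	/* UP means caches are usable; FULL means all extras are ready too */
  	return slab_state >= UP;
  }
  #endif
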
  /* The slab cache mutex protects the management structures during changes */
  extern struct mutex slab_mutex;
  
  /* The list of all slab caches on the system */
  extern struct list_head slab_caches;
  /* The slab cache that manages slab cache information */
  extern struct kmem_cache *kmem_cache;
  unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size);
  #ifndef CONFIG_SLOB
  /* Kmalloc array related functions */
  void setup_kmalloc_cache_index_table(void);
  void create_kmalloc_caches(unsigned long);

  /* Find the kmalloc slab corresponding to a certain size */
  struct kmem_cache *kmalloc_slab(size_t, gfp_t);
  #endif
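
  /*
   * Illustrative sketch only, not part of the original header: a kmalloc()
   * style path resolves a size to one of the fixed caches via kmalloc_slab()
   * and then allocates from it. Hypothetical example, loosely modelled on the
   * real allocators:
   */
  #if 0
  static void *example_kmalloc(size_t size, gfp_t flags)
  {
  	struct kmem_cache *s = kmalloc_slab(size, flags);

  	if (unlikely(ZERO_OR_NULL_PTR(s)))
  		return s;
  	return kmem_cache_alloc(s, flags);
  }
  #endif
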
  /* Functions provided by the slab allocators */
  extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

  extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
  			unsigned long flags);
  extern void create_boot_cache(struct kmem_cache *, const char *name,
  			size_t size, unsigned long flags);
  int slab_unmergeable(struct kmem_cache *s);
  struct kmem_cache *find_mergeable(size_t size, size_t align,
  		unsigned long flags, const char *name, void (*ctor)(void *));
  #ifndef CONFIG_SLOB
  struct kmem_cache *
  __kmem_cache_alias(const char *name, size_t size, size_t align,
  		   unsigned long flags, void (*ctor)(void *));
  
  unsigned long kmem_cache_flags(unsigned long object_size,
  	unsigned long flags, const char *name,
  	void (*ctor)(void *));
  #else
  static inline struct kmem_cache *
  __kmem_cache_alias(const char *name, size_t size, size_t align,
  		   unsigned long flags, void (*ctor)(void *))
  { return NULL; }
  
  static inline unsigned long kmem_cache_flags(unsigned long object_size,
  	unsigned long flags, const char *name,
  	void (*ctor)(void *))
  {
  	return flags;
  }
  #endif
  /* Legal flag mask for kmem_cache_create(), for various configurations */
  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
  			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
  
  #if defined(CONFIG_DEBUG_SLAB)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  #elif defined(CONFIG_SLUB_DEBUG)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
  #else
  #define SLAB_DEBUG_FLAGS (0)
  #endif
  
  #if defined(CONFIG_SLAB)
  #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
  			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
  			  SLAB_NOTRACK | SLAB_ACCOUNT)
  #elif defined(CONFIG_SLUB)
  #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
  			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
  #else
  #define SLAB_CACHE_FLAGS (0)
  #endif
  
  #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
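
  /*
   * Illustrative note, not part of the original header: cache creation is
   * expected to reduce the caller's flags to the legal set, roughly
   *
   *	flags &= CACHE_CREATE_MASK;
   *
   * before the cache is set up.
   */
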
  int __kmem_cache_shutdown(struct kmem_cache *);
  void __kmem_cache_release(struct kmem_cache *);
  int __kmem_cache_shrink(struct kmem_cache *, bool);
  void slab_kmem_cache_release(struct kmem_cache *);

  struct seq_file;
  struct file;

  struct slabinfo {
  	unsigned long active_objs;
  	unsigned long num_objs;
  	unsigned long active_slabs;
  	unsigned long num_slabs;
  	unsigned long shared_avail;
  	unsigned int limit;
  	unsigned int batchcount;
  	unsigned int shared;
  	unsigned int objects_per_slab;
  	unsigned int cache_order;
  };
  
  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
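
  /*
   * Illustrative sketch only, not part of the original header: a hypothetical
   * consumer fills a struct slabinfo per cache and prints a few of its fields
   * (/proc/slabinfo style output):
   */
  #if 0
  static void example_show_cache(struct seq_file *m, struct kmem_cache *s)
  {
  	struct slabinfo sinfo;

  	memset(&sinfo, 0, sizeof(sinfo));
  	get_slabinfo(s, &sinfo);
  	seq_printf(m, "%-17s %6lu %6lu %u\n", cache_name(s),
  		   sinfo.active_objs, sinfo.num_objs, sinfo.objects_per_slab);
  }
  #endif
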
  ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  		       size_t count, loff_t *ppos);

  /*
   * Generic implementation of bulk operations
   * These are useful for situations in which the allocator cannot
   * perform optimizations. In that case segments of the objects listed
   * may be allocated or freed using these operations.
   */
  void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
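
  /*
   * Illustrative sketch only, not part of the original header: how a caller
   * might use the bulk helpers. Hypothetical example; the generic
   * __kmem_cache_alloc_bulk() above returns 0 on failure with nothing left
   * allocated.
   */
  #if 0
  static void example_bulk(struct kmem_cache *s)
  {
  	void *objs[16];

  	if (!__kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
  		return;
  	/* ... use the objects ... */
  	__kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
  }
  #endif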

  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  /*
   * Iterate over all memcg caches of the given root cache. The caller must hold
   * slab_mutex.
   */
  #define for_each_memcg_cache(iter, root) \
  	list_for_each_entry(iter, &(root)->memcg_params.list, \
  			    memcg_params.list)
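
  /*
   * Illustrative sketch only, not part of the original header: walking the
   * per-memcg children of a root cache. Hypothetical example; slab_mutex must
   * be held, as noted above.
   */
  #if 0
  static void example_walk_children(struct kmem_cache *root)
  {
  	struct kmem_cache *c;

  	lockdep_assert_held(&slab_mutex);
  	for_each_memcg_cache(c, root)
  		pr_info("child cache: %s\n", c->name);
  }
  #endif
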
  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return s->memcg_params.is_root_cache;
  }

  static inline bool slab_equal_or_root(struct kmem_cache *s,
  				      struct kmem_cache *p)
  {
  	return p == s || p == s->memcg_params.root_cache;
  }
  
  /*
   * We use suffixes to the name in memcg because we can't have caches
   * created in the system with the same name. But when we print them
   * locally, it is better to refer to them by the base name.
   */
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	if (!is_root_cache(s))
  		s = s->memcg_params.root_cache;
  	return s->name;
  }
  /*
   * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
   * That said, the caller must ensure the memcg's cache won't go away by either
   * taking a css reference to the owner cgroup, or holding the slab_mutex.
   */
  static inline struct kmem_cache *
  cache_from_memcg_idx(struct kmem_cache *s, int idx)
  {
  	struct kmem_cache *cachep;
  	struct memcg_cache_array *arr;
  
  	rcu_read_lock();
  	arr = rcu_dereference(s->memcg_params.memcg_caches);
  
  	/*
  	 * Make sure we will access the up-to-date value. The code updating
  	 * memcg_caches issues a write barrier to match this (see
  	 * memcg_create_kmem_cache()).
  	 */
  	cachep = lockless_dereference(arr->entries[idx]);
  	rcu_read_unlock();
  	return cachep;
  }
  
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	if (is_root_cache(s))
  		return s;
  	return s->memcg_params.root_cache;
  }

  static __always_inline int memcg_charge_slab(struct page *page,
  					     gfp_t gfp, int order,
  					     struct kmem_cache *s)
  {
  	int ret;
  	if (!memcg_kmem_enabled())
  		return 0;
  	if (is_root_cache(s))
  		return 0;

  	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
  	if (ret)
  		return ret;
  
  	memcg_kmem_update_page_stat(page,
  			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
  			1 << order);
  	return 0;
  }
  
  static __always_inline void memcg_uncharge_slab(struct page *page, int order,
  						struct kmem_cache *s)
  {
  	if (!memcg_kmem_enabled())
  		return;
  	memcg_kmem_update_page_stat(page,
  			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
  			-(1 << order));
  	memcg_kmem_uncharge(page, order);
  }
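
  /*
   * Illustrative sketch only, not part of the original header: the allocators
   * charge a slab page to the cache's memcg when the page is obtained and
   * uncharge it again on release, roughly as below. Both functions are
   * hypothetical examples.
   */
  #if 0
  static struct page *example_alloc_slab_page(struct kmem_cache *s,
  					    gfp_t gfp, int order)
  {
  	struct page *page = alloc_pages(gfp, order);

  	if (page && memcg_charge_slab(page, gfp, order, s)) {
  		__free_pages(page, order);
  		page = NULL;
  	}
  	return page;
  }

  static void example_free_slab_page(struct kmem_cache *s,
  				   struct page *page, int order)
  {
  	memcg_uncharge_slab(page, order, s);
  	__free_pages(page, order);
  }
  #endif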
  
  extern void slab_init_memcg_params(struct kmem_cache *);
  #else /* CONFIG_MEMCG && !CONFIG_SLOB */

  #define for_each_memcg_cache(iter, root) \
  	for ((void)(iter), (void)(root); 0; )

  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return true;
  }
  static inline bool slab_equal_or_root(struct kmem_cache *s,
  				      struct kmem_cache *p)
  {
  	return true;
  }
  
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	return s->name;
  }
  static inline struct kmem_cache *
  cache_from_memcg_idx(struct kmem_cache *s, int idx)
  {
  	return NULL;
  }
  
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	return s;
  }

  static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
  				    struct kmem_cache *s)
  {
  	return 0;
  }
  static inline void memcg_uncharge_slab(struct page *page, int order,
  				       struct kmem_cache *s)
  {
  }
  static inline void slab_init_memcg_params(struct kmem_cache *s)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
  
  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  {
  	struct kmem_cache *cachep;
  	struct page *page;
  
  	/*
  	 * When kmemcg is not being used, both assignments should return the
  	 * same value, but we don't want to pay the assignment price in that
  	 * case. If it is not compiled in, the compiler should be smart enough
  	 * to not do even the assignment. In that case, slab_equal_or_root
  	 * will also be a constant.
  	 */
  	if (!memcg_kmem_enabled() &&
  	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
  		return s;
  
  	page = virt_to_head_page(x);
  	cachep = page->slab_cache;
  	if (slab_equal_or_root(cachep, s))
  		return cachep;
  
  	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
  	       __func__, s->name, cachep->name);
  	WARN_ON_ONCE(1);
  	return s;
  }
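
  /*
   * Illustrative sketch only, not part of the original header: a free path
   * uses cache_from_obj() to recover (and sanity check) the cache an object
   * really belongs to before handing it back. Hypothetical example:
   */
  #if 0
  static void example_free(struct kmem_cache *s, void *x)
  {
  	s = cache_from_obj(s, x);
  	if (!s)
  		return;
  	/* ... release the object back to cache s ... */
  }
  #endif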

  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifndef CONFIG_SLUB
  	return s->object_size;
  
  #else /* CONFIG_SLUB */
  # ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->object_size;
  # endif
  	if (s->flags & SLAB_KASAN)
  		return s->object_size;
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  #endif
  }
  
  static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
  						     gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  	lockdep_trace_alloc(flags);
  	might_sleep_if(gfpflags_allow_blocking(flags));
  	if (should_failslab(s, flags))
  		return NULL;
  	if (memcg_kmem_enabled() &&
  	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
  		return memcg_kmem_get_cache(s);
  
  	return s;
  }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  					size_t size, void **p)
  {
  	size_t i;
  
  	flags &= gfp_allowed_mask;
  	for (i = 0; i < size; i++) {
  		void *object = p[i];
  
  		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
  		kmemleak_alloc_recursive(object, s->object_size, 1,
  					 s->flags, flags);
  		kasan_slab_alloc(s, object, flags);
  	}
  
  	if (memcg_kmem_enabled())
  		memcg_kmem_put_cache(s);
  }
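
  /*
   * Illustrative sketch only, not part of the original header: an allocation
   * path brackets the actual object allocation with the two hooks above,
   * roughly as below. Hypothetical example:
   */
  #if 0
  static void *example_alloc(struct kmem_cache *s, gfp_t flags)
  {
  	void *object;

  	s = slab_pre_alloc_hook(s, flags);
  	if (!s)
  		return NULL;
  	object = NULL;	/* ... allocator-specific fast/slow path fills this ... */
  	slab_post_alloc_hook(s, flags, 1, &object);
  	return object;
  }
  #endif
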
  #ifndef CONFIG_SLOB
  /*
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
  	spinlock_t list_lock;
  
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
  	struct list_head slabs_full;
  	struct list_head slabs_free;
  	unsigned long num_slabs;
  	unsigned long free_objects;
  	unsigned int free_limit;
  	unsigned int colour_next;	/* Per-node cache coloring */
  	struct array_cache *shared;	/* shared per node */
  	struct alien_cache **alien;	/* on other nodes */
  	unsigned long next_reap;	/* updated without locking */
  	int free_touched;		/* updated without locking */
  #endif
  
  #ifdef CONFIG_SLUB
  	unsigned long nr_partial;
  	struct list_head partial;
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_t nr_slabs;
  	atomic_long_t total_objects;
  	struct list_head full;
  #endif
  #endif
  
  };

  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  
  /*
   * Iterator over all nodes. The body will be executed for each node that has
   * a kmem_cache_node structure allocated (which is true for all online nodes)
   */
  #define for_each_kmem_cache_node(__s, __node, __n) \
  	for (__node = 0; __node < nr_node_ids; __node++) \
  		 if ((__n = get_node(__s, __node)))
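
  /*
   * Illustrative sketch only, not part of the original header: summing a
   * per-node counter with the iterator above. Hypothetical example
   * (nr_partial is the SLUB field):
   */
  #if 0
  static unsigned long example_count_partial(struct kmem_cache *s)
  {
  	struct kmem_cache_node *n;
  	unsigned long total = 0;
  	int node;

  	for_each_kmem_cache_node(s, node, n)
  		total += n->nr_partial;
  	return total;
  }
  #endif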
  
  #endif
  void *slab_start(struct seq_file *m, loff_t *pos);
  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
  void slab_stop(struct seq_file *m, void *p);
  int memcg_slab_show(struct seq_file *m, void *p);

  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
  			gfp_t gfp);
  void cache_random_seq_destroy(struct kmem_cache *cachep);
  #else
  static inline int cache_random_seq_create(struct kmem_cache *cachep,
  					unsigned int count, gfp_t gfp)
  {
  	return 0;
  }
  static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
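
  /*
   * Illustrative sketch only, not part of the original header: an allocator
   * that supports freelist randomization creates the per-cache random
   * sequence once at cache setup (and destroys it on teardown). Hypothetical
   * example; with CONFIG_SLAB_FREELIST_RANDOM off this is a no-op returning 0.
   */
  #if 0
  static int example_init_random(struct kmem_cache *cachep,
  			       unsigned int objects_per_slab)
  {
  	return cache_random_seq_create(cachep, objects_per_slab, GFP_KERNEL);
  }
  #endif
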
  #endif /* MM_SLAB_H */