mm/slub.c
  /*
   * SLUB: A slab allocator that limits cache line use instead of queuing
   * objects in per cpu and per node lists.
   *
   * The allocator synchronizes using per slab locks or atomic operations
   * and only uses a centralized lock to manage a pool of partial slabs.
   *
   * (C) 2007 SGI, Christoph Lameter
   * (C) 2011 Linux Foundation, Christoph Lameter
   */
  
  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/module.h>
  #include <linux/bit_spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/bitops.h>
  #include <linux/slab.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/kmemcheck.h>
  #include <linux/cpu.h>
  #include <linux/cpuset.h>
  #include <linux/mempolicy.h>
  #include <linux/ctype.h>
  #include <linux/debugobjects.h>
  #include <linux/kallsyms.h>
  #include <linux/memory.h>
  #include <linux/math64.h>
  #include <linux/fault-inject.h>
  #include <linux/stacktrace.h>

  #include <trace/events/kmem.h>
  /*
   * Lock order:
   *   1. slub_lock (Global Semaphore)
   *   2. node->list_lock
   *   3. slab_lock(page) (Only on some arches and for debugging)
   *
   *   slub_lock
   *
   *   The role of the slub_lock is to protect the list of all the slabs
   *   and to synchronize major metadata changes to slab cache structures.
   *
   *   The slab_lock is only used for debugging and on arches that do not
   *   have the ability to do a cmpxchg_double. It only protects the second
   *   double word in the page struct. Meaning
   *	A. page->freelist	-> List of free objects in a page
   *	B. page->counters	-> Counters of objects
   *	C. page->frozen		-> frozen state
   *
   *   If a slab is frozen then it is exempt from list management. It is not
   *   on any list. The processor that froze the slab is the one who can
   *   perform list operations on the page. Other processors may put objects
   *   onto the freelist but the processor that froze the slab is the only
   *   one that can retrieve the objects from the page's freelist.
   *
   *   The list_lock protects the partial and full list on each node and
   *   the partial slab counter. If taken then no new slabs may be added or
   *   removed from the lists nor may the number of partial slabs be modified.
   *   (Note that the total number of slabs is an atomic value that may be
   *   modified without taking the list lock).
   *
   *   The list_lock is a centralized lock and thus we avoid taking it as
   *   much as possible. As long as SLUB does not have to handle partial
   *   slabs, operations can continue without any centralized lock. F.e.
   *   allocating a long series of objects that fill up slabs does not require
   *   the list lock.
   *   Interrupts are disabled during allocation and deallocation in order to
   *   make the slab allocator safe to use in the context of an irq. In addition
   *   interrupts are disabled to ensure that the processor does not change
   *   while handling per_cpu slabs, due to kernel preemption.
   *
   * SLUB assigns one slab for allocation to each processor.
   * Allocations only occur from these slabs called cpu slabs.
   *
   * Slabs with free elements are kept on a partial list and during regular
   * operations no list for full slabs is used. If an object in a full slab is
   * freed then the slab will show up again on the partial lists.
   * We track full slabs for debugging purposes though because otherwise we
   * cannot scan all objects.
   *
   * Slabs are freed when they become empty. Teardown and setup is
   * minimal so we rely on the page allocator's per cpu caches for
   * fast frees and allocs.
   *
   * Overloading of page flags that are otherwise used for LRU management.
   *
   * PageActive 		The slab is frozen and exempt from list processing.
   * 			This means that the slab is dedicated to a purpose
   * 			such as satisfying allocations for a specific
   * 			processor. Objects may be freed in the slab while
   * 			it is frozen but slab_free will then skip the usual
   * 			list operations. It is up to the processor holding
   * 			the slab to integrate the slab into the slab lists
   * 			when the slab is no longer needed.
   *
   * 			One use of this flag is to mark slabs that are
   * 			used for allocations. Then such a slab becomes a cpu
   * 			slab. The cpu slab may be equipped with an additional
   * 			freelist that allows lockless access to
   * 			free objects in addition to the regular freelist
   * 			that requires the slab lock.
   *
   * PageError		Slab requires special handling due to debug
   * 			options set. This moves	slab handling out of
   * 			the fast path and disables lockless freelists.
   */
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DEBUG_FREE)
  
  static inline int kmem_cache_debug(struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
  #else
  	return 0;
  #endif
  }

  /*
   * Issues still to be resolved:
   *
   * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
   *
   * - Variable sizing of the per node arrays
   */
  
  /* Enable to test recovery from slab corruption on boot */
  #undef SLUB_RESILIENCY_TEST
  /* Enable to log cmpxchg failures */
  #undef SLUB_DEBUG_CMPXCHG
  /*
   * Minimum number of partial slabs. These will be left on the partial
   * lists even if they are empty. kmem_cache_shrink may reclaim them.
   */
  #define MIN_PARTIAL 5

  /*
   * Maximum number of desirable partial slabs.
   * The existence of more partial slabs makes kmem_cache_shrink
   * sort the partial list by the number of objects in them.
   */
  #define MAX_PARTIAL 10
  #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  				SLAB_POISON | SLAB_STORE_USER)

  /*
   * Debugging flags that require metadata to be stored in the slab.  These get
   * disabled when slub_debug=O is used and a cache's min order increases with
   * metadata.
   */
  #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  
  /*
   * Set of flags that will prevent slab merging
   */
  #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB)
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  		SLAB_CACHE_DMA | SLAB_NOTRACK)

  #define OO_SHIFT	16
  #define OO_MASK		((1 << OO_SHIFT) - 1)
  #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

  /* Internal SLUB flags */
  #define __OBJECT_POISON		0x80000000UL /* Poison object */
  #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
  
  static int kmem_size = sizeof(struct kmem_cache);
  
  #ifdef CONFIG_SMP
  static struct notifier_block slab_notifier;
  #endif
  
  static enum {
  	DOWN,		/* No slab functionality available */
  	PARTIAL,	/* Kmem_cache_node works */
  	UP,		/* Everything works but does not show up in sysfs */
  	SYSFS		/* Sysfs up */
  } slab_state = DOWN;
  
  /* A list of all slab caches on the system */
  static DECLARE_RWSEM(slub_lock);
  static LIST_HEAD(slab_caches);

  /*
   * Tracking user of a slab.
   */
  #define TRACK_ADDRS_COUNT 16
  struct track {
  	unsigned long addr;	/* Called from address */
  #ifdef CONFIG_STACKTRACE
  	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
  #endif
  	int cpu;		/* Was running on cpu */
  	int pid;		/* Pid context */
  	unsigned long when;	/* When did the operation occur */
  };
  
  enum track_item { TRACK_ALLOC, TRACK_FREE };
  #ifdef CONFIG_SYSFS
  static int sysfs_slab_add(struct kmem_cache *);
  static int sysfs_slab_alias(struct kmem_cache *, const char *);
  static void sysfs_slab_remove(struct kmem_cache *);

  #else
  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
  							{ return 0; }
  static inline void sysfs_slab_remove(struct kmem_cache *s)
  {
  	kfree(s->name);
  	kfree(s);
  }

  #endif
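
/*
 * Bump the per cpu statistics counter @si for cache @s. Compiles away
 * completely unless CONFIG_SLUB_STATS is enabled.
 */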
  static inline void stat(const struct kmem_cache *s, enum stat_item si)
  {
  #ifdef CONFIG_SLUB_STATS
  	__this_cpu_inc(s->cpu_slab->stat[si]);
  #endif
  }
  /********************************************************************
   * 			Core slab cache functions
   *******************************************************************/
  
  int slab_is_available(void)
  {
  	return slab_state >= UP;
  }
  
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  /* Verify that a pointer has an address that is valid within a slab page */
  static inline int check_valid_pointer(struct kmem_cache *s,
  				struct page *page, const void *object)
  {
  	void *base;
  	if (!object)
  		return 1;
  	base = page_address(page);
  	if (object < base || object >= base + page->objects * s->size ||
  		(object - base) % s->size) {
  		return 0;
  	}
  
  	return 1;
  }
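
/*
 * The pointer to the next free object is stored inside each free object,
 * s->offset bytes in. s->offset is zero when that first word may simply be
 * overwritten on free; caches that must preserve the object contents
 * (e.g. poisoning or SLAB_DESTROY_BY_RCU) keep the pointer after the
 * object instead.
 */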
  static inline void *get_freepointer(struct kmem_cache *s, void *object)
  {
  	return *(void **)(object + s->offset);
  }
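
/*
 * Variant of get_freepointer() for the speculative lockless fastpath:
 * with CONFIG_DEBUG_PAGEALLOC the page may already have been unmapped,
 * so the value is fetched with probe_kernel_read() which tolerates a fault.
 */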
  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
  {
  	void *p;
  
  #ifdef CONFIG_DEBUG_PAGEALLOC
  	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
  #else
  	p = get_freepointer(s, object);
  #endif
  	return p;
  }
  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  {
  	*(void **)(object + s->offset) = fp;
  }
  
  /* Loop over all objects in a slab */
  #define for_each_object(__p, __s, __addr, __objects) \
  	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
  			__p += (__s)->size)
  /* Determine object index from a given position */
  static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  {
  	return (p - addr) / s->size;
  }
  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->objsize;
  
  #endif
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  }
  static inline int order_objects(int order, unsigned long size, int reserved)
  {
  	return ((PAGE_SIZE << order) - reserved) / size;
  }
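
/*
 * A kmem_cache_order_objects value packs the page order and the number of
 * objects per slab into a single word: the order lives in the bits above
 * OO_SHIFT and the object count in the low OO_MASK bits.
 */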
  static inline struct kmem_cache_order_objects oo_make(int order,
  		unsigned long size, int reserved)
  {
  	struct kmem_cache_order_objects x = {
  		(order << OO_SHIFT) + order_objects(order, size, reserved)
  	};
  
  	return x;
  }
  
  static inline int oo_order(struct kmem_cache_order_objects x)
  {
  	return x.x >> OO_SHIFT;
  }
  
  static inline int oo_objects(struct kmem_cache_order_objects x)
  {
  	return x.x & OO_MASK;
  }
  /*
   * Per slab locking using the pagelock
   */
  static __always_inline void slab_lock(struct page *page)
  {
  	bit_spin_lock(PG_locked, &page->flags);
  }
  
  static __always_inline void slab_unlock(struct page *page)
  {
  	__bit_spin_unlock(PG_locked, &page->flags);
  }
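
/*
 * Update page->freelist and page->counters as one atomic pair, either with
 * cmpxchg_double() when the cache and architecture support it or under the
 * per slab bit lock otherwise. Returns 1 on success and 0 if another CPU
 * changed the pair in the meantime.
 */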
  /* Interrupts must be disabled (for the fallback code to work right) */
  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  	VM_BUG_ON(!irqs_disabled());
  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist, &page->counters,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			return 1;
  		}
  		slab_unlock(page);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
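
/*
 * Same as __cmpxchg_double_slab() but callable with interrupts enabled;
 * the locked fallback path disables interrupts itself around slab_lock().
 */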
  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist, &page->counters,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			local_irq_restore(flags);
  			return 1;
  		}
  		slab_unlock(page);
  		local_irq_restore(flags);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
  #ifdef CONFIG_SLUB_DEBUG
  /*
   * Determine a map of objects in use on a page.
   *
   * Node listlock must be held to guarantee that the page does
   * not vanish from under us.
   */
  static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
  {
  	void *p;
  	void *addr = page_address(page);
  
  	for (p = page->freelist; p; p = get_freepointer(s, p))
  		set_bit(slab_index(p, s, addr), map);
  }
  /*
   * Debug settings:
   */
  #ifdef CONFIG_SLUB_DEBUG_ON
  static int slub_debug = DEBUG_DEFAULT_FLAGS;
  #else
  static int slub_debug;
  #endif
  
  static char *slub_debug_slabs;
  static int disable_higher_order_debug;

  /*
   * Object debugging
   */
  static void print_section(char *text, u8 *addr, unsigned int length)
  {
  	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
  			length, 1);
  }
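
/*
 * With SLAB_STORE_USER two struct track records (TRACK_ALLOC and TRACK_FREE)
 * follow each object's metadata; get_track() returns the slot selected by
 * @alloc.
 */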
  static struct track *get_track(struct kmem_cache *s, void *object,
  	enum track_item alloc)
  {
  	struct track *p;
  
  	if (s->offset)
  		p = object + s->offset + sizeof(void *);
  	else
  		p = object + s->inuse;
  
  	return p + alloc;
  }
  
  static void set_track(struct kmem_cache *s, void *object,
  			enum track_item alloc, unsigned long addr)
  {
  	struct track *p = get_track(s, object, alloc);

  	if (addr) {
  #ifdef CONFIG_STACKTRACE
  		struct stack_trace trace;
  		int i;
  
  		trace.nr_entries = 0;
  		trace.max_entries = TRACK_ADDRS_COUNT;
  		trace.entries = p->addrs;
  		trace.skip = 3;
  		save_stack_trace(&trace);
  
  		/* See rant in lockdep.c */
  		if (trace.nr_entries != 0 &&
  		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
  			trace.nr_entries--;
  
  		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
  			p->addrs[i] = 0;
  #endif
  		p->addr = addr;
  		p->cpu = smp_processor_id();
  		p->pid = current->pid;
  		p->when = jiffies;
  	} else
  		memset(p, 0, sizeof(struct track));
  }
  static void init_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	set_track(s, object, TRACK_FREE, 0UL);
  	set_track(s, object, TRACK_ALLOC, 0UL);
  }
  
  static void print_track(const char *s, struct track *t)
  {
  	if (!t->addr)
  		return;
	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
  		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
  #ifdef CONFIG_STACKTRACE
  	{
  		int i;
  		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
  			if (t->addrs[i])
				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
  			else
  				break;
  	}
  #endif
  }
  
  static void print_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  
  	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  	print_track("Freed", get_track(s, object, TRACK_FREE));
  }
  
  static void print_page_info(struct page *page)
  {
	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
		page, page->objects, page->inuse, page->freelist, page->flags);
  
  }
  
  static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
  }
  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
  }
  
  static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned int off;	/* Offset of last byte */
  	u8 *addr = page_address(page);
  
  	print_tracking(s, p);
  
  	print_page_info(page);
  
	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));
  
  	if (p > addr + 16)
  		print_section("Bytes b4 ", p - 16, 16);

  	print_section("Object ", p, min_t(unsigned long, s->objsize,
  				PAGE_SIZE));
  	if (s->flags & SLAB_RED_ZONE)
  		print_section("Redzone ", p + s->objsize,
  			s->inuse - s->objsize);
  	if (s->offset)
  		off = s->offset + sizeof(void *);
  	else
  		off = s->inuse;
  	if (s->flags & SLAB_STORE_USER)
  		off += 2 * sizeof(struct track);
  
  	if (off != s->size)
  		/* Beginning of the filler is the free pointer */
  		print_section("Padding ", p + off, s->size - off);
  
  	dump_stack();
  }
  
  static void object_err(struct kmem_cache *s, struct page *page,
  			u8 *object, char *reason)
  {
  	slab_bug(s, "%s", reason);
  	print_trailer(s, page, object);
  }
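
/*
 * Like object_err() but for problems concerning the slab page as a whole;
 * takes a printf style format string.
 */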
  static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	slab_bug(s, "%s", buf);
  	print_page_info(page);
  	dump_stack();
  }
  static void init_object(struct kmem_cache *s, void *object, u8 val)
  {
  	u8 *p = object;
  
  	if (s->flags & __OBJECT_POISON) {
  		memset(p, POISON_FREE, s->objsize - 1);
  		p[s->objsize - 1] = POISON_END;
  	}
  
  	if (s->flags & SLAB_RED_ZONE)
  		memset(p + s->objsize, val, s->inuse - s->objsize);
  }
  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  						void *from, void *to)
  {
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
  	memset(from, data, to - from);
  }
  
  static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  			u8 *object, char *what,
  			u8 *start, unsigned int value, unsigned int bytes)
  {
  	u8 *fault;
  	u8 *end;
  	fault = memchr_inv(start, value, bytes);
  	if (!fault)
  		return 1;
  
  	end = start + bytes;
  	while (end > fault && end[-1] == value)
  		end--;
  
  	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
  	print_trailer(s, page, object);
  
  	restore_bytes(s, what, value, fault, end);
  	return 0;
  }
  /*
   * Object layout:
   *
   * object address
   * 	Bytes of the object to be managed.
   * 	If the freepointer may overlay the object then the free
   * 	pointer is the first word of the object.
   *
   * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
   * 	0xa5 (POISON_END)
   *
   * object + s->objsize
   * 	Padding to reach word boundary. This is also used for Redzoning.
   * 	Padding is extended by another word if Redzoning is enabled and
   * 	objsize == inuse.
   *
   * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
   * 	0xcc (RED_ACTIVE) for objects in use.
   *
   * object + s->inuse
   * 	Meta data starts here.
   *
   * 	A. Free pointer (if we cannot overwrite object on free)
   * 	B. Tracking data for SLAB_STORE_USER
   * 	C. Padding to reach required alignment boundary or at minimum
   * 		one word if debugging is on to be able to detect writes
   * 		before the word boundary.
   *
   *	Padding is done using 0x5a (POISON_INUSE)
   *
   * object + s->size
   * 	Nothing is used beyond s->size.
   *
   * If slabcaches are merged then the objsize and inuse boundaries are mostly
   * ignored. And therefore no slab options that rely on these boundaries
   * may be used with merged slabcaches.
   */
  static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned long off = s->inuse;	/* The end of info */
  
  	if (s->offset)
  		/* Freepointer is placed after the object. */
  		off += sizeof(void *);
  
  	if (s->flags & SLAB_STORE_USER)
  		/* We also have user information there */
  		off += 2 * sizeof(struct track);
  
  	if (s->size == off)
  		return 1;
  	return check_bytes_and_report(s, page, p, "Object padding",
  				p + off, POISON_INUSE, s->size - off);
  }
  /* Check the pad bytes at the end of a slab page */
  static int slab_pad_check(struct kmem_cache *s, struct page *page)
  {
  	u8 *start;
  	u8 *fault;
  	u8 *end;
  	int length;
  	int remainder;
  
  	if (!(s->flags & SLAB_POISON))
  		return 1;
  	start = page_address(page);
  	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
  	end = start + length;
  	remainder = length % s->size;
  	if (!remainder)
  		return 1;
  	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
  	if (!fault)
  		return 1;
  	while (end > fault && end[-1] == POISON_INUSE)
  		end--;
  
  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
  	print_section("Padding ", end - remainder, remainder);

  	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
  	return 0;
  }
  
  static int check_object(struct kmem_cache *s, struct page *page,
  					void *object, u8 val)
  {
  	u8 *p = object;
  	u8 *endobject = object + s->objsize;
  
  	if (s->flags & SLAB_RED_ZONE) {
  		if (!check_bytes_and_report(s, page, object, "Redzone",
  			endobject, val, s->inuse - s->objsize))
  			return 0;
  	} else {
  		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
  			check_bytes_and_report(s, page, p, "Alignment padding",
  				endobject, POISON_INUSE, s->inuse - s->objsize);
  		}
  	}
  
  	if (s->flags & SLAB_POISON) {
  		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
  			(!check_bytes_and_report(s, page, p, "Poison", p,
  					POISON_FREE, s->objsize - 1) ||
  			 !check_bytes_and_report(s, page, p, "Poison",
  				p + s->objsize - 1, POISON_END, 1)))
  			return 0;
  		/*
  		 * check_pad_bytes cleans up on its own.
  		 */
  		check_pad_bytes(s, page, p);
  	}
  	if (!s->offset && val == SLUB_RED_ACTIVE)
  		/*
  		 * Object and freepointer overlap. Cannot check
  		 * freepointer while object is allocated.
  		 */
  		return 1;
  
  	/* Check free pointer validity */
  	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  		object_err(s, page, p, "Freepointer corrupt");
  		/*
  		 * No choice but to zap it and thus lose the remainder
  		 * of the free objects in this slab. May cause
  		 * another error because the object count is now wrong.
  		 */
  		set_freepointer(s, p, NULL);
  		return 0;
  	}
  	return 1;
  }
  
  static int check_slab(struct kmem_cache *s, struct page *page)
  {
  	int maxobj;
  	VM_BUG_ON(!irqs_disabled());
  
  	if (!PageSlab(page)) {
  		slab_err(s, page, "Not a valid slab page");
  		return 0;
  	}

  	maxobj = order_objects(compound_order(page), s->size, s->reserved);
  	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
  		return 0;
  	}
  	if (page->inuse > page->objects) {
  		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
  		return 0;
  	}
  	/* Slab_pad_check fixes things up after itself */
  	slab_pad_check(s, page);
  	return 1;
  }
  
  /*
   * Determine if a certain object on a page is on the freelist. Must hold the
   * slab lock to guarantee that the chains are in a consistent state.
   */
  static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  {
  	int nr = 0;
  	void *fp;
  	void *object = NULL;
  	unsigned long max_objects;

  	fp = page->freelist;
  	while (fp && nr <= page->objects) {
  		if (fp == search)
  			return 1;
  		if (!check_valid_pointer(s, page, fp)) {
  			if (object) {
  				object_err(s, page, object,
  					"Freechain corrupt");
  				set_freepointer(s, object, NULL);
  				break;
  			} else {
  				slab_err(s, page, "Freepointer corrupt");
  				page->freelist = NULL;
  				page->inuse = page->objects;
  				slab_fix(s, "Freelist cleared");
  				return 0;
  			}
  			break;
  		}
  		object = fp;
  		fp = get_freepointer(s, object);
  		nr++;
  	}
  	max_objects = order_objects(compound_order(page), s->size, s->reserved);
  	if (max_objects > MAX_OBJS_PER_PAGE)
  		max_objects = MAX_OBJS_PER_PAGE;
  
  	if (page->objects != max_objects) {
  		slab_err(s, page, "Wrong number of objects. Found %d but "
  			"should be %d", page->objects, max_objects);
  		page->objects = max_objects;
  		slab_fix(s, "Number of objects adjusted.");
  	}
  	if (page->inuse != page->objects - nr) {
  		slab_err(s, page, "Wrong object count. Counter is %d but "
  			"counted were %d", page->inuse, page->objects - nr);
  		page->inuse = page->objects - nr;
  		slab_fix(s, "Object count adjusted.");
  	}
  	return search == NULL;
  }
  static void trace(struct kmem_cache *s, struct page *page, void *object,
  								int alloc)
  {
  	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  			s->name,
  			alloc ? "alloc" : "free",
  			object, page->inuse,
  			page->freelist);
  
  		if (!alloc)
  			print_section("Object ", (void *)object, s->objsize);
  
  		dump_stack();
  	}
  }
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
   */
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  	lockdep_trace_alloc(flags);
  	might_sleep_if(flags & __GFP_WAIT);
  
  	return should_failslab(s->objsize, flags, s->flags);
  }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
  {
  	flags &= gfp_allowed_mask;
  	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
  	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
  }
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x)
  {
  	kmemleak_free_recursive(x, s->flags);

  	/*
	 * Trouble is that we may no longer disable interrupts in the fast path
  	 * So in order to make the debug calls that expect irqs to be
  	 * disabled we need to disable interrupts temporarily.
  	 */
  #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		kmemcheck_slab_free(s, x, s->objsize);
  		debug_check_no_locks_freed(x, s->objsize);
  		local_irq_restore(flags);
  	}
  #endif
  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
  		debug_check_no_obj_freed(x, s->objsize);
  }
  
  /*
   * Tracking of fully allocated slabs for debugging purposes.
   *
   * list_lock must be held.
   */
  static void add_full(struct kmem_cache *s,
  	struct kmem_cache_node *n, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_add(&page->lru, &n->full);
  }
  /*
   * list_lock must be held.
   */
  static void remove_full(struct kmem_cache *s, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_del(&page->lru);
  }
  /* Tracking of the number of slabs for debugging purposes */
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  {
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	/*
  	 * May be called early in order to allocate a slab for the
  	 * kmem_cache_node structure. Solve the chicken-egg
  	 * dilemma by deferring the increment of the count during
  	 * bootstrap (see early_kmem_cache_node_alloc).
  	 */
  	if (n) {
  		atomic_long_inc(&n->nr_slabs);
  		atomic_long_add(objects, &n->total_objects);
  	}
  }
  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	atomic_long_dec(&n->nr_slabs);
  	atomic_long_sub(objects, &n->total_objects);
  }
  
  /* Object debug checks for alloc/free paths */
  static void setup_object_debug(struct kmem_cache *s, struct page *page,
  								void *object)
  {
  	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  		return;
  	init_object(s, object, SLUB_RED_INACTIVE);
  	init_tracking(s, object);
  }
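
/*
 * Consistency checks run on the slow allocation path when a cache has
 * debugging enabled; returns 0 if the slab or object fails them.
 */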
  static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
  					void *object, unsigned long addr)
  {
  	if (!check_slab(s, page))
  		goto bad;
  	if (!check_valid_pointer(s, page, object)) {
  		object_err(s, page, object, "Freelist Pointer check fails");
  		goto bad;
  	}
  	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
  		goto bad;

  	/* Success perform special debug activities for allocs */
  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_ALLOC, addr);
  	trace(s, page, object, 1);
  	init_object(s, object, SLUB_RED_ACTIVE);
  	return 1;

  bad:
  	if (PageSlab(page)) {
  		/*
  		 * If this is a slab page then lets do the best we can
  		 * to avoid issues in the future. Marking all objects
  		 * as used avoids touching the remaining objects.
  		 */
  		slab_fix(s, "Marking all objects used");
  		page->inuse = page->objects;
  		page->freelist = NULL;
  	}
  	return 0;
  }
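
/*
 * Consistency checks run on the free path when a cache has debugging
 * enabled; returns 0 and the free is rejected if the object or slab
 * looks corrupted.
 */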
  static noinline int free_debug_processing(struct kmem_cache *s,
  		 struct page *page, void *object, unsigned long addr)
  {
  	unsigned long flags;
  	int rc = 0;
  
  	local_irq_save(flags);
  	slab_lock(page);
  	if (!check_slab(s, page))
  		goto fail;
  
  	if (!check_valid_pointer(s, page, object)) {
  		slab_err(s, page, "Invalid object pointer 0x%p", object);
  		goto fail;
  	}
  
  	if (on_freelist(s, page, object)) {
  		object_err(s, page, object, "Object already free");
  		goto fail;
  	}
  	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
  		goto out;
  
  	if (unlikely(s != page->slab)) {
  		if (!PageSlab(page)) {
  			slab_err(s, page, "Attempt to free object(0x%p) "
  				"outside of slab", object);
  		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
  						object);
  			dump_stack();
  		} else
  			object_err(s, page, object,
  					"page slab pointer corrupt.");
  		goto fail;
  	}

  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_FREE, addr);
  	trace(s, page, object, 0);
  	init_object(s, object, SLUB_RED_INACTIVE);
  	rc = 1;
  out:
  	slab_unlock(page);
  	local_irq_restore(flags);
  	return rc;

  fail:
  	slab_fix(s, "Object at 0x%p not freed", object);
  	goto out;
  }
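
/*
 * Parse the slub_debug= boot option: "slub_debug=<flags>[,<slab name>]"
 * where each flag character enables one of the debug options handled in
 * the switch below and the optional slab name restricts debugging to
 * matching caches.
 */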
  static int __init setup_slub_debug(char *str)
  {
  	slub_debug = DEBUG_DEFAULT_FLAGS;
  	if (*str++ != '=' || !*str)
  		/*
  		 * No options specified. Switch on full debugging.
  		 */
  		goto out;
  
  	if (*str == ',')
  		/*
  		 * No options but restriction on slabs. This means full
  		 * debugging for slabs matching a pattern.
  		 */
  		goto check_slabs;
  	if (tolower(*str) == 'o') {
  		/*
  		 * Avoid enabling debugging on caches if their minimum order
  		 * would increase as a result.
  		 */
  		disable_higher_order_debug = 1;
  		goto out;
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
  	slub_debug = 0;
  	if (*str == '-')
  		/*
  		 * Switch off all debugging measures.
  		 */
  		goto out;
  
  	/*
  	 * Determine which debug features should be switched on
  	 */
064287807   Pekka Enberg   SLUB: Fix coding ...
1121
  	for (; *str && *str != ','; str++) {
f0630fff5   Christoph Lameter   SLUB: support slu...
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
  		switch (tolower(*str)) {
  		case 'f':
  			slub_debug |= SLAB_DEBUG_FREE;
  			break;
  		case 'z':
  			slub_debug |= SLAB_RED_ZONE;
  			break;
  		case 'p':
  			slub_debug |= SLAB_POISON;
  			break;
  		case 'u':
  			slub_debug |= SLAB_STORE_USER;
  			break;
  		case 't':
  			slub_debug |= SLAB_TRACE;
  			break;
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
1138
1139
1140
  		case 'a':
  			slub_debug |= SLAB_FAILSLAB;
  			break;
f0630fff5   Christoph Lameter   SLUB: support slu...
1141
1142
  		default:
  			printk(KERN_ERR "slub_debug option '%c' "
064287807   Pekka Enberg   SLUB: Fix coding ...
1143
1144
  				"unknown. skipped
  ", *str);
f0630fff5   Christoph Lameter   SLUB: support slu...
1145
  		}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1146
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
1147
  check_slabs:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1148
1149
  	if (*str == ',')
  		slub_debug_slabs = str + 1;
f0630fff5   Christoph Lameter   SLUB: support slu...
1150
  out:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1151
1152
1153
1154
  	return 1;
  }
  
  __setup("slub_debug", setup_slub_debug);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1155
1156
  static unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1157
  	void (*ctor)(void *))
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1158
1159
  {
  	/*
e153362a5   Christoph Lameter   slub: Remove objs...
1160
  	 * Enable debugging if selected on the kernel commandline.
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1161
  	 */
e153362a5   Christoph Lameter   slub: Remove objs...
1162
  	if (slub_debug && (!slub_debug_slabs ||
3de472138   David Rientjes   slub: use size an...
1163
1164
  		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
  		flags |= slub_debug;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1165
1166
  
  	return flags;
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1167
1168
  }
  #else
3ec097421   Christoph Lameter   SLUB: Simplify de...
1169
1170
  static inline void setup_object_debug(struct kmem_cache *s,
  			struct page *page, void *object) {}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1171

3ec097421   Christoph Lameter   SLUB: Simplify de...
1172
  static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1173
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1174

3ec097421   Christoph Lameter   SLUB: Simplify de...
1175
  static inline int free_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1176
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1177

41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1178
1179
1180
  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  			{ return 1; }
  static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb19336   Christoph Lameter   SLUB: Pass active...
1181
  			void *object, u8 val) { return 1; }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1182
1183
  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
  					struct page *page) {}
2cfb7455d   Christoph Lameter   slub: Rework allo...
1184
  static inline void remove_full(struct kmem_cache *s, struct page *page) {}
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1185
1186
  static inline unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1187
  	void (*ctor)(void *))
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1188
1189
1190
  {
  	return flags;
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1191
  #define slub_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1192

fdaa45e95   Ingo Molnar   slub: Fix build e...
1193
  #define disable_higher_order_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1194
1195
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  							{ return 0; }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1196
1197
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  							{ return 0; }
205ab99dd   Christoph Lameter   slub: Update stat...
1198
1199
1200
1201
  static inline void inc_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
  static inline void dec_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
7d550c56a   Christoph Lameter   slub: Add dummy f...
1202
1203
1204
1205
1206
1207
1208
1209
  
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  							{ return 0; }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  		void *object) {}
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
1210
  #endif /* CONFIG_SLUB_DEBUG */
205ab99dd   Christoph Lameter   slub: Update stat...
1211

81819f0fc   Christoph Lameter   SLUB core
1212
1213
1214
  /*
   * Slab allocation and freeing
   */
65c3376aa   Christoph Lameter   slub: Fallback to...
1215
1216
1217
1218
  static inline struct page *alloc_slab_page(gfp_t flags, int node,
  					struct kmem_cache_order_objects oo)
  {
  	int order = oo_order(oo);
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1219
  	flags |= __GFP_NOTRACK;
2154a3363   Christoph Lameter   slub: Use a const...
1220
  	if (node == NUMA_NO_NODE)
65c3376aa   Christoph Lameter   slub: Fallback to...
1221
1222
  		return alloc_pages(flags, order);
  	else
6b65aaf30   Minchan Kim   slub: Use alloc_p...
1223
  		return alloc_pages_exact_node(node, flags, order);
65c3376aa   Christoph Lameter   slub: Fallback to...
1224
  }
81819f0fc   Christoph Lameter   SLUB core
1225
1226
  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
1227
  	struct page *page;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1228
  	struct kmem_cache_order_objects oo = s->oo;
ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1229
  	gfp_t alloc_gfp;
81819f0fc   Christoph Lameter   SLUB core
1230

7e0528dad   Christoph Lameter   slub: Push irq di...
1231
1232
1233
1234
  	flags &= gfp_allowed_mask;
  
  	if (flags & __GFP_WAIT)
  		local_irq_enable();
b7a49f0d4   Christoph Lameter   slub: Determine g...
1235
  	flags |= s->allocflags;
e12ba74d8   Mel Gorman   Group short-lived...
1236

ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1237
1238
1239
1240
1241
1242
1243
  	/*
  	 * Let the initial higher-order allocation fail under memory pressure
  	 * so we fall back to the minimum order allocation.
  	 */
  	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
  
  	page = alloc_slab_page(alloc_gfp, node, oo);
65c3376aa   Christoph Lameter   slub: Fallback to...
1244
1245
1246
1247
1248
1249
1250
  	if (unlikely(!page)) {
  		oo = s->min;
  		/*
  		 * Allocation may have failed due to fragmentation.
  		 * Try a lower order alloc if possible
  		 */
  		page = alloc_slab_page(flags, node, oo);
81819f0fc   Christoph Lameter   SLUB core
1251

7e0528dad   Christoph Lameter   slub: Push irq di...
1252
1253
  		if (page)
  			stat(s, ORDER_FALLBACK);
65c3376aa   Christoph Lameter   slub: Fallback to...
1254
  	}
5a896d9e7   Vegard Nossum   slub: add hooks f...
1255

7e0528dad   Christoph Lameter   slub: Push irq di...
1256
1257
1258
1259
1260
  	if (flags & __GFP_WAIT)
  		local_irq_disable();
  
  	if (!page)
  		return NULL;
5a896d9e7   Vegard Nossum   slub: add hooks f...
1261
  	if (kmemcheck_enabled
5086c389c   Amerigo Wang   SLUB: Fix some co...
1262
  		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
  		int pages = 1 << oo_order(oo);
  
  		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
  
  		/*
  		 * Objects from caches that have a constructor don't get
  		 * cleared when they're allocated, so we need to do it here.
  		 */
  		if (s->ctor)
  			kmemcheck_mark_uninitialized_pages(page, pages);
  		else
  			kmemcheck_mark_unallocated_pages(page, pages);
5a896d9e7   Vegard Nossum   slub: add hooks f...
1275
  	}
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1276
  	page->objects = oo_objects(oo);
81819f0fc   Christoph Lameter   SLUB core
1277
1278
1279
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
65c3376aa   Christoph Lameter   slub: Fallback to...
1280
  		1 << oo_order(oo));
81819f0fc   Christoph Lameter   SLUB core
1281
1282
1283
1284
1285
1286
1287
  
  	return page;
  }
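  
  /*
   * Note on the fallback above (illustrative): the first attempt uses the
   * preferred order from s->oo with __GFP_NOWARN | __GFP_NORETRY and
   * without __GFP_NOFAIL, so e.g. an order-3 request may fail quietly
   * under fragmentation and is then retried at the minimum order from
   * s->min (often an order-0 page), counted as ORDER_FALLBACK.
   */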
  
  static void setup_object(struct kmem_cache *s, struct page *page,
  				void *object)
  {
3ec097421   Christoph Lameter   SLUB: Simplify de...
1288
  	setup_object_debug(s, page, object);
4f1049345   Christoph Lameter   slab allocators: ...
1289
  	if (unlikely(s->ctor))
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1290
  		s->ctor(object);
81819f0fc   Christoph Lameter   SLUB core
1291
1292
1293
1294
1295
  }
  
  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
1296
  	void *start;
81819f0fc   Christoph Lameter   SLUB core
1297
1298
  	void *last;
  	void *p;
6cb062296   Christoph Lameter   Categorize GFP flags
1299
  	BUG_ON(flags & GFP_SLAB_BUG_MASK);
81819f0fc   Christoph Lameter   SLUB core
1300

6cb062296   Christoph Lameter   Categorize GFP flags
1301
1302
  	page = allocate_slab(s,
  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
81819f0fc   Christoph Lameter   SLUB core
1303
1304
  	if (!page)
  		goto out;
205ab99dd   Christoph Lameter   slub: Update stat...
1305
  	inc_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1306
1307
  	page->slab = s;
  	page->flags |= 1 << PG_slab;
81819f0fc   Christoph Lameter   SLUB core
1308
1309
  
  	start = page_address(page);
81819f0fc   Christoph Lameter   SLUB core
1310
1311
  
  	if (unlikely(s->flags & SLAB_POISON))
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1312
  		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
81819f0fc   Christoph Lameter   SLUB core
1313
1314
  
  	last = start;
224a88be4   Christoph Lameter   slub: for_each_ob...
1315
  	for_each_object(p, s, start, page->objects) {
81819f0fc   Christoph Lameter   SLUB core
1316
1317
1318
1319
1320
  		setup_object(s, page, last);
  		set_freepointer(s, last, p);
  		last = p;
  	}
  	setup_object(s, page, last);
a973e9dd1   Christoph Lameter   Revert "unique en...
1321
  	set_freepointer(s, last, NULL);
81819f0fc   Christoph Lameter   SLUB core
1322
1323
  
  	page->freelist = start;
e6e82ea11   Christoph Lameter   slub: Prepare inu...
1324
  	page->inuse = page->objects;
8cb0a5068   Christoph Lameter   slub: Move page->...
1325
  	page->frozen = 1;
81819f0fc   Christoph Lameter   SLUB core
1326
  out:
81819f0fc   Christoph Lameter   SLUB core
1327
1328
1329
1330
1331
  	return page;
  }
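  
  /*
   * Freelist layout produced above (illustrative, 4 objects): the loop
   * threads the free pointer through the objects in address order,
   *
   *	page->freelist -> obj0 -> obj1 -> obj2 -> obj3 -> NULL
   *
   * and the page is returned frozen with inuse == objects because the
   * whole list is about to become a per cpu freelist.
   */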
  
  static void __free_slab(struct kmem_cache *s, struct page *page)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1332
1333
  	int order = compound_order(page);
  	int pages = 1 << order;
81819f0fc   Christoph Lameter   SLUB core
1334

af537b0a6   Christoph Lameter   slub: Use kmem_ca...
1335
  	if (kmem_cache_debug(s)) {
81819f0fc   Christoph Lameter   SLUB core
1336
1337
1338
  		void *p;
  
  		slab_pad_check(s, page);
224a88be4   Christoph Lameter   slub: for_each_ob...
1339
1340
  		for_each_object(p, s, page_address(page),
  						page->objects)
f7cb19336   Christoph Lameter   SLUB: Pass active...
1341
  			check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0fc   Christoph Lameter   SLUB core
1342
  	}
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1343
  	kmemcheck_free_shadow(page, compound_order(page));
5a896d9e7   Vegard Nossum   slub: add hooks f...
1344

81819f0fc   Christoph Lameter   SLUB core
1345
1346
1347
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
064287807   Pekka Enberg   SLUB: Fix coding ...
1348
  		-pages);
81819f0fc   Christoph Lameter   SLUB core
1349

49bd5221c   Christoph Lameter   slub: Move map/fl...
1350
1351
  	__ClearPageSlab(page);
  	reset_page_mapcount(page);
1eb5ac646   Nick Piggin   mm: SLUB fix recl...
1352
1353
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += pages;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1354
  	__free_pages(page, order);
81819f0fc   Christoph Lameter   SLUB core
1355
  }
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1356
1357
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
81819f0fc   Christoph Lameter   SLUB core
1358
1359
1360
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct page *page;
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1361
1362
1363
1364
  	if (need_reserve_slab_rcu)
  		page = virt_to_head_page(h);
  	else
  		page = container_of((struct list_head *)h, struct page, lru);
81819f0fc   Christoph Lameter   SLUB core
1365
1366
1367
1368
1369
1370
  	__free_slab(page->slab, page);
  }
  
  static void free_slab(struct kmem_cache *s, struct page *page)
  {
  	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
  		struct rcu_head *head;
  
  		if (need_reserve_slab_rcu) {
  			int order = compound_order(page);
  			int offset = (PAGE_SIZE << order) - s->reserved;
  
  			VM_BUG_ON(s->reserved != sizeof(*head));
  			head = page_address(page) + offset;
  		} else {
  			/*
  			 * RCU free overloads the RCU head over the LRU
  			 */
  			head = (void *)&page->lru;
  		}
81819f0fc   Christoph Lameter   SLUB core
1385
1386
1387
1388
1389
1390
1391
1392
  
  		call_rcu(head, rcu_free_slab);
  	} else
  		__free_slab(s, page);
  }
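  
  /*
   * Layout assumed by the RCU path above (illustrative): when struct
   * rcu_head does not fit in page->lru, s->reserved bytes are kept free
   * at the end of every slab, so for an order-0 slab the head sits at
   *
   *	page_address(page) + PAGE_SIZE - sizeof(struct rcu_head)
   *
   * and rcu_free_slab() recovers the page with virt_to_head_page().
   */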
  
  static void discard_slab(struct kmem_cache *s, struct page *page)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
1393
  	dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1394
1395
1396
1397
  	free_slab(s, page);
  }
  
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1398
1399
1400
   * Management of partially allocated slabs.
   *
   * list_lock must be held.
81819f0fc   Christoph Lameter   SLUB core
1401
   */
5cc6eee8a   Christoph Lameter   slub: explicit li...
1402
  static inline void add_partial(struct kmem_cache_node *n,
7c2e132c5   Christoph Lameter   Add parameter to ...
1403
  				struct page *page, int tail)
81819f0fc   Christoph Lameter   SLUB core
1404
  {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1405
  	n->nr_partial++;
136333d10   Shaohua Li   slub: explicitly ...
1406
  	if (tail == DEACTIVATE_TO_TAIL)
7c2e132c5   Christoph Lameter   Add parameter to ...
1407
1408
1409
  		list_add_tail(&page->lru, &n->partial);
  	else
  		list_add(&page->lru, &n->partial);
81819f0fc   Christoph Lameter   SLUB core
1410
  }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1411
1412
1413
1414
  /*
   * list_lock must be held.
   */
  static inline void remove_partial(struct kmem_cache_node *n,
62e346a83   Christoph Lameter   slub: extract com...
1415
1416
1417
1418
1419
  					struct page *page)
  {
  	list_del(&page->lru);
  	n->nr_partial--;
  }
81819f0fc   Christoph Lameter   SLUB core
1420
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1421
1422
   * Remove the slab from the partial list, freeze it and return its
   * freelist. The caller takes over ownership of the objects.
81819f0fc   Christoph Lameter   SLUB core
1423
   *
497b66f2e   Christoph Lameter   slub: return obje...
1424
1425
   * Returns a list of objects or NULL if it fails.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1426
   * Must hold list_lock.
81819f0fc   Christoph Lameter   SLUB core
1427
   */
497b66f2e   Christoph Lameter   slub: return obje...
1428
  static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1429
  		struct kmem_cache_node *n, struct page *page,
49e225858   Christoph Lameter   slub: per cpu cac...
1430
  		int mode)
81819f0fc   Christoph Lameter   SLUB core
1431
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1432
1433
1434
  	void *freelist;
  	unsigned long counters;
  	struct page new;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1435
1436
1437
1438
1439
1440
1441
1442
1443
  	/*
  	 * Zap the freelist and set the frozen bit.
  	 * The old freelist is the list of objects for the
  	 * per cpu allocation list.
  	 */
  	do {
  		freelist = page->freelist;
  		counters = page->counters;
  		new.counters = counters;
49e225858   Christoph Lameter   slub: per cpu cac...
1444
1445
  		if (mode)
  			new.inuse = page->objects;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1446
1447
1448
  
  		VM_BUG_ON(new.frozen);
  		new.frozen = 1;
1d07171c5   Christoph Lameter   slub: disable int...
1449
  	} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1450
1451
1452
1453
1454
  			freelist, counters,
  			NULL, new.counters,
  			"lock and freeze"));
  
  	remove_partial(n, page);
49e225858   Christoph Lameter   slub: per cpu cac...
1455
  	return freelist;
81819f0fc   Christoph Lameter   SLUB core
1456
  }
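  
  /*
   * Note (illustrative): the cmpxchg_double above replaces page->freelist
   * and page->counters as one unit, so a concurrent remote free that
   * touches either word simply forces another pass through the loop
   * instead of losing the freeze transition.
   */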
49e225858   Christoph Lameter   slub: per cpu cac...
1457
  static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
81819f0fc   Christoph Lameter   SLUB core
1458
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1459
   * Try to allocate a partial slab from a specific node.
81819f0fc   Christoph Lameter   SLUB core
1460
   */
497b66f2e   Christoph Lameter   slub: return obje...
1461
  static void *get_partial_node(struct kmem_cache *s,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1462
  		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1463
  {
49e225858   Christoph Lameter   slub: per cpu cac...
1464
1465
  	struct page *page, *page2;
  	void *object = NULL;
81819f0fc   Christoph Lameter   SLUB core
1466
1467
1468
1469
  
  	/*
  	 * Racy check. If we mistakenly see no partial slabs then we
  	 * just allocate an empty slab. If we mistakenly try to get a
672bba3a4   Christoph Lameter   SLUB: update comm...
1470
1471
  	 * partial slab and there is none available then get_partial_node()
  	 * will return NULL.
81819f0fc   Christoph Lameter   SLUB core
1472
1473
1474
1475
1476
  	 */
  	if (!n || !n->nr_partial)
  		return NULL;
  
  	spin_lock(&n->list_lock);
49e225858   Christoph Lameter   slub: per cpu cac...
1477
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
12d79634f   Alex,Shi   slub: Code optimi...
1478
  		void *t = acquire_slab(s, n, page, object == NULL);
49e225858   Christoph Lameter   slub: per cpu cac...
1479
1480
1481
1482
  		int available;
  
  		if (!t)
  			break;
12d79634f   Alex,Shi   slub: Code optimi...
1483
  		if (!object) {
49e225858   Christoph Lameter   slub: per cpu cac...
1484
1485
1486
  			c->page = page;
  			c->node = page_to_nid(page);
  			stat(s, ALLOC_FROM_PARTIAL);
49e225858   Christoph Lameter   slub: per cpu cac...
1487
1488
1489
1490
1491
1492
1493
1494
  			object = t;
  			available =  page->objects - page->inuse;
  		} else {
  			page->freelist = t;
  			available = put_cpu_partial(s, page, 0);
  		}
  		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
  			break;
497b66f2e   Christoph Lameter   slub: return obje...
1495
  	}
81819f0fc   Christoph Lameter   SLUB core
1496
  	spin_unlock(&n->list_lock);
497b66f2e   Christoph Lameter   slub: return obje...
1497
  	return object;
81819f0fc   Christoph Lameter   SLUB core
1498
1499
1500
  }
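  
  /*
   * Example of the refill policy above (hypothetical numbers): with
   * s->cpu_partial == 30, the first acquired slab becomes c->page and
   * further partial slabs are parked via put_cpu_partial() until more
   * than 15 free objects have been cached; debug caches stop after the
   * first slab.
   */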
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1501
   * Get a page from somewhere. Search in increasing NUMA distances.
81819f0fc   Christoph Lameter   SLUB core
1502
   */
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1503
1504
  static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
  		struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1505
1506
1507
  {
  #ifdef CONFIG_NUMA
  	struct zonelist *zonelist;
dd1a239f6   Mel Gorman   mm: have zonelist...
1508
  	struct zoneref *z;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1509
1510
  	struct zone *zone;
  	enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2e   Christoph Lameter   slub: return obje...
1511
  	void *object;
81819f0fc   Christoph Lameter   SLUB core
1512
1513
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1514
1515
1516
1517
  	 * The defrag ratio allows a configuration of the tradeoffs between
  	 * inter node defragmentation and node local allocations. A lower
  	 * defrag_ratio increases the tendency to do local allocations
  	 * instead of attempting to obtain partial slabs from other nodes.
81819f0fc   Christoph Lameter   SLUB core
1518
  	 *
672bba3a4   Christoph Lameter   SLUB: update comm...
1519
1520
1521
1522
  	 * If the defrag_ratio is set to 0 then kmalloc() always
  	 * returns node local objects. If the ratio is higher then kmalloc()
  	 * may return off node objects because partial slabs are obtained
  	 * from other nodes and filled up.
81819f0fc   Christoph Lameter   SLUB core
1523
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
1524
  	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
672bba3a4   Christoph Lameter   SLUB: update comm...
1525
1526
1527
1528
1529
  	 * defrag_ratio = 1000) then every (well almost) allocation will
  	 * first attempt to defrag slab caches on other nodes. This means
  	 * scanning over all nodes to look for partial slabs which may be
  	 * expensive if we do it every time we are trying to find a slab
  	 * with available objects.
81819f0fc   Christoph Lameter   SLUB core
1530
  	 */
9824601ea   Christoph Lameter   SLUB: rename defr...
1531
1532
  	if (!s->remote_node_defrag_ratio ||
  			get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0fc   Christoph Lameter   SLUB core
1533
  		return NULL;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1534
  	get_mems_allowed();
0e88460da   Mel Gorman   mm: introduce nod...
1535
  	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
54a6eb5c4   Mel Gorman   mm: use two zonel...
1536
  	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
81819f0fc   Christoph Lameter   SLUB core
1537
  		struct kmem_cache_node *n;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1538
  		n = get_node(s, zone_to_nid(zone));
81819f0fc   Christoph Lameter   SLUB core
1539

54a6eb5c4   Mel Gorman   mm: use two zonel...
1540
  		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
3b89d7d88   David Rientjes   slub: move min_pa...
1541
  				n->nr_partial > s->min_partial) {
497b66f2e   Christoph Lameter   slub: return obje...
1542
1543
  			object = get_partial_node(s, n, c);
  			if (object) {
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1544
  				put_mems_allowed();
497b66f2e   Christoph Lameter   slub: return obje...
1545
  				return object;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1546
  			}
81819f0fc   Christoph Lameter   SLUB core
1547
1548
  		}
  	}
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1549
  	put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1550
1551
1552
1553
1554
1555
1556
  #endif
  	return NULL;
  }
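  
  /*
   * Worked example for the check above (illustrative): the internal
   * remote_node_defrag_ratio ranges from 0 to 1000. At 1000 the test
   * "get_cycles() % 1024 > ratio" is almost never true, so nearly every
   * allocation scans remote nodes for partial slabs; at 0 the function
   * bails out immediately and the caller allocates a node local slab
   * instead.
   */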
  
  /*
   * Get a partial page, lock it and return it.
   */
497b66f2e   Christoph Lameter   slub: return obje...
1557
  static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1558
  		struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1559
  {
497b66f2e   Christoph Lameter   slub: return obje...
1560
  	void *object;
2154a3363   Christoph Lameter   slub: Use a const...
1561
  	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
81819f0fc   Christoph Lameter   SLUB core
1562

497b66f2e   Christoph Lameter   slub: return obje...
1563
1564
1565
  	object = get_partial_node(s, get_node(s, searchnode), c);
  	if (object || node != NUMA_NO_NODE)
  		return object;
81819f0fc   Christoph Lameter   SLUB core
1566

acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1567
  	return get_any_partial(s, flags, c);
81819f0fc   Christoph Lameter   SLUB core
1568
  }
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
  #ifdef CONFIG_PREEMPT
  /*
   * Calculate the next globally unique transaction for disambiguation
   * during cmpxchg. The transactions start with the cpu number and are then
   * incremented by CONFIG_NR_CPUS.
   */
  #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
  #else
  /*
   * No preemption supported therefore also no need to check for
   * different cpus.
   */
  #define TID_STEP 1
  #endif
  
  static inline unsigned long next_tid(unsigned long tid)
  {
  	return tid + TID_STEP;
  }
  
  static inline unsigned int tid_to_cpu(unsigned long tid)
  {
  	return tid % TID_STEP;
  }
  
  static inline unsigned long tid_to_event(unsigned long tid)
  {
  	return tid / TID_STEP;
  }
  
  static inline unsigned int init_tid(int cpu)
  {
  	return cpu;
  }
  
  static inline void note_cmpxchg_failure(const char *n,
  		const struct kmem_cache *s, unsigned long tid)
  {
  #ifdef SLUB_DEBUG_CMPXCHG
  	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
  
  	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
  
  #ifdef CONFIG_PREEMPT
  	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
  		printk("due to cpu change %d -> %d
  ",
  			tid_to_cpu(tid), tid_to_cpu(actual_tid));
  	else
  #endif
  	if (tid_to_event(tid) != tid_to_event(actual_tid))
  		printk("due to cpu running other code. Event %ld->%ld
  ",
  			tid_to_event(tid), tid_to_event(actual_tid));
  	else
  		printk("for unknown reason: actual=%lx was=%lx target=%lx
  ",
  			actual_tid, tid, next_tid(tid));
  #endif
4fdccdfbb   Christoph Lameter   slub: Add statist...
1628
  	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1629
  }
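  
  /*
   * Worked example (assumes CONFIG_PREEMPT and CONFIG_NR_CPUS == 4, so
   * TID_STEP == 4): cpu 2 starts at init_tid(2) == 2 and successive
   * next_tid() calls produce 6, 10, 14, ...; tid_to_cpu(14) == 2 and
   * tid_to_event(14) == 3, so a tid identifies both the owning cpu and
   * how many operations that cpu has performed.
   */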
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1630
1631
  void init_kmem_cache_cpus(struct kmem_cache *s)
  {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1632
1633
1634
1635
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1636
  }
2cfb7455d   Christoph Lameter   slub: Rework allo...
1637
1638
1639
1640
  
  /*
   * Remove the cpu slab
   */
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1641
  static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1642
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1643
  	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1644
  	struct page *page = c->page;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1645
1646
1647
1648
1649
  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  	int lock = 0;
  	enum slab_modes l = M_NONE, m = M_NONE;
  	void *freelist;
  	void *nextfree;
136333d10   Shaohua Li   slub: explicitly ...
1650
  	int tail = DEACTIVATE_TO_HEAD;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1651
1652
1653
1654
  	struct page new;
  	struct page old;
  
  	if (page->freelist) {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1655
  		stat(s, DEACTIVATE_REMOTE_FREES);
136333d10   Shaohua Li   slub: explicitly ...
1656
  		tail = DEACTIVATE_TO_TAIL;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1657
1658
1659
1660
1661
1662
  	}
  
  	c->tid = next_tid(c->tid);
  	c->page = NULL;
  	freelist = c->freelist;
  	c->freelist = NULL;
894b8788d   Christoph Lameter   slub: support con...
1663
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
  	 * Stage one: Free all available per cpu objects back
  	 * to the page freelist while it is still frozen. Leave the
  	 * last one.
  	 *
  	 * There is no need to take the list->lock because the page
  	 * is still frozen.
  	 */
  	while (freelist && (nextfree = get_freepointer(s, freelist))) {
  		void *prior;
  		unsigned long counters;
  
  		do {
  			prior = page->freelist;
  			counters = page->counters;
  			set_freepointer(s, freelist, prior);
  			new.counters = counters;
  			new.inuse--;
  			VM_BUG_ON(!new.frozen);
1d07171c5   Christoph Lameter   slub: disable int...
1682
  		} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1683
1684
1685
1686
1687
1688
  			prior, counters,
  			freelist, new.counters,
  			"drain percpu freelist"));
  
  		freelist = nextfree;
  	}
894b8788d   Christoph Lameter   slub: support con...
1689
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
  	 * Stage two: Ensure that the page is unfrozen while the
  	 * list presence reflects the actual number of objects
  	 * during unfreeze.
  	 *
  	 * We set up the list membership and then perform a cmpxchg
  	 * with the count. If there is a mismatch then the page
  	 * is not unfrozen and may now sit on the wrong list.
  	 *
  	 * Then we restart the process, which may have to remove
  	 * the page again from the list that we just put it on,
  	 * because the number of objects in the slab may have
  	 * changed.
894b8788d   Christoph Lameter   slub: support con...
1702
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
1703
  redo:
894b8788d   Christoph Lameter   slub: support con...
1704

2cfb7455d   Christoph Lameter   slub: Rework allo...
1705
1706
1707
  	old.freelist = page->freelist;
  	old.counters = page->counters;
  	VM_BUG_ON(!old.frozen);
7c2e132c5   Christoph Lameter   Add parameter to ...
1708

2cfb7455d   Christoph Lameter   slub: Rework allo...
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
  	/* Determine target state of the slab */
  	new.counters = old.counters;
  	if (freelist) {
  		new.inuse--;
  		set_freepointer(s, freelist, old.freelist);
  		new.freelist = freelist;
  	} else
  		new.freelist = old.freelist;
  
  	new.frozen = 0;
81107188f   Christoph Lameter   slub: Fix partial...
1719
  	if (!new.inuse && n->nr_partial > s->min_partial)
2cfb7455d   Christoph Lameter   slub: Rework allo...
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
  		m = M_FREE;
  	else if (new.freelist) {
  		m = M_PARTIAL;
  		if (!lock) {
  			lock = 1;
  			/*
  			 * Taking the spinlock removes the possibility
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
  			spin_lock(&n->list_lock);
  		}
  	} else {
  		m = M_FULL;
  		if (kmem_cache_debug(s) && !lock) {
  			lock = 1;
  			/*
  			 * This also ensures that the scanning of full
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
  			spin_lock(&n->list_lock);
  		}
  	}
  
  	if (l != m) {
  
  		if (l == M_PARTIAL)
  
  			remove_partial(n, page);
  
  		else if (l == M_FULL)
894b8788d   Christoph Lameter   slub: support con...
1752

2cfb7455d   Christoph Lameter   slub: Rework allo...
1753
1754
1755
1756
1757
  			remove_full(s, page);
  
  		if (m == M_PARTIAL) {
  
  			add_partial(n, page, tail);
136333d10   Shaohua Li   slub: explicitly ...
1758
  			stat(s, tail);
2cfb7455d   Christoph Lameter   slub: Rework allo...
1759
1760
  
  		} else if (m == M_FULL) {
894b8788d   Christoph Lameter   slub: support con...
1761

2cfb7455d   Christoph Lameter   slub: Rework allo...
1762
1763
1764
1765
1766
1767
1768
  			stat(s, DEACTIVATE_FULL);
  			add_full(s, n, page);
  
  		}
  	}
  
  	l = m;
1d07171c5   Christoph Lameter   slub: disable int...
1769
  	if (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1770
1771
1772
1773
  				old.freelist, old.counters,
  				new.freelist, new.counters,
  				"unfreezing slab"))
  		goto redo;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1774
1775
1776
1777
1778
1779
1780
  	if (lock)
  		spin_unlock(&n->list_lock);
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
  		discard_slab(s, page);
  		stat(s, FREE_SLAB);
894b8788d   Christoph Lameter   slub: support con...
1781
  	}
81819f0fc   Christoph Lameter   SLUB core
1782
  }
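  
  /*
   * Walk-through (hypothetical numbers): with three objects left on the
   * cpu freelist, stage one pushes two of them back onto page->freelist
   * one cmpxchg at a time and keeps the last in hand; stage two links
   * that last object in, clears frozen and files the page as M_PARTIAL
   * (free objects remain), M_FULL (no free objects) or M_FREE (completely
   * unused and the node already holds more than min_partial partial
   * slabs, so it is discarded).
   */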
49e225858   Christoph Lameter   slub: per cpu cac...
1783
1784
1785
1786
1787
  /* Unfreeze all the cpu partial slabs */
  static void unfreeze_partials(struct kmem_cache *s)
  {
  	struct kmem_cache_node *n = NULL;
  	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
9ada19342   Shaohua Li   slub: move discar...
1788
  	struct page *page, *discard_page = NULL;
49e225858   Christoph Lameter   slub: per cpu cac...
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
  
  	while ((page = c->partial)) {
  		enum slab_modes { M_PARTIAL, M_FREE };
  		enum slab_modes l, m;
  		struct page new;
  		struct page old;
  
  		c->partial = page->next;
  		l = M_FREE;
  
  		do {
  
  			old.freelist = page->freelist;
  			old.counters = page->counters;
  			VM_BUG_ON(!old.frozen);
  
  			new.counters = old.counters;
  			new.freelist = old.freelist;
  
  			new.frozen = 0;
dcc3be6a5   Alex Shi   slub: Discard sla...
1809
  			if (!new.inuse && (!n || n->nr_partial > s->min_partial))
49e225858   Christoph Lameter   slub: per cpu cac...
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
  				m = M_FREE;
  			else {
  				struct kmem_cache_node *n2 = get_node(s,
  							page_to_nid(page));
  
  				m = M_PARTIAL;
  				if (n != n2) {
  					if (n)
  						spin_unlock(&n->list_lock);
  
  					n = n2;
  					spin_lock(&n->list_lock);
  				}
  			}
  
  			if (l != m) {
4c493a5a5   Shaohua Li   slub: add missed ...
1826
  				if (l == M_PARTIAL) {
49e225858   Christoph Lameter   slub: per cpu cac...
1827
  					remove_partial(n, page);
4c493a5a5   Shaohua Li   slub: add missed ...
1828
1829
  					stat(s, FREE_REMOVE_PARTIAL);
  				} else {
f64ae042d   Shaohua Li   slub: use correct...
1830
1831
  					add_partial(n, page,
  						DEACTIVATE_TO_TAIL);
4c493a5a5   Shaohua Li   slub: add missed ...
1832
1833
  					stat(s, FREE_ADD_PARTIAL);
  				}
49e225858   Christoph Lameter   slub: per cpu cac...
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
  
  				l = m;
  			}
  
  		} while (!cmpxchg_double_slab(s, page,
  				old.freelist, old.counters,
  				new.freelist, new.counters,
  				"unfreezing slab"));
  
  		if (m == M_FREE) {
9ada19342   Shaohua Li   slub: move discar...
1844
1845
  			page->next = discard_page;
  			discard_page = page;
49e225858   Christoph Lameter   slub: per cpu cac...
1846
1847
1848
1849
1850
  		}
  	}
  
  	if (n)
  		spin_unlock(&n->list_lock);
9ada19342   Shaohua Li   slub: move discar...
1851
1852
1853
1854
1855
1856
1857
1858
1859
  
  	while (discard_page) {
  		page = discard_page;
  		discard_page = discard_page->next;
  
  		stat(s, DEACTIVATE_EMPTY);
  		discard_slab(s, page);
  		stat(s, FREE_SLAB);
  	}
49e225858   Christoph Lameter   slub: per cpu cac...
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
  }
  
  /*
   * Put a page that was just frozen (in __slab_free) into a partial page
   * slot if available. This is done while interrupts remain enabled and
   * without disabling preemption. The cmpxchg is racy and may put the
   * partial page onto a random cpu's partial slot.
   *
   * If we did not find a slot then simply move all the partials to the
   * per node partial list.
   */
  int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
  {
  	struct page *oldpage;
  	int pages;
  	int pobjects;
  
  	do {
  		pages = 0;
  		pobjects = 0;
  		oldpage = this_cpu_read(s->cpu_slab->partial);
  
  		if (oldpage) {
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
  				unsigned long flags;
  				/*
  				 * partial array is full. Move the existing
  				 * set to the per node partial list.
  				 */
  				local_irq_save(flags);
  				unfreeze_partials(s);
  				local_irq_restore(flags);
  				pobjects = 0;
  				pages = 0;
  			}
  		}
  
  		pages++;
  		pobjects += page->objects - page->inuse;
  
  		page->pages = pages;
  		page->pobjects = pobjects;
  		page->next = oldpage;
933393f58   Christoph Lameter   percpu: Remove ir...
1905
  	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
49e225858   Christoph Lameter   slub: per cpu cac...
1906
1907
1908
  	stat(s, CPU_PARTIAL_FREE);
  	return pobjects;
  }
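  
  /*
   * Example of the drain above (hypothetical numbers): with
   * s->cpu_partial == 30, once the parked free objects exceed 30 and
   * drain is set, interrupts are disabled, unfreeze_partials() moves the
   * whole per cpu set back to the node partial lists, and the counters
   * restart from zero before the new page is linked in.
   */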
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1909
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1910
  {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1911
  	stat(s, CPUSLAB_FLUSH);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1912
  	deactivate_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1913
1914
1915
1916
  }
  
  /*
   * Flush cpu slab.
6446faa2f   Christoph Lameter   slub: Fix up comm...
1917
   *
81819f0fc   Christoph Lameter   SLUB core
1918
1919
   * Called from IPI handler with interrupts disabled.
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
1920
  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0fc   Christoph Lameter   SLUB core
1921
  {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1922
  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0fc   Christoph Lameter   SLUB core
1923

49e225858   Christoph Lameter   slub: per cpu cac...
1924
1925
1926
1927
1928
1929
  	if (likely(c)) {
  		if (c->page)
  			flush_slab(s, c);
  
  		unfreeze_partials(s);
  	}
81819f0fc   Christoph Lameter   SLUB core
1930
1931
1932
1933
1934
  }
  
  static void flush_cpu_slab(void *d)
  {
  	struct kmem_cache *s = d;
81819f0fc   Christoph Lameter   SLUB core
1935

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1936
  	__flush_cpu_slab(s, smp_processor_id());
81819f0fc   Christoph Lameter   SLUB core
1937
1938
1939
1940
  }
  
  static void flush_all(struct kmem_cache *s)
  {
15c8b6c1a   Jens Axboe   on_each_cpu(): ki...
1941
  	on_each_cpu(flush_cpu_slab, s, 1);
81819f0fc   Christoph Lameter   SLUB core
1942
1943
1944
  }
  
  /*
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1945
1946
1947
1948
1949
1950
   * Check if the objects in a per cpu structure fit numa
   * locality expectations.
   */
  static inline int node_match(struct kmem_cache_cpu *c, int node)
  {
  #ifdef CONFIG_NUMA
2154a3363   Christoph Lameter   slub: Use a const...
1951
  	if (node != NUMA_NO_NODE && c->node != node)
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1952
1953
1954
1955
  		return 0;
  #endif
  	return 1;
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
  static int count_free(struct page *page)
  {
  	return page->objects - page->inuse;
  }
  
  static unsigned long count_partial(struct kmem_cache_node *n,
  					int (*get_count)(struct page *))
  {
  	unsigned long flags;
  	unsigned long x = 0;
  	struct page *page;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  	list_for_each_entry(page, &n->partial, lru)
  		x += get_count(page);
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return x;
  }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1974
1975
1976
1977
1978
1979
1980
1981
  static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return atomic_long_read(&n->total_objects);
  #else
  	return 0;
  #endif
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
  static noinline void
  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
  {
  	int node;
  
  	printk(KERN_WARNING
  		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)
  ",
  		nid, gfpflags);
  	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
  		"default order: %d, min order: %d
  ", s->name, s->objsize,
  		s->size, oo_order(s->oo), oo_order(s->min));
fa5ec8a1f   David Rientjes   slub: add option ...
1995
1996
1997
1998
  	if (oo_order(s->min) > get_order(s->objsize))
  		printk(KERN_WARNING "  %s debugging increased min order, use "
  		       "slub_debug=O to disable.
  ", s->name);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1999
2000
2001
2002
2003
2004
2005
2006
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long nr_slabs;
  		unsigned long nr_objs;
  		unsigned long nr_free;
  
  		if (!n)
  			continue;
26c02cf05   Alexander Beregalov   SLUB: fix build w...
2007
2008
2009
  		nr_free  = count_partial(n, count_free);
  		nr_slabs = node_nr_slabs(n);
  		nr_objs  = node_nr_objs(n);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
2010
2011
2012
2013
2014
2015
2016
  
  		printk(KERN_WARNING
  			"  node %d: slabs: %ld, objs: %ld, free: %ld
  ",
  			node, nr_slabs, nr_objs, nr_free);
  	}
  }
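  
  /*
   * Sample of the resulting warning (illustrative values only):
   *
   *	SLUB: Unable to allocate memory on node -1 (gfp=0x20)
   *	  cache: kmalloc-4096, object size: 4096, buffer size: 4096, default order: 3, min order: 0
   *	  node 0: slabs: 12, objs: 96, free: 0
   */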
497b66f2e   Christoph Lameter   slub: return obje...
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
  static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
  			int node, struct kmem_cache_cpu **pc)
  {
  	void *object;
  	struct kmem_cache_cpu *c;
  	struct page *page = new_slab(s, flags, node);
  
  	if (page) {
  		c = __this_cpu_ptr(s->cpu_slab);
  		if (c->page)
  			flush_slab(s, c);
  
  		/*
  		 * No other reference to the page yet so we can
  		 * muck around with it freely without cmpxchg
  		 */
  		object = page->freelist;
  		page->freelist = NULL;
  
  		stat(s, ALLOC_SLAB);
  		c->node = page_to_nid(page);
  		c->page = page;
  		*pc = c;
  	} else
  		object = NULL;
  
  	return object;
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2045
  /*
213eeb9fd   Christoph Lameter   slub: Extract get...
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
   * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
   * or deactivate the page.
   *
   * The page is still frozen if the return value is not NULL.
   *
   * If this function returns NULL then the page has been unfrozen.
   */
  static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  {
  	struct page new;
  	unsigned long counters;
  	void *freelist;
  
  	do {
  		freelist = page->freelist;
  		counters = page->counters;
  		new.counters = counters;
  		VM_BUG_ON(!new.frozen);
  
  		new.inuse = page->objects;
  		new.frozen = freelist != NULL;
  
  	} while (!cmpxchg_double_slab(s, page,
  		freelist, counters,
  		NULL, new.counters,
  		"get_freelist"));
  
  	return freelist;
  }
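  
  /*
   * Note (illustrative): because new.frozen is set from "freelist != NULL",
   * the same cmpxchg that grabs any remotely freed objects also unfreezes
   * the page when there is nothing to take, which is why a NULL return
   * lets the caller drop c->page (DEACTIVATE_BYPASS) without touching any
   * list locks.
   */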
  
  /*
894b8788d   Christoph Lameter   slub: support con...
2077
2078
2079
   * Slow path. The lockless freelist is empty or we need to perform
   * debugging duties.
   *
894b8788d   Christoph Lameter   slub: support con...
2080
2081
2082
   * Processing is still very fast if new objects have been freed to the
   * regular freelist. In that case we simply take over the regular freelist
   * as the lockless freelist and zap the regular freelist.
81819f0fc   Christoph Lameter   SLUB core
2083
   *
894b8788d   Christoph Lameter   slub: support con...
2084
2085
2086
   * If that is not working then we fall back to the partial lists. We take the
   * first element of the freelist as the object to allocate now and move the
   * rest of the freelist to the lockless freelist.
81819f0fc   Christoph Lameter   SLUB core
2087
   *
894b8788d   Christoph Lameter   slub: support con...
2088
   * And if we were unable to get a new slab from the partial slab lists then
6446faa2f   Christoph Lameter   slub: Fix up comm...
2089
2090
   * we need to allocate a new slab. This is the slowest path since it involves
   * a call to the page allocator and the setup of a new slab.
81819f0fc   Christoph Lameter   SLUB core
2091
   */
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2092
2093
  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  			  unsigned long addr, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
2094
  {
81819f0fc   Christoph Lameter   SLUB core
2095
  	void **object;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
  	unsigned long flags;
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
  	/*
  	 * We may have been preempted and rescheduled on a different
  	 * cpu before disabling interrupts. Need to reload cpu area
  	 * pointer.
  	 */
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
81819f0fc   Christoph Lameter   SLUB core
2107

497b66f2e   Christoph Lameter   slub: return obje...
2108
  	if (!c->page)
81819f0fc   Christoph Lameter   SLUB core
2109
  		goto new_slab;
49e225858   Christoph Lameter   slub: per cpu cac...
2110
  redo:
fc59c0530   Christoph Lameter   slub: Get rid of ...
2111
  	if (unlikely(!node_match(c, node))) {
e36a2652d   Christoph Lameter   slub: Add statist...
2112
  		stat(s, ALLOC_NODE_MISMATCH);
fc59c0530   Christoph Lameter   slub: Get rid of ...
2113
2114
2115
  		deactivate_slab(s, c);
  		goto new_slab;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2116

73736e038   Eric Dumazet   slub: fix a possi...
2117
2118
2119
2120
  	/* must check again c->freelist in case of cpu migration or IRQ */
  	object = c->freelist;
  	if (object)
  		goto load_freelist;
03e404af2   Christoph Lameter   slub: fast releas...
2121

2cfb7455d   Christoph Lameter   slub: Rework allo...
2122
  	stat(s, ALLOC_SLOWPATH);
03e404af2   Christoph Lameter   slub: fast releas...
2123

213eeb9fd   Christoph Lameter   slub: Extract get...
2124
  	object = get_freelist(s, c->page);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2125

49e225858   Christoph Lameter   slub: per cpu cac...
2126
  	if (!object) {
03e404af2   Christoph Lameter   slub: fast releas...
2127
2128
  		c->page = NULL;
  		stat(s, DEACTIVATE_BYPASS);
fc59c0530   Christoph Lameter   slub: Get rid of ...
2129
  		goto new_slab;
03e404af2   Christoph Lameter   slub: fast releas...
2130
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2131

84e554e68   Christoph Lameter   SLUB: Make slub s...
2132
  	stat(s, ALLOC_REFILL);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2133

894b8788d   Christoph Lameter   slub: support con...
2134
  load_freelist:
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2135
  	c->freelist = get_freepointer(s, object);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2136
2137
  	c->tid = next_tid(c->tid);
  	local_irq_restore(flags);
81819f0fc   Christoph Lameter   SLUB core
2138
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2139
  new_slab:
2cfb7455d   Christoph Lameter   slub: Rework allo...
2140

49e225858   Christoph Lameter   slub: per cpu cac...
2141
2142
2143
2144
2145
2146
2147
  	if (c->partial) {
  		c->page = c->partial;
  		c->partial = c->page->next;
  		c->node = page_to_nid(c->page);
  		stat(s, CPU_PARTIAL_ALLOC);
  		c->freelist = NULL;
  		goto redo;
81819f0fc   Christoph Lameter   SLUB core
2148
  	}
49e225858   Christoph Lameter   slub: per cpu cac...
2149
  	/* Then do expensive stuff like retrieving pages from the partial lists */
497b66f2e   Christoph Lameter   slub: return obje...
2150
  	object = get_partial(s, gfpflags, node, c);
b811c202a   Christoph Lameter   SLUB: simplify IR...
2151

497b66f2e   Christoph Lameter   slub: return obje...
2152
  	if (unlikely(!object)) {
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2153

497b66f2e   Christoph Lameter   slub: return obje...
2154
  		object = new_slab_objects(s, gfpflags, node, &c);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2155

497b66f2e   Christoph Lameter   slub: return obje...
2156
2157
2158
  		if (unlikely(!object)) {
  			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
  				slab_out_of_memory(s, gfpflags, node);
9e577e8b4   Christoph Lameter   slub: When alloca...
2159

497b66f2e   Christoph Lameter   slub: return obje...
2160
2161
2162
  			local_irq_restore(flags);
  			return NULL;
  		}
81819f0fc   Christoph Lameter   SLUB core
2163
  	}
2cfb7455d   Christoph Lameter   slub: Rework allo...
2164

497b66f2e   Christoph Lameter   slub: return obje...
2165
  	if (likely(!kmem_cache_debug(s)))
4b6f07504   Christoph Lameter   SLUB: Define func...
2166
  		goto load_freelist;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2167

497b66f2e   Christoph Lameter   slub: return obje...
2168
2169
2170
  	/* Only entered in the debug case */
  	if (!alloc_debug_processing(s, c->page, object, addr))
  		goto new_slab;	/* Slab failed checks. Next slab needed */
894b8788d   Christoph Lameter   slub: support con...
2171

2cfb7455d   Christoph Lameter   slub: Rework allo...
2172
  	c->freelist = get_freepointer(s, object);
442b06bce   Christoph Lameter   slub: Remove node...
2173
  	deactivate_slab(s, c);
15b7c5142   Pekka Enberg   SLUB: Optimize sl...
2174
  	c->node = NUMA_NO_NODE;
a71ae47a2   Christoph Lameter   slub: Fix double ...
2175
2176
  	local_irq_restore(flags);
  	return object;
894b8788d   Christoph Lameter   slub: support con...
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
  }
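  
  /*
   * Summary of the slow path order above (illustrative): reuse c->page if
   * the node matches and objects were freed back to it, otherwise take a
   * page off the per cpu partial list, then fall back to the node partial
   * lists via get_partial(), and only as a last resort allocate a fresh
   * slab through new_slab_objects(); debug caches additionally run
   * alloc_debug_processing() and immediately deactivate the slab.
   */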
  
  /*
   * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
   * have the fastpath folded into their functions. So no function call
   * overhead for requests that can be satisfied on the fastpath.
   *
   * The fastpath works by first checking if the lockless freelist can be used.
   * If not then __slab_alloc is called for slow processing.
   *
   * Otherwise we can simply pick the next object from the lockless free list.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2189
  static __always_inline void *slab_alloc(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2190
  		gfp_t gfpflags, int node, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2191
  {
894b8788d   Christoph Lameter   slub: support con...
2192
  	void **object;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2193
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2194
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2195

c016b0bde   Christoph Lameter   slub: Extract hoo...
2196
  	if (slab_pre_alloc_hook(s, gfpflags))
773ff60e8   Akinobu Mita   SLUB: failslab su...
2197
  		return NULL;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2198

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2199
  redo:
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2200
2201
2202
2203
2204
2205
2206
  
  	/*
  	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
  	 * enabled. We may switch back and forth between cpus while
  	 * reading from one cpu area. That does not matter as long
  	 * as we end up on the original cpu again when doing the cmpxchg.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2207
  	c = __this_cpu_ptr(s->cpu_slab);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2208

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2209
2210
2211
2212
2213
2214
2215
2216
  	/*
  	 * The transaction ids are globally unique per cpu and per operation on
  	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
  	 * occurs on the right processor and that there was no operation on the
  	 * linked list in between.
  	 */
  	tid = c->tid;
  	barrier();
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2217

9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2218
  	object = c->freelist;
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2219
  	if (unlikely(!object || !node_match(c, node)))
894b8788d   Christoph Lameter   slub: support con...
2220

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2221
  		object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788d   Christoph Lameter   slub: support con...
2222
2223
  
  	else {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2224
  		/*
25985edce   Lucas De Marchi   Fix common misspe...
2225
  		 * The cmpxchg will only match if there was no additional
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
  		 * operation and if we are on the right processor.
  		 *
  		 * The cmpxchg does the following atomically (without lock semantics!)
  		 * 1. Relocate first pointer to the current per cpu area.
  		 * 2. Verify that tid and freelist have not been changed
  		 * 3. If they were not changed replace tid and freelist
  		 *
  		 * Since this is without lock semantics the protection is only against
  		 * code executing on this cpu *not* from access by other cpus.
  		 */
933393f58   Christoph Lameter   percpu: Remove ir...
2236
  		if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2237
2238
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				object, tid,
1393d9a18   Christoph Lameter   slub: Make CONFIG...
2239
  				get_freepointer_safe(s, object), next_tid(tid)))) {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2240
2241
2242
2243
  
  			note_cmpxchg_failure("slab_alloc", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2244
  		stat(s, ALLOC_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2245
  	}
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2246

74e2134ff   Pekka Enberg   SLUB: Fix __GFP_Z...
2247
  	if (unlikely(gfpflags & __GFP_ZERO) && object)
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2248
  		memset(object, 0, s->objsize);
d07dbea46   Christoph Lameter   Slab allocators: ...
2249

c016b0bde   Christoph Lameter   slub: Extract hoo...
2250
  	slab_post_alloc_hook(s, gfpflags, object);
5a896d9e7   Vegard Nossum   slub: add hooks f...
2251

894b8788d   Christoph Lameter   slub: support con...
2252
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2253
2254
2255
2256
  }
  
  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  {
2154a3363   Christoph Lameter   slub: Use a const...
2257
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2258

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2259
  	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2260
2261
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2262
2263
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
0f24f1287   Li Zefan   tracing, slab: De...
2264
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2265
2266
2267
2268
2269
2270
2271
2272
2273
  void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
  {
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_alloc_trace);
  
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2274
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2275
2276
2277
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2278
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2279
  EXPORT_SYMBOL(kmalloc_order_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2280
  #endif
81819f0fc   Christoph Lameter   SLUB core
2281
2282
2283
  #ifdef CONFIG_NUMA
  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2284
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2285
2286
  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
  				    s->objsize, s->size, gfpflags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2287
2288
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2289
2290
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0fc   Christoph Lameter   SLUB core
2291

0f24f1287   Li Zefan   tracing, slab: De...
2292
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2293
  void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2294
  				    gfp_t gfpflags,
4a92379bd   Richard Kennedy   slub tracing: mov...
2295
  				    int node, size_t size)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2296
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2297
2298
2299
2300
2301
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
  
  	trace_kmalloc_node(_RET_IP_, ret,
  			   size, s->size, gfpflags, node);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2302
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2303
  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2304
  #endif
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
2305
  #endif
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2306

81819f0fc   Christoph Lameter   SLUB core
2307
  /*
894b8788d   Christoph Lameter   slub: support con...
2308
2309
   * Slow path handling. This may still be called frequently since objects
   * have a longer lifetime than the cpu slabs in most processing loads.
81819f0fc   Christoph Lameter   SLUB core
2310
   *
894b8788d   Christoph Lameter   slub: support con...
2311
2312
2313
   * So we still attempt to reduce cache line usage. Just take the slab
   * lock and free the item. If there is no additional partial page
   * handling required then we can return immediately.
81819f0fc   Christoph Lameter   SLUB core
2314
   */
894b8788d   Christoph Lameter   slub: support con...
2315
  static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2316
  			void *x, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
2317
2318
2319
  {
  	void *prior;
  	void **object = (void *)x;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2320
2321
2322
2323
2324
  	int was_frozen;
  	int inuse;
  	struct page new;
  	unsigned long counters;
  	struct kmem_cache_node *n = NULL;
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
2325
  	unsigned long uninitialized_var(flags);
81819f0fc   Christoph Lameter   SLUB core
2326

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2327
  	stat(s, FREE_SLOWPATH);
81819f0fc   Christoph Lameter   SLUB core
2328

8dc16c6c0   Christoph Lameter   slub: Move debug ...
2329
  	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
80f08c191   Christoph Lameter   slub: Avoid disab...
2330
  		return;
6446faa2f   Christoph Lameter   slub: Fix up comm...
2331

2cfb7455d   Christoph Lameter   slub: Rework allo...
2332
2333
2334
2335
2336
2337
2338
2339
  	do {
  		prior = page->freelist;
  		counters = page->counters;
  		set_freepointer(s, object, prior);
  		new.counters = counters;
  		was_frozen = new.frozen;
  		new.inuse--;
  		if ((!new.inuse || !prior) && !was_frozen && !n) {
49e225858   Christoph Lameter   slub: per cpu cac...
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
  
  			if (!kmem_cache_debug(s) && !prior)
  
  				/*
  				 * Slab was on no list before and will be partially empty.
  				 * We can defer the list move and instead freeze it.
  				 */
  				new.frozen = 1;
  
  			else { /* Needs to be taken off a list */
  
  				n = get_node(s, page_to_nid(page));
  				/*
  				 * Speculatively acquire the list_lock.
  				 * If the cmpxchg does not succeed then we may
  				 * drop the list_lock without any processing.
  				 *
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
  				spin_lock_irqsave(&n->list_lock, flags);
  
  			}
2cfb7455d   Christoph Lameter   slub: Rework allo...
2363
2364
  		}
  		inuse = new.inuse;
81819f0fc   Christoph Lameter   SLUB core
2365

2cfb7455d   Christoph Lameter   slub: Rework allo...
2366
2367
2368
2369
  	} while (!cmpxchg_double_slab(s, page,
  		prior, counters,
  		object, new.counters,
  		"__slab_free"));
81819f0fc   Christoph Lameter   SLUB core
2370

2cfb7455d   Christoph Lameter   slub: Rework allo...
2371
  	if (likely(!n)) {
49e225858   Christoph Lameter   slub: per cpu cac...
2372
2373
2374
2375
2376
2377
2378
2379
2380
  
  		/*
  		 * If we just froze the page then put it onto the
  		 * per cpu partial list.
  		 */
  		if (new.frozen && !was_frozen)
  			put_cpu_partial(s, page, 1);
  
  		/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
2381
2382
2383
2384
2385
  		 * The list lock was not taken therefore no list
  		 * activity can be necessary.
  		 */
  		if (was_frozen)
  			stat(s, FREE_FROZEN);
80f08c191   Christoph Lameter   slub: Avoid disab...
2386
  		return;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2387
  	}
81819f0fc   Christoph Lameter   SLUB core
2388
2389
  
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
2390
2391
  	 * was_frozen may have been set after we acquired the list_lock in
  	 * an earlier loop. So we need to check it here again.
81819f0fc   Christoph Lameter   SLUB core
2392
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
2393
2394
2395
2396
2397
  	if (was_frozen)
  		stat(s, FREE_FROZEN);
  	else {
  		if (unlikely(!inuse && n->nr_partial > s->min_partial))
  			goto slab_empty;
81819f0fc   Christoph Lameter   SLUB core
2398

2cfb7455d   Christoph Lameter   slub: Rework allo...
2399
2400
2401
2402
2403
2404
  		/*
  		 * Objects left in the slab. If it was not on the partial list before
  		 * then add it.
  		 */
  		if (unlikely(!prior)) {
  			remove_full(s, page);
136333d10   Shaohua Li   slub: explicitly ...
2405
  			add_partial(n, page, DEACTIVATE_TO_TAIL);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2406
2407
  			stat(s, FREE_ADD_PARTIAL);
  		}
8ff12cfc0   Christoph Lameter   SLUB: Support for...
2408
  	}
80f08c191   Christoph Lameter   slub: Avoid disab...
2409
  	spin_unlock_irqrestore(&n->list_lock, flags);
81819f0fc   Christoph Lameter   SLUB core
2410
2411
2412
  	return;
  
  slab_empty:
a973e9dd1   Christoph Lameter   Revert "unique en...
2413
  	if (prior) {
81819f0fc   Christoph Lameter   SLUB core
2414
  		/*
6fbabb20f   Christoph Lameter   slub: Fix full li...
2415
  		 * Slab on the partial list.
81819f0fc   Christoph Lameter   SLUB core
2416
  		 */
5cc6eee8a   Christoph Lameter   slub: explicit li...
2417
  		remove_partial(n, page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2418
  		stat(s, FREE_REMOVE_PARTIAL);
6fbabb20f   Christoph Lameter   slub: Fix full li...
2419
2420
2421
  	} else
  		/* Slab must be on the full list */
  		remove_full(s, page);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2422

80f08c191   Christoph Lameter   slub: Avoid disab...
2423
  	spin_unlock_irqrestore(&n->list_lock, flags);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2424
  	stat(s, FREE_SLAB);
81819f0fc   Christoph Lameter   SLUB core
2425
  	discard_slab(s, page);
81819f0fc   Christoph Lameter   SLUB core
2426
  }
894b8788d   Christoph Lameter   slub: support con...
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
  /*
   * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
   * can perform fastpath freeing without additional function calls.
   *
   * The fastpath is only possible if we are freeing to the current cpu slab
   * of this processor. This is typically the case if we have just allocated
   * the item before.
   *
   * If fastpath is not possible then fall back to __slab_free where we deal
   * with all sorts of special processing.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2438
  static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2439
  			struct page *page, void *x, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2440
2441
  {
  	void **object = (void *)x;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2442
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2443
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2444

c016b0bde   Christoph Lameter   slub: Extract hoo...
2445
  	slab_free_hook(s, x);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2446
2447
2448
2449
2450
2451
2452
  redo:
  	/*
  	 * Determine the current cpu's per cpu slab.
  	 * The cpu may change afterward. However that does not matter since
  	 * data is retrieved via this pointer. If we are on the same cpu
  	 * during the cmpxchg then the free will succeed.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2453
  	c = __this_cpu_ptr(s->cpu_slab);
c016b0bde   Christoph Lameter   slub: Extract hoo...
2454

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2455
2456
  	tid = c->tid;
  	barrier();
c016b0bde   Christoph Lameter   slub: Extract hoo...
2457

442b06bce   Christoph Lameter   slub: Remove node...
2458
  	if (likely(page == c->page)) {
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2459
  		set_freepointer(s, object, c->freelist);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2460

933393f58   Christoph Lameter   percpu: Remove ir...
2461
  		if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2462
2463
2464
2465
2466
2467
2468
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				c->freelist, tid,
  				object, next_tid(tid)))) {
  
  			note_cmpxchg_failure("slab_free", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2469
  		stat(s, FREE_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2470
  	} else
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2471
  		__slab_free(s, page, x, addr);
894b8788d   Christoph Lameter   slub: support con...
2472

894b8788d   Christoph Lameter   slub: support con...
2473
  }
81819f0fc   Christoph Lameter   SLUB core
2474
2475
  void kmem_cache_free(struct kmem_cache *s, void *x)
  {
77c5e2d01   Christoph Lameter   slub: fix object ...
2476
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
2477

b49af68ff   Christoph Lameter   Add virt_to_head_...
2478
  	page = virt_to_head_page(x);
81819f0fc   Christoph Lameter   SLUB core
2479

ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2480
  	slab_free(s, page, x, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2481

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2482
  	trace_kmem_cache_free(_RET_IP_, x);
81819f0fc   Christoph Lameter   SLUB core
2483
2484
  }
  EXPORT_SYMBOL(kmem_cache_free);
81819f0fc   Christoph Lameter   SLUB core
2485
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2486
2487
2488
2489
   * Object placement in a slab is made very easy because we always start at
   * offset 0. If we tune the size of the object to the alignment then we can
   * get the required alignment by putting one properly sized object after
   * another.
81819f0fc   Christoph Lameter   SLUB core
2490
2491
2492
2493
   *
   * Notice that the allocation order determines the sizes of the per cpu
   * caches. Each processor always has one slab available for allocations.
   * Increasing the allocation order reduces the number of times that slabs
672bba3a4   Christoph Lameter   SLUB: update comm...
2494
   * must be moved on and off the partial lists and is therefore a factor in
81819f0fc   Christoph Lameter   SLUB core
2495
   * locking overhead.
81819f0fc   Christoph Lameter   SLUB core
2496
2497
2498
2499
2500
2501
2502
2503
2504
   */
  
  /*
   * Minimum / Maximum order of slab pages. This influences locking overhead
   * and slab fragmentation. A higher order reduces the number of partial slabs
   * and increases the number of allocations possible without having to
   * take the list_lock.
   */
  static int slub_min_order;
114e9e89e   Christoph Lameter   slub: Drop DEFAUL...
2505
  static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2506
  static int slub_min_objects;
81819f0fc   Christoph Lameter   SLUB core
2507
2508
2509
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
672bba3a4   Christoph Lameter   SLUB: update comm...
2510
   * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0fc   Christoph Lameter   SLUB core
2511
2512
2513
2514
   */
  static int slub_nomerge;
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2515
2516
   * Calculate the order of allocation given a slab object size.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2517
2518
2519
2520
   * The order of allocation has significant impact on performance and other
   * system components. Generally order 0 allocations should be preferred since
   * order 0 does not cause fragmentation in the page allocator. Larger objects
   * can be problematic to put into order 0 slabs because there may be too much
c124f5b54   Christoph Lameter   slub: pack object...
2521
   * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a4   Christoph Lameter   SLUB: update comm...
2522
2523
2524
2525
2526
2527
   * would be wasted.
   *
   * In order to reach satisfactory performance we must ensure that a minimum
   * number of objects is in one slab. Otherwise we may generate too much
   * activity on the partial lists which requires taking the list_lock. This is
   * less a concern for large slabs though which are rarely used.
81819f0fc   Christoph Lameter   SLUB core
2528
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2529
2530
2531
2532
   * slub_max_order specifies the order where we begin to stop considering the
   * number of objects in a slab as critical. If we reach slub_max_order then
   * we try to keep the page order as low as possible. So we accept more waste
   * of space in favor of a small page order.
81819f0fc   Christoph Lameter   SLUB core
2533
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2534
2535
2536
2537
   * Higher order allocations also allow the placement of more objects in a
   * slab and thereby reduce object handling overhead. If the user has
   * requested a higher minimum order then we start with that one instead of
   * the smallest order which will fit the object.
81819f0fc   Christoph Lameter   SLUB core
2538
   */
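  /*
   * Worked example (assuming PAGE_SIZE == 4096, slub_min_order == 0 and
   * reserved == 0): a 192 byte object with min_objects == 16 starts the
   * search below at order 0. An order-0 slab holds 21 such objects with
   * only 64 bytes left over, well under the 4096 / 16 == 256 byte waste
   * limit, so order 0 is chosen. calculate_order() relaxes that limit
   * from 1/16 to 1/8 and then 1/4 of the slab, and then lowers
   * min_objects, before it ever falls back to a single-object slab.
   */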
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2539
  static inline int slab_order(int size, int min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2540
  				int max_order, int fract_leftover, int reserved)
81819f0fc   Christoph Lameter   SLUB core
2541
2542
2543
  {
  	int order;
  	int rem;
6300ea750   Christoph Lameter   SLUB: ensure that...
2544
  	int min_order = slub_min_order;
81819f0fc   Christoph Lameter   SLUB core
2545

ab9a0f196   Lai Jiangshan   slub: automatical...
2546
  	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
2547
  		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b264641   Christoph Lameter   slub: Store max n...
2548

6300ea750   Christoph Lameter   SLUB: ensure that...
2549
  	for (order = max(min_order,
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2550
2551
  				fls(min_objects * size - 1) - PAGE_SHIFT);
  			order <= max_order; order++) {
81819f0fc   Christoph Lameter   SLUB core
2552

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2553
  		unsigned long slab_size = PAGE_SIZE << order;
81819f0fc   Christoph Lameter   SLUB core
2554

ab9a0f196   Lai Jiangshan   slub: automatical...
2555
  		if (slab_size < min_objects * size + reserved)
81819f0fc   Christoph Lameter   SLUB core
2556
  			continue;
ab9a0f196   Lai Jiangshan   slub: automatical...
2557
  		rem = (slab_size - reserved) % size;
81819f0fc   Christoph Lameter   SLUB core
2558

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2559
  		if (rem <= slab_size / fract_leftover)
81819f0fc   Christoph Lameter   SLUB core
2560
2561
2562
  			break;
  
  	}
672bba3a4   Christoph Lameter   SLUB: update comm...
2563

81819f0fc   Christoph Lameter   SLUB core
2564
2565
  	return order;
  }
ab9a0f196   Lai Jiangshan   slub: automatical...
2566
  static inline int calculate_order(int size, int reserved)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2567
2568
2569
2570
  {
  	int order;
  	int min_objects;
  	int fraction;
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2571
  	int max_objects;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
  
  	/*
  	 * Attempt to find the best configuration for a slab. This
  	 * works by first attempting to generate a layout with
  	 * the best configuration and backing off gradually.
  	 *
  	 * First we reduce the acceptable waste in a slab. Then
  	 * we reduce the minimum objects required in a slab.
  	 */
  	min_objects = slub_min_objects;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2582
2583
  	if (!min_objects)
  		min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f196   Lai Jiangshan   slub: automatical...
2584
  	max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2585
  	min_objects = min(min_objects, max_objects);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2586
  	while (min_objects > 1) {
c124f5b54   Christoph Lameter   slub: pack object...
2587
  		fraction = 16;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2588
2589
  		while (fraction >= 4) {
  			order = slab_order(size, min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2590
  					slub_max_order, fraction, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2591
2592
2593
2594
  			if (order <= slub_max_order)
  				return order;
  			fraction /= 2;
  		}
5086c389c   Amerigo Wang   SLUB: Fix some co...
2595
  		min_objects--;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2596
2597
2598
2599
2600
2601
  	}
  
  	/*
  	 * We were unable to place multiple objects in a slab. Now
  	 * let's see if we can place a single object there.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2602
  	order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2603
2604
2605
2606
2607
2608
  	if (order <= slub_max_order)
  		return order;
  
  	/*
  	 * Doh this slab cannot be placed using slub_max_order.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2609
  	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf5909   David Rientjes   slub: enforce MAX...
2610
  	if (order < MAX_ORDER)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2611
2612
2613
  		return order;
  	return -ENOSYS;
  }
81819f0fc   Christoph Lameter   SLUB core
2614
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2615
   * Figure out what the alignment of the objects will be.
81819f0fc   Christoph Lameter   SLUB core
2616
2617
2618
2619
2620
   */
  static unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
2621
2622
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
81819f0fc   Christoph Lameter   SLUB core
2623
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
2624
2625
  	 * The hardware cache alignment cannot override the specified
  	 * alignment though. If that is greater then use it.
81819f0fc   Christoph Lameter   SLUB core
2626
  	 */
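  	/*
  	 * For example, assuming cache_line_size() == 64: a SLAB_HWCACHE_ALIGN
  	 * cache of 24 byte objects has ralign halved from 64 to 32 below
  	 * (24 <= 32 but not <= 16), so two objects share a cache line rather
  	 * than each being padded to 64 bytes, while a 100 byte object keeps
  	 * the full 64 byte alignment.
  	 */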
b62103867   Nick Piggin   slub: Do not cros...
2627
2628
2629
2630
2631
2632
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
81819f0fc   Christoph Lameter   SLUB core
2633
2634
  
  	if (align < ARCH_SLAB_MINALIGN)
b62103867   Nick Piggin   slub: Do not cros...
2635
  		align = ARCH_SLAB_MINALIGN;
81819f0fc   Christoph Lameter   SLUB core
2636
2637
2638
  
  	return ALIGN(align, sizeof(void *));
  }
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2639
2640
  static void
  init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2641
2642
  {
  	n->nr_partial = 0;
81819f0fc   Christoph Lameter   SLUB core
2643
2644
  	spin_lock_init(&n->list_lock);
  	INIT_LIST_HEAD(&n->partial);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2645
  #ifdef CONFIG_SLUB_DEBUG
0f389ec63   Christoph Lameter   slub: No need for...
2646
  	atomic_long_set(&n->nr_slabs, 0);
02b71b701   Salman Qazi   slub: fixed unini...
2647
  	atomic_long_set(&n->total_objects, 0);
643b11384   Christoph Lameter   slub: enable trac...
2648
  	INIT_LIST_HEAD(&n->full);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2649
  #endif
81819f0fc   Christoph Lameter   SLUB core
2650
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2651
  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2652
  {
6c182dc0d   Christoph Lameter   slub: Remove stat...
2653
2654
  	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
  			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2655

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2656
  	/*
d4d84fef6   Chris Metcalf   slub: always alig...
2657
2658
  	 * Must align to double word boundary for the double cmpxchg
  	 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2659
  	 */
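  	/*
  	 * On x86-64, for instance, cmpxchg16b requires a 16-byte aligned
  	 * operand, which is exactly 2 * sizeof(void *) below.
  	 */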
d4d84fef6   Chris Metcalf   slub: always alig...
2660
2661
  	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
  				     2 * sizeof(void *));
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2662
2663
2664
2665
2666
  
  	if (!s->cpu_slab)
  		return 0;
  
  	init_kmem_cache_cpus(s);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2667

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2668
  	return 1;
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2669
  }
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2670

51df11428   Christoph Lameter   slub: Dynamically...
2671
  static struct kmem_cache *kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
2672
2673
2674
2675
2676
2677
  /*
   * No kmalloc_node yet so do it by hand. We know that this is the first
   * slab on the node for this slabcache. There are no concurrent accesses
   * possible.
   *
   * Note that this function only works on the kmem_cache_node cache
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2678
2679
   * when allocating for the kmem_cache_node cache. This is used for bootstrapping
   * memory on a fresh node that has no slab structures yet.
81819f0fc   Christoph Lameter   SLUB core
2680
   */
55136592f   Christoph Lameter   slub: Remove dyna...
2681
  static void early_kmem_cache_node_alloc(int node)
81819f0fc   Christoph Lameter   SLUB core
2682
2683
2684
  {
  	struct page *page;
  	struct kmem_cache_node *n;
51df11428   Christoph Lameter   slub: Dynamically...
2685
  	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0fc   Christoph Lameter   SLUB core
2686

51df11428   Christoph Lameter   slub: Dynamically...
2687
  	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0fc   Christoph Lameter   SLUB core
2688
2689
  
  	BUG_ON(!page);
a2f92ee7e   Christoph Lameter   SLUB: do not fail...
2690
2691
2692
2693
2694
2695
2696
2697
  	if (page_to_nid(page) != node) {
  		printk(KERN_ERR "SLUB: Unable to allocate memory from "
  				"node %d
  ", node);
  		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
  				"in order to be able to continue
  ");
  	}
81819f0fc   Christoph Lameter   SLUB core
2698
2699
  	n = page->freelist;
  	BUG_ON(!n);
51df11428   Christoph Lameter   slub: Dynamically...
2700
  	page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea11   Christoph Lameter   slub: Prepare inu...
2701
  	page->inuse = 1;
8cb0a5068   Christoph Lameter   slub: Move page->...
2702
  	page->frozen = 0;
51df11428   Christoph Lameter   slub: Dynamically...
2703
  	kmem_cache_node->node[node] = n;
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2704
  #ifdef CONFIG_SLUB_DEBUG
f7cb19336   Christoph Lameter   SLUB: Pass active...
2705
  	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df11428   Christoph Lameter   slub: Dynamically...
2706
  	init_tracking(kmem_cache_node, n);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2707
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
2708
2709
  	init_kmem_cache_node(n, kmem_cache_node);
  	inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2710

136333d10   Shaohua Li   slub: explicitly ...
2711
  	add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0fc   Christoph Lameter   SLUB core
2712
2713
2714
2715
2716
  }
  
  static void free_kmem_cache_nodes(struct kmem_cache *s)
  {
  	int node;
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2717
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2718
  		struct kmem_cache_node *n = s->node[node];
51df11428   Christoph Lameter   slub: Dynamically...
2719

73367bd8e   Alexander Duyck   slub: move kmem_c...
2720
  		if (n)
51df11428   Christoph Lameter   slub: Dynamically...
2721
  			kmem_cache_free(kmem_cache_node, n);
81819f0fc   Christoph Lameter   SLUB core
2722
2723
2724
  		s->node[node] = NULL;
  	}
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2725
  static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2726
2727
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
2728

f64dc58c5   Christoph Lameter   Memoryless nodes:...
2729
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2730
  		struct kmem_cache_node *n;
73367bd8e   Alexander Duyck   slub: move kmem_c...
2731
  		if (slab_state == DOWN) {
55136592f   Christoph Lameter   slub: Remove dyna...
2732
  			early_kmem_cache_node_alloc(node);
73367bd8e   Alexander Duyck   slub: move kmem_c...
2733
2734
  			continue;
  		}
51df11428   Christoph Lameter   slub: Dynamically...
2735
  		n = kmem_cache_alloc_node(kmem_cache_node,
55136592f   Christoph Lameter   slub: Remove dyna...
2736
  						GFP_KERNEL, node);
81819f0fc   Christoph Lameter   SLUB core
2737

73367bd8e   Alexander Duyck   slub: move kmem_c...
2738
2739
2740
  		if (!n) {
  			free_kmem_cache_nodes(s);
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
2741
  		}
73367bd8e   Alexander Duyck   slub: move kmem_c...
2742

81819f0fc   Christoph Lameter   SLUB core
2743
  		s->node[node] = n;
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2744
  		init_kmem_cache_node(n, s);
81819f0fc   Christoph Lameter   SLUB core
2745
2746
2747
  	}
  	return 1;
  }
81819f0fc   Christoph Lameter   SLUB core
2748

c0bdb232b   David Rientjes   slub: rename calc...
2749
  static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d88   David Rientjes   slub: move min_pa...
2750
2751
2752
2753
2754
2755
2756
  {
  	if (min < MIN_PARTIAL)
  		min = MIN_PARTIAL;
  	else if (min > MAX_PARTIAL)
  		min = MAX_PARTIAL;
  	s->min_partial = min;
  }
81819f0fc   Christoph Lameter   SLUB core
2757
2758
2759
2760
  /*
   * calculate_sizes() determines the order and the distribution of data within
   * a slab object.
   */
06b285dc3   Christoph Lameter   slub: Make the or...
2761
  static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0fc   Christoph Lameter   SLUB core
2762
2763
2764
2765
  {
  	unsigned long flags = s->flags;
  	unsigned long size = s->objsize;
  	unsigned long align = s->align;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2766
  	int order;
81819f0fc   Christoph Lameter   SLUB core
2767
2768
  
  	/*
d8b42bf54   Christoph Lameter   slub: Rearrange #...
2769
2770
2771
2772
2773
2774
2775
2776
  	 * Round up object size to the next word boundary. We can only
  	 * place the free pointer at word boundaries and this determines
  	 * the possible location of the free pointer.
  	 */
  	size = ALIGN(size, sizeof(void *));
  
  #ifdef CONFIG_SLUB_DEBUG
  	/*
81819f0fc   Christoph Lameter   SLUB core
2777
2778
2779
2780
2781
  	 * Determine if we can poison the object itself. If the user of
  	 * the slab may touch the object after free or before allocation
  	 * then we should never poison the object itself.
  	 */
  	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f2   Christoph Lameter   Slab allocators: ...
2782
  			!s->ctor)
81819f0fc   Christoph Lameter   SLUB core
2783
2784
2785
  		s->flags |= __OBJECT_POISON;
  	else
  		s->flags &= ~__OBJECT_POISON;
81819f0fc   Christoph Lameter   SLUB core
2786
2787
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2788
  	 * If we are Redzoning then check if there is some space between the
81819f0fc   Christoph Lameter   SLUB core
2789
  	 * end of the object and the free pointer. If not then add an
672bba3a4   Christoph Lameter   SLUB: update comm...
2790
  	 * additional word to have some bytes to store Redzone information.
81819f0fc   Christoph Lameter   SLUB core
2791
2792
2793
  	 */
  	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2794
  #endif
81819f0fc   Christoph Lameter   SLUB core
2795
2796
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2797
2798
  	 * With that we have determined the number of bytes in actual use
  	 * by the object. This is the potential offset to the free pointer.
81819f0fc   Christoph Lameter   SLUB core
2799
2800
2801
2802
  	 */
  	s->inuse = size;
  
  	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f2   Christoph Lameter   Slab allocators: ...
2803
  		s->ctor)) {
81819f0fc   Christoph Lameter   SLUB core
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
  		/*
  		 * Relocate free pointer after the object if it is not
  		 * permitted to overwrite the first word of the object on
  		 * kmem_cache_free.
  		 *
  		 * This is the case if we do RCU, have a constructor or
  		 * destructor or are poisoning the objects.
  		 */
  		s->offset = size;
  		size += sizeof(void *);
  	}
c12b3c625   Christoph Lameter   SLUB Debug: Fix o...
2815
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
2816
2817
2818
2819
2820
2821
  	if (flags & SLAB_STORE_USER)
  		/*
  		 * Need to store information about allocs and frees after
  		 * the object.
  		 */
  		size += 2 * sizeof(struct track);
be7b3fbce   Christoph Lameter   SLUB: after objec...
2822
  	if (flags & SLAB_RED_ZONE)
81819f0fc   Christoph Lameter   SLUB core
2823
2824
2825
2826
  		/*
  		 * Add some empty padding so that we can catch
  		 * overwrites from earlier objects rather than let
  		 * tracking information or the free pointer be
0211a9c85   Frederik Schwarzer   trivial: fix an -...
2827
  		 * corrupted if a user writes before the start
81819f0fc   Christoph Lameter   SLUB core
2828
2829
2830
  		 * of the object.
  		 */
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2831
  #endif
672bba3a4   Christoph Lameter   SLUB: update comm...
2832

81819f0fc   Christoph Lameter   SLUB core
2833
2834
  	/*
  	 * Determine the alignment based on various parameters that the
65c02d4cf   Christoph Lameter   SLUB: add support...
2835
2836
  	 * user specified and the dynamic determination of cache line size
  	 * on bootup.
81819f0fc   Christoph Lameter   SLUB core
2837
2838
  	 */
  	align = calculate_alignment(flags, align, s->objsize);
dcb0ce1bd   Zhang, Yanmin   slub: change kmem...
2839
  	s->align = align;
81819f0fc   Christoph Lameter   SLUB core
2840
2841
2842
2843
2844
2845
2846
2847
  
  	/*
  	 * SLUB stores one object immediately after another beginning from
  	 * offset 0. In order to align the objects we have to simply size
  	 * each object to conform to the alignment.
  	 */
  	size = ALIGN(size, align);
  	s->size = size;
06b285dc3   Christoph Lameter   slub: Make the or...
2848
2849
2850
  	if (forced_order >= 0)
  		order = forced_order;
  	else
ab9a0f196   Lai Jiangshan   slub: automatical...
2851
  		order = calculate_order(size, s->reserved);
81819f0fc   Christoph Lameter   SLUB core
2852

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2853
  	if (order < 0)
81819f0fc   Christoph Lameter   SLUB core
2854
  		return 0;
b7a49f0d4   Christoph Lameter   slub: Determine g...
2855
  	s->allocflags = 0;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2856
  	if (order)
b7a49f0d4   Christoph Lameter   slub: Determine g...
2857
2858
2859
2860
2861
2862
2863
  		s->allocflags |= __GFP_COMP;
  
  	if (s->flags & SLAB_CACHE_DMA)
  		s->allocflags |= SLUB_DMA;
  
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		s->allocflags |= __GFP_RECLAIMABLE;
81819f0fc   Christoph Lameter   SLUB core
2864
2865
2866
  	/*
  	 * Determine the number of objects per slab
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2867
2868
  	s->oo = oo_make(order, size, s->reserved);
  	s->min = oo_make(get_order(size), size, s->reserved);
205ab99dd   Christoph Lameter   slub: Update stat...
2869
2870
  	if (oo_objects(s->oo) > oo_objects(s->max))
  		s->max = s->oo;
81819f0fc   Christoph Lameter   SLUB core
2871

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2872
  	return !!oo_objects(s->oo);
81819f0fc   Christoph Lameter   SLUB core
2873
2874
  
  }
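  /*
   * Layout example, assuming a 64-bit machine, PAGE_SIZE == 4096, no debug
   * flags and no SLAB_HWCACHE_ALIGN: a cache with objsize == 52, no
   * constructor and no RCU is rounded up to 56 bytes, inuse becomes 56 and
   * the free pointer can overlay the (unused) object, so s->offset stays 0.
   * With 8 byte alignment the final s->size is 56 and an order-0 slab
   * holds 73 objects. Red zoning and SLAB_STORE_USER grow the object as
   * described above, while RCU, poisoning or a constructor additionally
   * force the free pointer out behind the object (s->offset != 0).
   */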
55136592f   Christoph Lameter   slub: Remove dyna...
2875
  static int kmem_cache_open(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
2876
2877
  		const char *name, size_t size,
  		size_t align, unsigned long flags,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
2878
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
2879
2880
2881
2882
  {
  	memset(s, 0, kmem_size);
  	s->name = name;
  	s->ctor = ctor;
81819f0fc   Christoph Lameter   SLUB core
2883
  	s->objsize = size;
81819f0fc   Christoph Lameter   SLUB core
2884
  	s->align = align;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
2885
  	s->flags = kmem_cache_flags(size, flags, name, ctor);
ab9a0f196   Lai Jiangshan   slub: automatical...
2886
  	s->reserved = 0;
81819f0fc   Christoph Lameter   SLUB core
2887

da9a638c6   Lai Jiangshan   slub,rcu: don't a...
2888
2889
  	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
  		s->reserved = sizeof(struct rcu_head);
81819f0fc   Christoph Lameter   SLUB core
2890

06b285dc3   Christoph Lameter   slub: Make the or...
2891
  	if (!calculate_sizes(s, -1))
81819f0fc   Christoph Lameter   SLUB core
2892
  		goto error;
3de472138   David Rientjes   slub: use size an...
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
  	if (disable_higher_order_debug) {
  		/*
  		 * Disable debugging flags that store metadata if the min slab
  		 * order increased.
  		 */
  		if (get_order(s->size) > get_order(s->objsize)) {
  			s->flags &= ~DEBUG_METADATA_FLAGS;
  			s->offset = 0;
  			if (!calculate_sizes(s, -1))
  				goto error;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
2905

2565409fc   Heiko Carstens   mm,x86,um: move C...
2906
2907
  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef518   Christoph Lameter   slub: Add cmpxchg...
2908
2909
2910
2911
  	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
  		/* Enable fast mode */
  		s->flags |= __CMPXCHG_DOUBLE;
  #endif
3b89d7d88   David Rientjes   slub: move min_pa...
2912
2913
2914
2915
  	/*
  	 * The larger the object size is, the more pages we want on the partial
  	 * list to avoid pounding the page allocator excessively.
  	 */
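  	/*
  	 * For example, s->size == 4096 gives ilog2(4096) / 2 == 6, so an
  	 * empty slab of such a cache is only discarded in __slab_free() once
  	 * the node holds more than six partial slabs; very small caches are
  	 * clamped up to MIN_PARTIAL by set_min_partial().
  	 */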
49e225858   Christoph Lameter   slub: per cpu cac...
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
  	set_min_partial(s, ilog2(s->size) / 2);
  
  	/*
  	 * cpu_partial determines the maximum number of objects kept in the
  	 * per cpu partial lists of a processor.
  	 *
  	 * Per cpu partial lists mainly contain slabs that just have one
  	 * object freed. If they are used for allocation then they can be
  	 * filled up again with minimal effort. The slab will never hit the
  	 * per node partial lists and therefore no locking will be required.
  	 *
  	 * This setting also determines
  	 *
  	 * A) The number of objects from per cpu partial slabs dumped to the
  	 *    per node list when we reach the limit.
9f2649041   Alex Shi   slub: correct com...
2931
  	 * B) The number of objects in cpu partial slabs to extract from the
49e225858   Christoph Lameter   slub: per cpu cac...
2932
2933
2934
  	 *    per node list when we run out of per cpu objects. We only fetch 50%
  	 *    to keep some capacity around for frees.
  	 */
8f1e33dae   Christoph Lameter   slub: Switch per ...
2935
2936
2937
  	if (kmem_cache_debug(s))
  		s->cpu_partial = 0;
  	else if (s->size >= PAGE_SIZE)
49e225858   Christoph Lameter   slub: per cpu cac...
2938
2939
2940
2941
2942
2943
2944
  		s->cpu_partial = 2;
  	else if (s->size >= 1024)
  		s->cpu_partial = 6;
  	else if (s->size >= 256)
  		s->cpu_partial = 13;
  	else
  		s->cpu_partial = 30;
81819f0fc   Christoph Lameter   SLUB core
2945
2946
  	s->refcount = 1;
  #ifdef CONFIG_NUMA
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
2947
  	s->remote_node_defrag_ratio = 1000;
81819f0fc   Christoph Lameter   SLUB core
2948
  #endif
55136592f   Christoph Lameter   slub: Remove dyna...
2949
  	if (!init_kmem_cache_nodes(s))
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2950
  		goto error;
81819f0fc   Christoph Lameter   SLUB core
2951

55136592f   Christoph Lameter   slub: Remove dyna...
2952
  	if (alloc_kmem_cache_cpus(s))
81819f0fc   Christoph Lameter   SLUB core
2953
  		return 1;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2954

4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2955
  	free_kmem_cache_nodes(s);
81819f0fc   Christoph Lameter   SLUB core
2956
2957
2958
2959
2960
  error:
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slab %s size=%lu realsize=%u "
  			"order=%u offset=%u flags=%lx
  ",
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2961
  			s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0fc   Christoph Lameter   SLUB core
2962
2963
2964
  			s->offset, flags);
  	return 0;
  }
81819f0fc   Christoph Lameter   SLUB core
2965
2966
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2967
2968
2969
2970
2971
2972
2973
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->objsize;
  }
  EXPORT_SYMBOL(kmem_cache_size);
33b12c381   Christoph Lameter   slub: Dump list o...
2974
2975
2976
2977
2978
2979
  static void list_slab_objects(struct kmem_cache *s, struct page *page,
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	void *addr = page_address(page);
  	void *p;
a5dd5c117   Namhyung Kim   slub: Fix signedn...
2980
2981
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
  				     sizeof(long), GFP_ATOMIC);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2982
2983
  	if (!map)
  		return;
33b12c381   Christoph Lameter   slub: Dump list o...
2984
2985
  	slab_err(s, page, "%s", text);
  	slab_lock(page);
33b12c381   Christoph Lameter   slub: Dump list o...
2986

5f80b13ae   Christoph Lameter   slub: get_map() f...
2987
  	get_map(s, page, map);
33b12c381   Christoph Lameter   slub: Dump list o...
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
  	for_each_object(p, s, addr, page->objects) {
  
  		if (!test_bit(slab_index(p, s, addr), map)) {
  			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu
  ",
  							p, p - addr);
  			print_tracking(s, p);
  		}
  	}
  	slab_unlock(page);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2998
  	kfree(map);
33b12c381   Christoph Lameter   slub: Dump list o...
2999
3000
  #endif
  }
81819f0fc   Christoph Lameter   SLUB core
3001
  /*
599870b17   Christoph Lameter   slub: free_list()...
3002
   * Attempt to free all partial slabs on a node.
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3003
3004
   * This is called from kmem_cache_close(). We must be the last thread
   * using the cache and therefore we do not need to lock anymore.
81819f0fc   Christoph Lameter   SLUB core
3005
   */
599870b17   Christoph Lameter   slub: free_list()...
3006
  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0fc   Christoph Lameter   SLUB core
3007
  {
81819f0fc   Christoph Lameter   SLUB core
3008
  	struct page *page, *h;
33b12c381   Christoph Lameter   slub: Dump list o...
3009
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0fc   Christoph Lameter   SLUB core
3010
  		if (!page->inuse) {
5cc6eee8a   Christoph Lameter   slub: explicit li...
3011
  			remove_partial(n, page);
81819f0fc   Christoph Lameter   SLUB core
3012
  			discard_slab(s, page);
33b12c381   Christoph Lameter   slub: Dump list o...
3013
3014
3015
  		} else {
  			list_slab_objects(s, page,
  				"Objects remaining on kmem_cache_close()");
599870b17   Christoph Lameter   slub: free_list()...
3016
  		}
33b12c381   Christoph Lameter   slub: Dump list o...
3017
  	}
81819f0fc   Christoph Lameter   SLUB core
3018
3019
3020
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3021
   * Release all resources used by a slab cache.
81819f0fc   Christoph Lameter   SLUB core
3022
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
3023
  static inline int kmem_cache_close(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
3024
3025
3026
3027
  {
  	int node;
  
  	flush_all(s);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3028
  	free_percpu(s->cpu_slab);
81819f0fc   Christoph Lameter   SLUB core
3029
  	/* Attempt to free all objects */
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3030
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
3031
  		struct kmem_cache_node *n = get_node(s, node);
599870b17   Christoph Lameter   slub: free_list()...
3032
3033
  		free_partial(s, n);
  		if (n->nr_partial || slabs_node(s, node))
81819f0fc   Christoph Lameter   SLUB core
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
  			return 1;
  	}
  	free_kmem_cache_nodes(s);
  	return 0;
  }
  
  /*
   * Close a cache and release the kmem_cache structure
   * (must be used for caches created using kmem_cache_create)
   */
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	down_write(&slub_lock);
  	s->refcount--;
  	if (!s->refcount) {
  		list_del(&s->list);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3050
  		up_write(&slub_lock);
d629d8195   Pekka Enberg   slub: improve kme...
3051
3052
3053
3054
3055
3056
  		if (kmem_cache_close(s)) {
  			printk(KERN_ERR "SLUB %s: %s called for cache that "
  				"still has objects.
  ", s->name, __func__);
  			dump_stack();
  		}
d76b1590e   Eric Dumazet   slub: Fix kmem_ca...
3057
3058
  		if (s->flags & SLAB_DESTROY_BY_RCU)
  			rcu_barrier();
81819f0fc   Christoph Lameter   SLUB core
3059
  		sysfs_slab_remove(s);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3060
3061
  	} else
  		up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3062
3063
3064
3065
3066
3067
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  
  /********************************************************************
   *		Kmalloc subsystem
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
3068
  struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
81819f0fc   Christoph Lameter   SLUB core
3069
  EXPORT_SYMBOL(kmalloc_caches);
51df11428   Christoph Lameter   slub: Dynamically...
3070
  static struct kmem_cache *kmem_cache;
55136592f   Christoph Lameter   slub: Remove dyna...
3071
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
3072
  static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
55136592f   Christoph Lameter   slub: Remove dyna...
3073
  #endif
81819f0fc   Christoph Lameter   SLUB core
3074
3075
  static int __init setup_slub_min_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3076
  	get_option(&str, &slub_min_order);
81819f0fc   Christoph Lameter   SLUB core
3077
3078
3079
3080
3081
3082
3083
3084
  
  	return 1;
  }
  
  __setup("slub_min_order=", setup_slub_min_order);
  
  static int __init setup_slub_max_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3085
  	get_option(&str, &slub_max_order);
818cf5909   David Rientjes   slub: enforce MAX...
3086
  	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0fc   Christoph Lameter   SLUB core
3087
3088
3089
3090
3091
3092
3093
3094
  
  	return 1;
  }
  
  __setup("slub_max_order=", setup_slub_max_order);
  
  static int __init setup_slub_min_objects(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3095
  	get_option(&str, &slub_min_objects);
81819f0fc   Christoph Lameter   SLUB core
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
  
  	return 1;
  }
  
  __setup("slub_min_objects=", setup_slub_min_objects);
  
  static int __init setup_slub_nomerge(char *str)
  {
  	slub_nomerge = 1;
  	return 1;
  }
  
  __setup("slub_nomerge", setup_slub_nomerge);
51df11428   Christoph Lameter   slub: Dynamically...
3109
3110
  static struct kmem_cache *__init create_kmalloc_cache(const char *name,
  						int size, unsigned int flags)
81819f0fc   Christoph Lameter   SLUB core
3111
  {
51df11428   Christoph Lameter   slub: Dynamically...
3112
3113
3114
  	struct kmem_cache *s;
  
  	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
83b519e8b   Pekka Enberg   slab: setup alloc...
3115
3116
3117
3118
  	/*
  	 * This function is called with IRQs disabled during early-boot on
  	 * a single CPU so there's no need to take slub_lock here.
  	 */
55136592f   Christoph Lameter   slub: Remove dyna...
3119
  	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
319d1e240   Christoph Lameter   slub: Drop fallba...
3120
  								flags, NULL))
81819f0fc   Christoph Lameter   SLUB core
3121
3122
3123
  		goto panic;
  
  	list_add(&s->list, &slab_caches);
51df11428   Christoph Lameter   slub: Dynamically...
3124
  	return s;
81819f0fc   Christoph Lameter   SLUB core
3125
3126
3127
3128
  
  panic:
  	panic("Creation of kmalloc slab %s size=%d failed.
  ", name, size);
51df11428   Christoph Lameter   slub: Dynamically...
3129
  	return NULL;
81819f0fc   Christoph Lameter   SLUB core
3130
  }
f1b263393   Christoph Lameter   SLUB: faster more...
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
  /*
   * Conversion table for small slab sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs <= 192 since we have
   * non-power-of-two cache sizes there. The size of larger slabs can be
   * determined using fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3163
3164
3165
3166
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
81819f0fc   Christoph Lameter   SLUB core
3167
3168
  static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  {
f1b263393   Christoph Lameter   SLUB: faster more...
3169
  	int index;
81819f0fc   Christoph Lameter   SLUB core
3170

f1b263393   Christoph Lameter   SLUB: faster more...
3171
3172
3173
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
81819f0fc   Christoph Lameter   SLUB core
3174

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3175
  		index = size_index[size_index_elem(size)];
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3176
  	} else
f1b263393   Christoph Lameter   SLUB: faster more...
3177
  		index = fls(size - 1);
81819f0fc   Christoph Lameter   SLUB core
3178
3179
  
  #ifdef CONFIG_ZONE_DMA
f1b263393   Christoph Lameter   SLUB: faster more...
3180
  	if (unlikely((flags & SLUB_DMA)))
51df11428   Christoph Lameter   slub: Dynamically...
3181
  		return kmalloc_dma_caches[index];
f1b263393   Christoph Lameter   SLUB: faster more...
3182

81819f0fc   Christoph Lameter   SLUB core
3183
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
3184
  	return kmalloc_caches[index];
81819f0fc   Christoph Lameter   SLUB core
3185
3186
3187
3188
  }
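  /*
   * For example, with the default table above, kmalloc(100, GFP_KERNEL)
   * computes size_index_elem(100) == 12 and size_index[12] == 7, i.e. the
   * 128 byte cache, while a 72 byte request maps through size_index[8] == 1
   * to the special 96 byte cache. Requests above 192 bytes use fls(): a
   * 500 byte request gives index fls(499) == 9, i.e. the 512 byte cache.
   */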
  
  void *__kmalloc(size_t size, gfp_t flags)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3189
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3190
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3191

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3192
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3193
  		return kmalloc_large(size, flags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3194
3195
3196
3197
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3198
  		return s;
2154a3363   Christoph Lameter   slub: Use a const...
3199
  	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3200

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3201
  	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3202
3203
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3204
3205
  }
  EXPORT_SYMBOL(__kmalloc);
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3206
  #ifdef CONFIG_NUMA
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3207
3208
  static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
  {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
3209
  	struct page *page;
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3210
  	void *ptr = NULL;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3211

b1eeab676   Vegard Nossum   kmemcheck: add ho...
3212
3213
  	flags |= __GFP_COMP | __GFP_NOTRACK;
  	page = alloc_pages_node(node, flags, get_order(size));
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3214
  	if (page)
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3215
3216
3217
3218
  		ptr = page_address(page);
  
  	kmemleak_alloc(ptr, size, 1, flags);
  	return ptr;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3219
  }
81819f0fc   Christoph Lameter   SLUB core
3220
3221
  void *__kmalloc_node(size_t size, gfp_t flags, int node)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3222
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3223
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3224

057685cf5   Ingo Molnar   Merge branch 'for...
3225
  	if (unlikely(size > SLUB_MAX_SIZE)) {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3226
  		ret = kmalloc_large_node(size, flags, node);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3227
3228
3229
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3230
3231
3232
  
  		return ret;
  	}
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3233
3234
3235
3236
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3237
  		return s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3238
  	ret = slab_alloc(s, flags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3239
  	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3240
3241
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3242
3243
3244
3245
3246
3247
  }
  EXPORT_SYMBOL(__kmalloc_node);
  #endif
  
  size_t ksize(const void *object)
  {
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3248
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
3249

ef8b4520b   Christoph Lameter   Slab allocators: ...
3250
  	if (unlikely(object == ZERO_SIZE_PTR))
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3251
  		return 0;
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3252
  	page = virt_to_head_page(object);
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3253

76994412f   Pekka Enberg   slub: ksize() abu...
3254
3255
  	if (unlikely(!PageSlab(page))) {
  		WARN_ON(!PageCompound(page));
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3256
  		return PAGE_SIZE << compound_order(page);
76994412f   Pekka Enberg   slub: ksize() abu...
3257
  	}
81819f0fc   Christoph Lameter   SLUB core
3258

b3d41885d   Eric Dumazet   slub: fix kmemche...
3259
  	return slab_ksize(page->slab);
81819f0fc   Christoph Lameter   SLUB core
3260
  }
b1aabecd5   Kirill A. Shutemov   mm: Export symbol...
3261
  EXPORT_SYMBOL(ksize);
81819f0fc   Christoph Lameter   SLUB core
3262

d18a90dd8   Ben Greear   slub: Add method ...
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
  #ifdef CONFIG_SLUB_DEBUG
  bool verify_mem_not_deleted(const void *x)
  {
  	struct page *page;
  	void *object = (void *)x;
  	unsigned long flags;
  	bool rv;
  
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
  		return false;
  
  	local_irq_save(flags);
  
  	page = virt_to_head_page(x);
  	if (unlikely(!PageSlab(page))) {
  		/* maybe it was from stack? */
  		rv = true;
  		goto out_unlock;
  	}
  
  	slab_lock(page);
  	if (on_freelist(page->slab, page, object)) {
  		object_err(page->slab, page, object, "Object is on free-list");
  		rv = false;
  	} else {
  		rv = true;
  	}
  	slab_unlock(page);
  
  out_unlock:
  	local_irq_restore(flags);
  	return rv;
  }
  EXPORT_SYMBOL(verify_mem_not_deleted);
  #endif
81819f0fc   Christoph Lameter   SLUB core
3298
3299
  void kfree(const void *x)
  {
81819f0fc   Christoph Lameter   SLUB core
3300
  	struct page *page;
5bb983b0c   Christoph Lameter   SLUB: Deal with a...
3301
  	void *object = (void *)x;
81819f0fc   Christoph Lameter   SLUB core
3302

2121db74b   Pekka Enberg   kmemtrace: trace ...
3303
  	trace_kfree(_RET_IP_, x);
2408c5503   Satyam Sharma   {slub, slob}: use...
3304
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0fc   Christoph Lameter   SLUB core
3305
  		return;
b49af68ff   Christoph Lameter   Add virt_to_head_...
3306
  	page = virt_to_head_page(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3307
  	if (unlikely(!PageSlab(page))) {
0937502af   Christoph Lameter   slub: Add check f...
3308
  		BUG_ON(!PageCompound(page));
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3309
  		kmemleak_free(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3310
3311
3312
  		put_page(page);
  		return;
  	}
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3313
  	slab_free(page->slab, page, object, _RET_IP_);
81819f0fc   Christoph Lameter   SLUB core
3314
3315
  }
  EXPORT_SYMBOL(kfree);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3316
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3317
3318
3319
3320
3321
3322
3323
3324
   * kmem_cache_shrink removes empty slabs from the partial lists and sorts
   * the remaining slabs by the number of items in use. The slabs with the
   * most items in use come first. New allocations will then fill those up
   * and thus they can be removed from the partial lists.
   *
   * The slabs with the least items are placed last. This results in them
   * being allocated from last, increasing the chance that their remaining
   * objects are freed and the now-empty slabs can be discarded.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3325
3326
3327
3328
3329
3330
3331
3332
   */
  int kmem_cache_shrink(struct kmem_cache *s)
  {
  	int node;
  	int i;
  	struct kmem_cache_node *n;
  	struct page *page;
  	struct page *t;
205ab99dd   Christoph Lameter   slub: Update stat...
3333
  	int objects = oo_objects(s->max);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3334
  	struct list_head *slabs_by_inuse =
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3335
  		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3336
3337
3338
3339
3340
3341
  	unsigned long flags;
  
  	if (!slabs_by_inuse)
  		return -ENOMEM;
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3342
  	for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3343
3344
3345
3346
  		n = get_node(s, node);
  
  		if (!n->nr_partial)
  			continue;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3347
  		for (i = 0; i < objects; i++)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3348
3349
3350
3351
3352
  			INIT_LIST_HEAD(slabs_by_inuse + i);
  
  		spin_lock_irqsave(&n->list_lock, flags);
  
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3353
  		 * Build lists indexed by the items in use in each slab.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3354
  		 *
672bba3a4   Christoph Lameter   SLUB: update comm...
3355
3356
  		 * Note that concurrent frees may occur while we hold the
  		 * list_lock. page->inuse here is the upper limit.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3357
3358
  		 */
  		list_for_each_entry_safe(page, t, &n->partial, lru) {
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3359
3360
3361
  			list_move(&page->lru, slabs_by_inuse + page->inuse);
  			if (!page->inuse)
  				n->nr_partial--;
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3362
  		}
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3363
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3364
3365
  		 * Rebuild the partial list with the slabs filled up most
  		 * first and the least used slabs at the end.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3366
  		 */
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3367
  		for (i = objects - 1; i > 0; i--)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3368
  			list_splice(slabs_by_inuse + i, n->partial.prev);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3369
  		spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3370
3371
3372
3373
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
  			discard_slab(s, page);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3374
3375
3376
3377
3378
3379
  	}
  
  	kfree(slabs_by_inuse);
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
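  
  /*
   * Illustrative sketch (example only, kept under "#if 0"): a cache owner
   * that has just released a burst of objects can call kmem_cache_shrink()
   * to hand completely empty slabs back to the page allocator and to re-sort
   * the remaining partial slabs so the fullest ones are refilled first. The
   * function name below is made up for the example.
   */
  #if 0
  static void shrink_example(struct kmem_cache *cachep)
  {
  	/* Returns 0 on success, -ENOMEM if the temporary sort array fails. */
  	if (kmem_cache_shrink(cachep))
  		pr_warn("could not shrink %s\n", cachep->name);
  }
  #endif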
92a5bbc11   Pekka Enberg   SLUB: Fix memory ...
3380
  #if defined(CONFIG_MEMORY_HOTPLUG)
b9049e234   Yasunori Goto   memory hotplug: m...
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
  static int slab_mem_going_offline_callback(void *arg)
  {
  	struct kmem_cache *s;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list)
  		kmem_cache_shrink(s);
  	up_read(&slub_lock);
  
  	return 0;
  }
  
  static void slab_mem_offline_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int offline_node;
  
  	offline_node = marg->status_change_nid;
  
  	/*
  	 * If the node still has available memory, we still need its
  	 * kmem_cache_node structure, so there is nothing to do here.
  	 */
  	if (offline_node < 0)
  		return;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		n = get_node(s, offline_node);
  		if (n) {
  			/*
  			 * if n->nr_slabs > 0, slabs still exist on the node
  			 * that is going down. We were unable to free them,
c9404c9c3   Adam Buchbinder   Fix misspelling o...
3416
  			 * and the offline_pages() function shouldn't call this
b9049e234   Yasunori Goto   memory hotplug: m...
3417
3418
  			 * callback. So, we must fail.
  			 */
0f389ec63   Christoph Lameter   slub: No need for...
3419
  			BUG_ON(slabs_node(s, offline_node));
b9049e234   Yasunori Goto   memory hotplug: m...
3420
3421
  
  			s->node[offline_node] = NULL;
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3422
  			kmem_cache_free(kmem_cache_node, n);
b9049e234   Yasunori Goto   memory hotplug: m...
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
  		}
  	}
  	up_read(&slub_lock);
  }
  
  static int slab_mem_going_online_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int nid = marg->status_change_nid;
  	int ret = 0;
  
  	/*
  	 * If the node's memory is already available, then kmem_cache_node is
  	 * already created. Nothing to do.
  	 */
  	if (nid < 0)
  		return 0;
  
  	/*
0121c619d   Christoph Lameter   slub: Whitespace ...
3444
  	 * We are bringing a node online. No memory is available yet. We must
b9049e234   Yasunori Goto   memory hotplug: m...
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
  	 * allocate a kmem_cache_node structure in order to bring the node
  	 * online.
  	 */
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		/*
  		 * XXX: kmem_cache_alloc_node will fall back to other nodes
  		 *      since memory is not yet available from the node that
  		 *      is brought up.
  		 */
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3455
  		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e234   Yasunori Goto   memory hotplug: m...
3456
3457
3458
3459
  		if (!n) {
  			ret = -ENOMEM;
  			goto out;
  		}
5595cffc8   Pekka Enberg   SLUB: dynamic per...
3460
  		init_kmem_cache_node(n, s);
b9049e234   Yasunori Goto   memory hotplug: m...
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
  		s->node[nid] = n;
  	}
  out:
  	up_read(&slub_lock);
  	return ret;
  }
  
  static int slab_memory_callback(struct notifier_block *self,
  				unsigned long action, void *arg)
  {
  	int ret = 0;
  
  	switch (action) {
  	case MEM_GOING_ONLINE:
  		ret = slab_mem_going_online_callback(arg);
  		break;
  	case MEM_GOING_OFFLINE:
  		ret = slab_mem_going_offline_callback(arg);
  		break;
  	case MEM_OFFLINE:
  	case MEM_CANCEL_ONLINE:
  		slab_mem_offline_callback(arg);
  		break;
  	case MEM_ONLINE:
  	case MEM_CANCEL_OFFLINE:
  		break;
  	}
dc19f9db3   KAMEZAWA Hiroyuki   memcg: memory hot...
3488
3489
3490
3491
  	if (ret)
  		ret = notifier_from_errno(ret);
  	else
  		ret = NOTIFY_OK;
b9049e234   Yasunori Goto   memory hotplug: m...
3492
3493
3494
3495
  	return ret;
  }
  
  #endif /* CONFIG_MEMORY_HOTPLUG */
81819f0fc   Christoph Lameter   SLUB core
3496
3497
3498
  /********************************************************************
   *			Basic setup of slabs
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
  /*
   * Used for early kmem_cache structures that were allocated using
   * the page allocator
   */
  
  static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
  {
  	int node;
  
  	list_add(&s->list, &slab_caches);
  	s->refcount = -1;
  
  	for_each_node_state(node, N_NORMAL_MEMORY) {
  		struct kmem_cache_node *n = get_node(s, node);
  		struct page *p;
  
  		if (n) {
  			list_for_each_entry(p, &n->partial, lru)
  				p->slab = s;
607bf324a   Li Zefan   slub: Fix a typo ...
3518
  #ifdef CONFIG_SLUB_DEBUG
51df11428   Christoph Lameter   slub: Dynamically...
3519
3520
3521
3522
3523
3524
  			list_for_each_entry(p, &n->full, lru)
  				p->slab = s;
  #endif
  		}
  	}
  }
81819f0fc   Christoph Lameter   SLUB core
3525
3526
3527
  void __init kmem_cache_init(void)
  {
  	int i;
4b356be01   Christoph Lameter   SLUB: minimum ali...
3528
  	int caches = 0;
51df11428   Christoph Lameter   slub: Dynamically...
3529
3530
  	struct kmem_cache *temp_kmem_cache;
  	int order;
51df11428   Christoph Lameter   slub: Dynamically...
3531
3532
  	struct kmem_cache *temp_kmem_cache_node;
  	unsigned long kmalloc_size;
fc8d8620d   Stanislaw Gruszka   slub: min order w...
3533
3534
  	if (debug_guardpage_minorder())
  		slub_max_order = 0;
51df11428   Christoph Lameter   slub: Dynamically...
3535
3536
3537
3538
3539
3540
3541
  	kmem_size = offsetof(struct kmem_cache, node) +
  				nr_node_ids * sizeof(struct kmem_cache_node *);
  
  	/* Allocate two kmem_caches from the page allocator */
  	kmalloc_size = ALIGN(kmem_size, cache_line_size());
  	order = get_order(2 * kmalloc_size);
  	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
81819f0fc   Christoph Lameter   SLUB core
3542
3543
  	/*
  	 * Must first have the slab cache available for the allocations of the
672bba3a4   Christoph Lameter   SLUB: update comm...
3544
  	 * struct kmem_cache_node's. There is special bootstrap code in
81819f0fc   Christoph Lameter   SLUB core
3545
3546
  	 * kmem_cache_open for slab_state == DOWN.
  	 */
51df11428   Christoph Lameter   slub: Dynamically...
3547
3548
3549
3550
3551
  	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
  
  	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
  		sizeof(struct kmem_cache_node),
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
b9049e234   Yasunori Goto   memory hotplug: m...
3552

0c40ba4fd   Nadia Derbey   ipc: define the s...
3553
  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0fc   Christoph Lameter   SLUB core
3554
3555
3556
  
  	/* Able to allocate the per node structures */
  	slab_state = PARTIAL;
51df11428   Christoph Lameter   slub: Dynamically...
3557
3558
3559
3560
3561
  	temp_kmem_cache = kmem_cache;
  	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
  	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
81819f0fc   Christoph Lameter   SLUB core
3562

51df11428   Christoph Lameter   slub: Dynamically...
3563
3564
3565
3566
3567
3568
  	/*
  	 * Allocate kmem_cache_node properly from the kmem_cache slab.
  	 * kmem_cache_node is separately allocated so no need to
  	 * update any list pointers.
  	 */
  	temp_kmem_cache_node = kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
3569

51df11428   Christoph Lameter   slub: Dynamically...
3570
3571
3572
3573
3574
3575
  	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
  
  	kmem_cache_bootstrap_fixup(kmem_cache_node);
  
  	caches++;
51df11428   Christoph Lameter   slub: Dynamically...
3576
3577
3578
3579
3580
3581
  	kmem_cache_bootstrap_fixup(kmem_cache);
  	caches++;
  	/* Free temporary boot structure */
  	free_pages((unsigned long)temp_kmem_cache, order);
  
  	/* Now we can use the kmem_cache to allocate kmalloc slabs */
f1b263393   Christoph Lameter   SLUB: faster more...
3582
3583
3584
3585
  
  	/*
  	 * Patch up the size_index table if we have strange large alignment
  	 * requirements for the kmalloc array. This is only the case for
6446faa2f   Christoph Lameter   slub: Fix up comm...
3586
  	 * MIPS it seems. The standard arches will not generate any code here.
f1b263393   Christoph Lameter   SLUB: faster more...
3587
3588
3589
3590
3591
3592
3593
3594
3595
  	 *
  	 * Largest permitted alignment is 256 bytes due to the way we
  	 * handle the index determination for the smaller caches.
  	 *
  	 * Make sure that nothing crazy happens if someone starts tinkering
  	 * around with ARCH_KMALLOC_MINALIGN
  	 */
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3596
3597
3598
3599
3600
3601
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
f1b263393   Christoph Lameter   SLUB: faster more...
3602

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3603
3604
3605
3606
3607
3608
3609
3610
  	if (KMALLOC_MIN_SIZE == 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 bytes.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  	} else if (KMALLOC_MIN_SIZE == 128) {
41d54d3bf   Christoph Lameter   slub: Do not use ...
3611
3612
3613
3614
3615
3616
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3617
  			size_index[size_index_elem(i)] = 8;
41d54d3bf   Christoph Lameter   slub: Do not use ...
3618
  	}
51df11428   Christoph Lameter   slub: Dynamically...
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
  	/* Caches that are not of the two-to-the-power-of size */
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
  		caches++;
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
  		caches++;
  	}
  
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
  		caches++;
  	}
81819f0fc   Christoph Lameter   SLUB core
3634
3635
3636
  	slab_state = UP;
  
  	/* Provide the correct kmalloc names now that the caches are up */
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3637
3638
3639
3640
3641
3642
3643
3644
3645
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[1]->name);
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[2]->name);
  	}
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3646
3647
3648
3649
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
  
  		BUG_ON(!s);
51df11428   Christoph Lameter   slub: Dynamically...
3650
  		kmalloc_caches[i]->name = s;
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3651
  	}
81819f0fc   Christoph Lameter   SLUB core
3652
3653
3654
  
  #ifdef CONFIG_SMP
  	register_cpu_notifier(&slab_notifier);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3655
  #endif
81819f0fc   Christoph Lameter   SLUB core
3656

55136592f   Christoph Lameter   slub: Remove dyna...
3657
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
3658
3659
  	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
55136592f   Christoph Lameter   slub: Remove dyna...
3660

51df11428   Christoph Lameter   slub: Dynamically...
3661
  		if (s && s->size) {
55136592f   Christoph Lameter   slub: Remove dyna...
3662
3663
3664
3665
  			char *name = kasprintf(GFP_NOWAIT,
  				 "dma-kmalloc-%d", s->objsize);
  
  			BUG_ON(!name);
51df11428   Christoph Lameter   slub: Dynamically...
3666
3667
  			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
  				s->objsize, SLAB_CACHE_DMA);
55136592f   Christoph Lameter   slub: Remove dyna...
3668
3669
3670
  		}
  	}
  #endif
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3671
3672
  	printk(KERN_INFO
  		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be01   Christoph Lameter   SLUB: minimum ali...
3673
3674
3675
  		" CPUs=%d, Nodes=%d\n",
  		caches, cache_line_size(),
81819f0fc   Christoph Lameter   SLUB core
3676
3677
3678
  		slub_min_order, slub_max_order, slub_min_objects,
  		nr_cpu_ids, nr_node_ids);
  }
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3679
3680
  void __init kmem_cache_init_late(void)
  {
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3681
  }
81819f0fc   Christoph Lameter   SLUB core
3682
3683
3684
3685
3686
3687
3688
  /*
   * Find a mergeable slab cache
   */
  static int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  		return 1;
c59def9f2   Christoph Lameter   Slab allocators: ...
3689
  	if (s->ctor)
81819f0fc   Christoph Lameter   SLUB core
3690
  		return 1;
8ffa68755   Christoph Lameter   SLUB: Fix NUMA / ...
3691
3692
3693
3694
3695
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
81819f0fc   Christoph Lameter   SLUB core
3696
3697
3698
3699
  	return 0;
  }
  
  static struct kmem_cache *find_mergeable(size_t size,
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3700
  		size_t align, unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3701
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3702
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3703
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
3704
3705
3706
  
  	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  		return NULL;
c59def9f2   Christoph Lameter   Slab allocators: ...
3707
  	if (ctor)
81819f0fc   Christoph Lameter   SLUB core
3708
3709
3710
3711
3712
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3713
  	flags = kmem_cache_flags(size, flags, name, NULL);
81819f0fc   Christoph Lameter   SLUB core
3714

5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3715
  	list_for_each_entry(s, &slab_caches, list) {
81819f0fc   Christoph Lameter   SLUB core
3716
3717
3718
3719
3720
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3721
  		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0fc   Christoph Lameter   SLUB core
3722
3723
3724
3725
3726
  				continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
064287807   Pekka Enberg   SLUB: Fix coding ...
3727
  		if ((s->size & ~(align - 1)) != s->size)
81819f0fc   Christoph Lameter   SLUB core
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  
  		return s;
  	}
  	return NULL;
  }
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3739
  		size_t align, unsigned long flags, void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3740
3741
  {
  	struct kmem_cache *s;
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3742
  	char *n;
81819f0fc   Christoph Lameter   SLUB core
3743

fe1ff49d0   Benjamin Herrenschmidt   mm: kmem_cache_cr...
3744
3745
  	if (WARN_ON(!name))
  		return NULL;
81819f0fc   Christoph Lameter   SLUB core
3746
  	down_write(&slub_lock);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3747
  	s = find_mergeable(size, align, flags, name, ctor);
81819f0fc   Christoph Lameter   SLUB core
3748
3749
3750
3751
3752
3753
3754
3755
  	if (s) {
  		s->refcount++;
  		/*
  		 * Adjust the object sizes so that we clear
  		 * the complete object on kzalloc.
  		 */
  		s->objsize = max(s->objsize, (int)size);
  		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2f   Christoph Lameter   slub: Fix up comm...
3756

7b8f3b66d   David Rientjes   slub: avoid leaki...
3757
  		if (sysfs_slab_alias(s, name)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3758
  			s->refcount--;
81819f0fc   Christoph Lameter   SLUB core
3759
  			goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3760
  		}
2bce64858   Christoph Lameter   slub: Allow remov...
3761
  		up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3762
3763
  		return s;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
3764

84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3765
3766
3767
  	n = kstrdup(name, GFP_KERNEL);
  	if (!n)
  		goto err;
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3768
3769
  	s = kmalloc(kmem_size, GFP_KERNEL);
  	if (s) {
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3770
  		if (kmem_cache_open(s, n,
c59def9f2   Christoph Lameter   Slab allocators: ...
3771
  				size, align, flags, ctor)) {
81819f0fc   Christoph Lameter   SLUB core
3772
  			list_add(&s->list, &slab_caches);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3773
  			if (sysfs_slab_add(s)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3774
  				list_del(&s->list);
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3775
  				kfree(n);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3776
  				kfree(s);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3777
  				goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3778
  			}
2bce64858   Christoph Lameter   slub: Allow remov...
3779
  			up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3780
3781
  			return s;
  		}
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3782
  		kfree(n);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3783
  		kfree(s);
81819f0fc   Christoph Lameter   SLUB core
3784
  	}
68cee4f11   Pavel Emelyanov   slub: Fix slub_lo...
3785
  err:
81819f0fc   Christoph Lameter   SLUB core
3786
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3787

81819f0fc   Christoph Lameter   SLUB core
3788
3789
3790
3791
3792
3793
3794
3795
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slabcache %s\n", name);
  	else
  		s = NULL;
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
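  
  /*
   * Illustrative sketch (example only, kept under "#if 0"): typical use of
   * the interface above. The structure, constructor and cache name are made
   * up for the example; note that SLUB may transparently merge such a cache
   * with an existing compatible one (see find_mergeable() above).
   */
  #if 0
  struct example_obj {
  	struct list_head list;
  	int value;
  };
  
  static void example_ctor(void *addr)
  {
  	struct example_obj *obj = addr;
  
  	INIT_LIST_HEAD(&obj->list);
  }
  
  static int __init example_cache_init(void)
  {
  	struct kmem_cache *cache;
  	struct example_obj *obj;
  
  	cache = kmem_cache_create("example_obj", sizeof(struct example_obj),
  					0, SLAB_HWCACHE_ALIGN, example_ctor);
  	if (!cache)
  		return -ENOMEM;
  
  	obj = kmem_cache_alloc(cache, GFP_KERNEL);
  	if (obj) {
  		obj->value = 42;
  		kmem_cache_free(cache, obj);
  	}
  
  	kmem_cache_destroy(cache);
  	return 0;
  }
  #endif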
81819f0fc   Christoph Lameter   SLUB core
3796
  #ifdef CONFIG_SMP
27390bc33   Christoph Lameter   SLUB: fix locking...
3797
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3798
3799
   * Use the cpu notifier to ensure that the cpu slabs are flushed when
   * necessary.
81819f0fc   Christoph Lameter   SLUB core
3800
3801
3802
3803
3804
   */
  static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  		unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3805
3806
  	struct kmem_cache *s;
  	unsigned long flags;
81819f0fc   Christoph Lameter   SLUB core
3807
3808
3809
  
  	switch (action) {
  	case CPU_UP_CANCELED:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3810
  	case CPU_UP_CANCELED_FROZEN:
81819f0fc   Christoph Lameter   SLUB core
3811
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3812
  	case CPU_DEAD_FROZEN:
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3813
3814
3815
3816
3817
3818
3819
  		down_read(&slub_lock);
  		list_for_each_entry(s, &slab_caches, list) {
  			local_irq_save(flags);
  			__flush_cpu_slab(s, cpu);
  			local_irq_restore(flags);
  		}
  		up_read(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3820
3821
3822
3823
3824
3825
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
064287807   Pekka Enberg   SLUB: Fix coding ...
3826
  static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3827
  	.notifier_call = slab_cpuup_callback
064287807   Pekka Enberg   SLUB: Fix coding ...
3828
  };
81819f0fc   Christoph Lameter   SLUB core
3829
3830
  
  #endif
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3831
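  /*
   * The *_track_caller variants below behave like kmalloc()/kmalloc_node(),
   * but take an explicit call-site address so that wrappers such as
   * kstrdup() or kmemdup() report their own caller as the allocation site in
   * tracing and debugging output.
   */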
  void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3832
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3833
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3834
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3835

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3836
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3837
  		return kmalloc_large(size, gfpflags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3838
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3839

2408c5503   Satyam Sharma   {slub, slob}: use...
3840
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3841
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3842

2154a3363   Christoph Lameter   slub: Use a const...
3843
  	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3844

25985edce   Lucas De Marchi   Fix common misspe...
3845
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3846
  	trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3847
3848
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3849
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3850
  #ifdef CONFIG_NUMA
81819f0fc   Christoph Lameter   SLUB core
3851
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3852
  					int node, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3853
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3854
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3855
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3856

d3e14aa33   Xiaotian Feng   slub: __kmalloc_n...
3857
3858
3859
3860
3861
3862
3863
3864
3865
  	if (unlikely(size > SLUB_MAX_SIZE)) {
  		ret = kmalloc_large_node(size, gfpflags, node);
  
  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   gfpflags, node);
  
  		return ret;
  	}
eada35efc   Pekka Enberg   slub: kmalloc pag...
3866

aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3867
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3868

2408c5503   Satyam Sharma   {slub, slob}: use...
3869
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3870
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3871

94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3872
  	ret = slab_alloc(s, gfpflags, node, caller);
25985edce   Lucas De Marchi   Fix common misspe...
3873
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3874
  	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3875
3876
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3877
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3878
  #endif
81819f0fc   Christoph Lameter   SLUB core
3879

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3880
  #ifdef CONFIG_SYSFS
205ab99dd   Christoph Lameter   slub: Update stat...
3881
3882
3883
3884
3885
3886
3887
3888
3889
  static int count_inuse(struct page *page)
  {
  	return page->inuse;
  }
  
  static int count_total(struct page *page)
  {
  	return page->objects;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3890
  #endif
205ab99dd   Christoph Lameter   slub: Update stat...
3891

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3892
  #ifdef CONFIG_SLUB_DEBUG
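  /*
   * The validate_* helpers below re-check every object on every slab of a
   * cache (freelist consistency, red zones, poison) under the node
   * list_lock. They are normally triggered by writing to the cache's
   * "validate" sysfs file, and by the resiliency test further down.
   */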
434e245dd   Christoph Lameter   SLUB: Do not allo...
3893
3894
  static int validate_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3895
3896
  {
  	void *p;
a973e9dd1   Christoph Lameter   Revert "unique en...
3897
  	void *addr = page_address(page);
53e15af03   Christoph Lameter   slub: validation ...
3898
3899
3900
3901
3902
3903
  
  	if (!check_slab(s, page) ||
  			!on_freelist(s, page, NULL))
  		return 0;
  
  	/* Now we know that a valid freelist exists */
39b264641   Christoph Lameter   slub: Store max n...
3904
  	bitmap_zero(map, page->objects);
53e15af03   Christoph Lameter   slub: validation ...
3905

5f80b13ae   Christoph Lameter   slub: get_map() f...
3906
3907
3908
3909
3910
  	get_map(s, page, map);
  	for_each_object(p, s, addr, page->objects) {
  		if (test_bit(slab_index(p, s, addr), map))
  			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
  				return 0;
53e15af03   Christoph Lameter   slub: validation ...
3911
  	}
224a88be4   Christoph Lameter   slub: for_each_ob...
3912
  	for_each_object(p, s, addr, page->objects)
7656c72b5   Christoph Lameter   SLUB: add macros ...
3913
  		if (!test_bit(slab_index(p, s, addr), map))
37d57443d   Tero Roponen   slub: Fix a crash...
3914
  			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af03   Christoph Lameter   slub: validation ...
3915
3916
3917
  				return 0;
  	return 1;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3918
3919
  static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3920
  {
881db7fb0   Christoph Lameter   slub: Invert lock...
3921
3922
3923
  	slab_lock(page);
  	validate_slab(s, page, map);
  	slab_unlock(page);
53e15af03   Christoph Lameter   slub: validation ...
3924
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3925
3926
  static int validate_slab_node(struct kmem_cache *s,
  		struct kmem_cache_node *n, unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3927
3928
3929
3930
3931
3932
3933
3934
  {
  	unsigned long count = 0;
  	struct page *page;
  	unsigned long flags;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  
  	list_for_each_entry(page, &n->partial, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3935
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
  		count++;
  	}
  	if (count != n->nr_partial)
  		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  			"counter=%ld\n", s->name, count, n->nr_partial);
  
  	if (!(s->flags & SLAB_STORE_USER))
  		goto out;
  
  	list_for_each_entry(page, &n->full, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3947
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
  		count++;
  	}
  	if (count != atomic_long_read(&n->nr_slabs))
  		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  			"counter=%ld\n", s->name, count,
  			atomic_long_read(&n->nr_slabs));
  
  out:
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return count;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3960
  static long validate_slab_cache(struct kmem_cache *s)
53e15af03   Christoph Lameter   slub: validation ...
3961
3962
3963
  {
  	int node;
  	unsigned long count = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
3964
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245dd   Christoph Lameter   SLUB: Do not allo...
3965
3966
3967
3968
  				sizeof(unsigned long), GFP_KERNEL);
  
  	if (!map)
  		return -ENOMEM;
53e15af03   Christoph Lameter   slub: validation ...
3969
3970
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3971
  	for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af03   Christoph Lameter   slub: validation ...
3972
  		struct kmem_cache_node *n = get_node(s, node);
434e245dd   Christoph Lameter   SLUB: Do not allo...
3973
  		count += validate_slab_node(s, n, map);
53e15af03   Christoph Lameter   slub: validation ...
3974
  	}
434e245dd   Christoph Lameter   SLUB: Do not allo...
3975
  	kfree(map);
53e15af03   Christoph Lameter   slub: validation ...
3976
3977
  	return count;
  }
88a420e4e   Christoph Lameter   slub: add ability...
3978
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3979
   * Generate lists of code addresses where slabcache objects are allocated
88a420e4e   Christoph Lameter   slub: add ability...
3980
3981
3982
3983
3984
   * and freed.
   */
  
  struct location {
  	unsigned long count;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3985
  	unsigned long addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3986
3987
3988
3989
3990
  	long long sum_time;
  	long min_time;
  	long max_time;
  	long min_pid;
  	long max_pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3991
  	DECLARE_BITMAP(cpus, NR_CPUS);
45edfa580   Christoph Lameter   SLUB: include lif...
3992
  	nodemask_t nodes;
88a420e4e   Christoph Lameter   slub: add ability...
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
  };
  
  struct loc_track {
  	unsigned long max;
  	unsigned long count;
  	struct location *loc;
  };
  
  static void free_loc_track(struct loc_track *t)
  {
  	if (t->max)
  		free_pages((unsigned long)t->loc,
  			get_order(sizeof(struct location) * t->max));
  }
68dff6a9a   Christoph Lameter   SLUB slab validat...
4007
  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4e   Christoph Lameter   slub: add ability...
4008
4009
4010
  {
  	struct location *l;
  	int order;
88a420e4e   Christoph Lameter   slub: add ability...
4011
  	order = get_order(sizeof(struct location) * max);
68dff6a9a   Christoph Lameter   SLUB slab validat...
4012
  	l = (void *)__get_free_pages(flags, order);
88a420e4e   Christoph Lameter   slub: add ability...
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
  	if (!l)
  		return 0;
  
  	if (t->count) {
  		memcpy(l, t->loc, sizeof(struct location) * t->count);
  		free_loc_track(t);
  	}
  	t->max = max;
  	t->loc = l;
  	return 1;
  }
  
  static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa580   Christoph Lameter   SLUB: include lif...
4026
  				const struct track *track)
88a420e4e   Christoph Lameter   slub: add ability...
4027
4028
4029
  {
  	long start, end, pos;
  	struct location *l;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
4030
  	unsigned long caddr;
45edfa580   Christoph Lameter   SLUB: include lif...
4031
  	unsigned long age = jiffies - track->when;
88a420e4e   Christoph Lameter   slub: add ability...
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
  
  	start = -1;
  	end = t->count;
  
  	for ( ; ; ) {
  		pos = start + (end - start + 1) / 2;
  
  		/*
  		 * There is nothing at "end". If we end up there
  		 * we need to insert the new entry before "end".
  		 */
  		if (pos == end)
  			break;
  
  		caddr = t->loc[pos].addr;
45edfa580   Christoph Lameter   SLUB: include lif...
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
  		if (track->addr == caddr) {
  
  			l = &t->loc[pos];
  			l->count++;
  			if (track->when) {
  				l->sum_time += age;
  				if (age < l->min_time)
  					l->min_time = age;
  				if (age > l->max_time)
  					l->max_time = age;
  
  				if (track->pid < l->min_pid)
  					l->min_pid = track->pid;
  				if (track->pid > l->max_pid)
  					l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
4062
4063
  				cpumask_set_cpu(track->cpu,
  						to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4064
4065
  			}
  			node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
4066
4067
  			return 1;
  		}
45edfa580   Christoph Lameter   SLUB: include lif...
4068
  		if (track->addr < caddr)
88a420e4e   Christoph Lameter   slub: add ability...
4069
4070
4071
4072
4073
4074
  			end = pos;
  		else
  			start = pos;
  	}
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
4075
  	 * Not found. Insert new tracking element.
88a420e4e   Christoph Lameter   slub: add ability...
4076
  	 */
68dff6a9a   Christoph Lameter   SLUB slab validat...
4077
  	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4e   Christoph Lameter   slub: add ability...
4078
4079
4080
4081
4082
4083
4084
4085
  		return 0;
  
  	l = t->loc + pos;
  	if (pos < t->count)
  		memmove(l + 1, l,
  			(t->count - pos) * sizeof(struct location));
  	t->count++;
  	l->count = 1;
45edfa580   Christoph Lameter   SLUB: include lif...
4086
4087
4088
4089
4090
4091
  	l->addr = track->addr;
  	l->sum_time = age;
  	l->min_time = age;
  	l->max_time = age;
  	l->min_pid = track->pid;
  	l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
4092
4093
  	cpumask_clear(to_cpumask(l->cpus));
  	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4094
4095
  	nodes_clear(l->nodes);
  	node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
4096
4097
4098
4099
  	return 1;
  }
  
  static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57bf   Eric Dumazet   slub: Potential s...
4100
  		struct page *page, enum track_item alloc,
a5dd5c117   Namhyung Kim   slub: Fix signedn...
4101
  		unsigned long *map)
88a420e4e   Christoph Lameter   slub: add ability...
4102
  {
a973e9dd1   Christoph Lameter   Revert "unique en...
4103
  	void *addr = page_address(page);
88a420e4e   Christoph Lameter   slub: add ability...
4104
  	void *p;
39b264641   Christoph Lameter   slub: Store max n...
4105
  	bitmap_zero(map, page->objects);
5f80b13ae   Christoph Lameter   slub: get_map() f...
4106
  	get_map(s, page, map);
88a420e4e   Christoph Lameter   slub: add ability...
4107

224a88be4   Christoph Lameter   slub: for_each_ob...
4108
  	for_each_object(p, s, addr, page->objects)
45edfa580   Christoph Lameter   SLUB: include lif...
4109
4110
  		if (!test_bit(slab_index(p, s, addr), map))
  			add_location(t, s, get_track(s, p, alloc));
88a420e4e   Christoph Lameter   slub: add ability...
4111
4112
4113
4114
4115
  }
  
  static int list_locations(struct kmem_cache *s, char *buf,
  					enum track_item alloc)
  {
e374d4835   Harvey Harrison   slub: fix shadowe...
4116
  	int len = 0;
88a420e4e   Christoph Lameter   slub: add ability...
4117
  	unsigned long i;
68dff6a9a   Christoph Lameter   SLUB slab validat...
4118
  	struct loc_track t = { 0, 0, NULL };
88a420e4e   Christoph Lameter   slub: add ability...
4119
  	int node;
bbd7d57bf   Eric Dumazet   slub: Potential s...
4120
4121
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
  				     sizeof(unsigned long), GFP_KERNEL);
88a420e4e   Christoph Lameter   slub: add ability...
4122

bbd7d57bf   Eric Dumazet   slub: Potential s...
4123
4124
4125
  	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
  				     GFP_TEMPORARY)) {
  		kfree(map);
68dff6a9a   Christoph Lameter   SLUB slab validat...
4126
4127
  		return sprintf(buf, "Out of memory\n");
bbd7d57bf   Eric Dumazet   slub: Potential s...
4128
  	}
88a420e4e   Christoph Lameter   slub: add ability...
4129
4130
  	/* Push back cpu slabs */
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
4131
  	for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4e   Christoph Lameter   slub: add ability...
4132
4133
4134
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long flags;
  		struct page *page;
9e86943b6   Christoph Lameter   SLUB: use atomic_...
4135
  		if (!atomic_long_read(&n->nr_slabs))
88a420e4e   Christoph Lameter   slub: add ability...
4136
4137
4138
4139
  			continue;
  
  		spin_lock_irqsave(&n->list_lock, flags);
  		list_for_each_entry(page, &n->partial, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4140
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4141
  		list_for_each_entry(page, &n->full, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4142
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4143
4144
4145
4146
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	for (i = 0; i < t.count; i++) {
45edfa580   Christoph Lameter   SLUB: include lif...
4147
  		struct location *l = &t.loc[i];
88a420e4e   Christoph Lameter   slub: add ability...
4148

9c2462472   Hugh Dickins   KSYM_SYMBOL_LEN f...
4149
  		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4e   Christoph Lameter   slub: add ability...
4150
  			break;
e374d4835   Harvey Harrison   slub: fix shadowe...
4151
  		len += sprintf(buf + len, "%7ld ", l->count);
45edfa580   Christoph Lameter   SLUB: include lif...
4152
4153
  
  		if (l->addr)
62c70bce8   Joe Perches   mm: convert sprin...
4154
  			len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4e   Christoph Lameter   slub: add ability...
4155
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4156
  			len += sprintf(buf + len, "<not-available>");
45edfa580   Christoph Lameter   SLUB: include lif...
4157
4158
  
  		if (l->sum_time != l->min_time) {
e374d4835   Harvey Harrison   slub: fix shadowe...
4159
  			len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258e   Roman Zippel   remove div_long_l...
4160
4161
4162
  				l->min_time,
  				(long)div_u64(l->sum_time, l->count),
  				l->max_time);
45edfa580   Christoph Lameter   SLUB: include lif...
4163
  		} else
e374d4835   Harvey Harrison   slub: fix shadowe...
4164
  			len += sprintf(buf + len, " age=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4165
4166
4167
  				l->min_time);
  
  		if (l->min_pid != l->max_pid)
e374d4835   Harvey Harrison   slub: fix shadowe...
4168
  			len += sprintf(buf + len, " pid=%ld-%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4169
4170
  				l->min_pid, l->max_pid);
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4171
  			len += sprintf(buf + len, " pid=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4172
  				l->min_pid);
174596a0b   Rusty Russell   cpumask: convert mm/
4173
4174
  		if (num_online_cpus() > 1 &&
  				!cpumask_empty(to_cpumask(l->cpus)) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4175
4176
4177
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " cpus=");
  			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
174596a0b   Rusty Russell   cpumask: convert mm/
4178
  						 to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4179
  		}
62bc62a87   Christoph Lameter   page allocator: u...
4180
  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4181
4182
4183
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " nodes=");
  			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa580   Christoph Lameter   SLUB: include lif...
4184
4185
  					l->nodes);
  		}
e374d4835   Harvey Harrison   slub: fix shadowe...
4186
4187
  		len += sprintf(buf + len, "\n");
88a420e4e   Christoph Lameter   slub: add ability...
4188
4189
4190
  	}
  
  	free_loc_track(&t);
bbd7d57bf   Eric Dumazet   slub: Potential s...
4191
  	kfree(map);
88a420e4e   Christoph Lameter   slub: add ability...
4192
  	if (!t.count)
e374d4835   Harvey Harrison   slub: fix shadowe...
4193
4194
4195
  		len += sprintf(buf, "No data\n");
  	return len;
88a420e4e   Christoph Lameter   slub: add ability...
4196
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4197
  #endif
88a420e4e   Christoph Lameter   slub: add ability...
4198

a5a84755c   Christoph Lameter   slub: Move functi...
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
  #ifdef SLUB_RESILIENCY_TEST
  static void resiliency_test(void)
  {
  	u8 *p;
  
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
  
  	printk(KERN_ERR "SLUB resiliency testing\n");
  	printk(KERN_ERR "-----------------------\n");
  	printk(KERN_ERR "A. Corruption after allocation\n");
  
  	p = kzalloc(16, GFP_KERNEL);
  	p[16] = 0x12;
  	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
  			" 0x12->0x%p\n\n", p + 16);
  
  	validate_slab_cache(kmalloc_caches[4]);
  
  	/* Hmmm... The next two are dangerous */
  	p = kzalloc(32, GFP_KERNEL);
  	p[32 + sizeof(void *)] = 0x34;
  	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
  			" 0x34 -> -0x%p\n", p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  
  	validate_slab_cache(kmalloc_caches[5]);
  	p = kzalloc(64, GFP_KERNEL);
  	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  	*p = 0x56;
  	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
  									p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  	validate_slab_cache(kmalloc_caches[6]);
  
  	printk(KERN_ERR "\nB. Corruption after free\n");
  	p = kzalloc(128, GFP_KERNEL);
  	kfree(p);
  	*p = 0x78;
  	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[7]);
  
  	p = kzalloc(256, GFP_KERNEL);
  	kfree(p);
  	p[50] = 0x9a;
  	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
  			p);
  	validate_slab_cache(kmalloc_caches[8]);
  
  	p = kzalloc(512, GFP_KERNEL);
  	kfree(p);
  	p[512] = 0xab;
  	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[9]);
  }
  #else
  #ifdef CONFIG_SYSFS
  static void resiliency_test(void) {};
  #endif
  #endif
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4284
  #ifdef CONFIG_SYSFS
81819f0fc   Christoph Lameter   SLUB core
4285
  enum slab_stat_type {
205ab99dd   Christoph Lameter   slub: Update stat...
4286
4287
4288
4289
4290
  	SL_ALL,			/* All slabs */
  	SL_PARTIAL,		/* Only partially allocated slabs */
  	SL_CPU,			/* Only slabs used for cpu caches */
  	SL_OBJECTS,		/* Determine allocated objects not slabs */
  	SL_TOTAL		/* Determine object capacity not slabs */
81819f0fc   Christoph Lameter   SLUB core
4291
  };
205ab99dd   Christoph Lameter   slub: Update stat...
4292
  #define SO_ALL		(1 << SL_ALL)
81819f0fc   Christoph Lameter   SLUB core
4293
4294
4295
  #define SO_PARTIAL	(1 << SL_PARTIAL)
  #define SO_CPU		(1 << SL_CPU)
  #define SO_OBJECTS	(1 << SL_OBJECTS)
205ab99dd   Christoph Lameter   slub: Update stat...
4296
  #define SO_TOTAL	(1 << SL_TOTAL)
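  
  /*
   * show_slab_objects() below renders a combination of the SO_* flags above
   * as a single line of text: a grand total followed by per-node
   * "N<id>=<count>" pairs. It backs sysfs files such as objects, partial and
   * cpu_slabs.
   */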
81819f0fc   Christoph Lameter   SLUB core
4297

62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4298
4299
  static ssize_t show_slab_objects(struct kmem_cache *s,
  			    char *buf, unsigned long flags)
81819f0fc   Christoph Lameter   SLUB core
4300
4301
  {
  	unsigned long total = 0;
81819f0fc   Christoph Lameter   SLUB core
4302
4303
4304
4305
4306
4307
  	int node;
  	int x;
  	unsigned long *nodes;
  	unsigned long *per_cpu;
  
  	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4308
4309
  	if (!nodes)
  		return -ENOMEM;
81819f0fc   Christoph Lameter   SLUB core
4310
  	per_cpu = nodes + nr_node_ids;
205ab99dd   Christoph Lameter   slub: Update stat...
4311
4312
  	if (flags & SO_CPU) {
  		int cpu;
81819f0fc   Christoph Lameter   SLUB core
4313

205ab99dd   Christoph Lameter   slub: Update stat...
4314
  		for_each_possible_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4315
  			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
bc6697d8a   Eric Dumazet   slub: avoid poten...
4316
  			int node = ACCESS_ONCE(c->node);
49e225858   Christoph Lameter   slub: per cpu cac...
4317
  			struct page *page;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4318

bc6697d8a   Eric Dumazet   slub: avoid poten...
4319
  			if (node < 0)
205ab99dd   Christoph Lameter   slub: Update stat...
4320
  				continue;
bc6697d8a   Eric Dumazet   slub: avoid poten...
4321
4322
4323
4324
  			page = ACCESS_ONCE(c->page);
  			if (page) {
  				if (flags & SO_TOTAL)
  					x = page->objects;
205ab99dd   Christoph Lameter   slub: Update stat...
4325
  				else if (flags & SO_OBJECTS)
bc6697d8a   Eric Dumazet   slub: avoid poten...
4326
  					x = page->inuse;
81819f0fc   Christoph Lameter   SLUB core
4327
4328
  				else
  					x = 1;
205ab99dd   Christoph Lameter   slub: Update stat...
4329

81819f0fc   Christoph Lameter   SLUB core
4330
  				total += x;
bc6697d8a   Eric Dumazet   slub: avoid poten...
4331
  				nodes[node] += x;
81819f0fc   Christoph Lameter   SLUB core
4332
  			}
49e225858   Christoph Lameter   slub: per cpu cac...
4333
4334
4335
4336
  			page = c->partial;
  
  			if (page) {
  				x = page->pobjects;
bc6697d8a   Eric Dumazet   slub: avoid poten...
4337
4338
  				total += x;
  				nodes[node] += x;
49e225858   Christoph Lameter   slub: per cpu cac...
4339
  			}
bc6697d8a   Eric Dumazet   slub: avoid poten...
4340
  			per_cpu[node]++;
81819f0fc   Christoph Lameter   SLUB core
4341
4342
  		}
  	}
04d94879c   Christoph Lameter   slub: Avoid use o...
4343
  	lock_memory_hotplug();
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4344
  #ifdef CONFIG_SLUB_DEBUG
205ab99dd   Christoph Lameter   slub: Update stat...
4345
4346
4347
4348
4349
4350
4351
4352
4353
  	if (flags & SO_ALL) {
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
  
  			if (flags & SO_TOTAL)
  				x = atomic_long_read(&n->total_objects);
  			else if (flags & SO_OBJECTS)
  				x = atomic_long_read(&n->total_objects) -
  					count_partial(n, count_free);
81819f0fc   Christoph Lameter   SLUB core
4354

81819f0fc   Christoph Lameter   SLUB core
4355
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4356
  				x = atomic_long_read(&n->nr_slabs);
81819f0fc   Christoph Lameter   SLUB core
4357
4358
4359
  			total += x;
  			nodes[node] += x;
  		}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4360
4361
4362
  	} else
  #endif
  	if (flags & SO_PARTIAL) {
205ab99dd   Christoph Lameter   slub: Update stat...
4363
4364
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
81819f0fc   Christoph Lameter   SLUB core
4365

205ab99dd   Christoph Lameter   slub: Update stat...
4366
4367
4368
4369
  			if (flags & SO_TOTAL)
  				x = count_partial(n, count_total);
  			else if (flags & SO_OBJECTS)
  				x = count_partial(n, count_inuse);
81819f0fc   Christoph Lameter   SLUB core
4370
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4371
  				x = n->nr_partial;
81819f0fc   Christoph Lameter   SLUB core
4372
4373
4374
4375
  			total += x;
  			nodes[node] += x;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
4376
4377
  	x = sprintf(buf, "%lu", total);
  #ifdef CONFIG_NUMA
f64dc58c5   Christoph Lameter   Memoryless nodes:...
4378
  	for_each_node_state(node, N_NORMAL_MEMORY)
81819f0fc   Christoph Lameter   SLUB core
4379
4380
4381
4382
  		if (nodes[node])
  			x += sprintf(buf + x, " N%d=%lu",
  					node, nodes[node]);
  #endif
04d94879c   Christoph Lameter   slub: Avoid use o...
4383
  	unlock_memory_hotplug();
81819f0fc   Christoph Lameter   SLUB core
4384
4385
4386
4387
  	kfree(nodes);
  	return x + sprintf(buf + x, "\n");
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4388
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
4389
4390
4391
  static int any_slab_objects(struct kmem_cache *s)
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
4392

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4393
  	for_each_online_node(node) {
81819f0fc   Christoph Lameter   SLUB core
4394
  		struct kmem_cache_node *n = get_node(s, node);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4395
4396
  		if (!n)
  			continue;
4ea33e2dc   Benjamin Herrenschmidt   slub: fix atomic ...
4397
  		if (atomic_long_read(&n->total_objects))
81819f0fc   Christoph Lameter   SLUB core
4398
4399
4400
4401
  			return 1;
  	}
  	return 0;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4402
  #endif
81819f0fc   Christoph Lameter   SLUB core
4403
4404
  
  #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf6   Phil Carmody   treewide: fix pot...
4405
  #define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0fc   Christoph Lameter   SLUB core
4406
4407
4408
4409
4410
4411
4412
4413
  
  struct slab_attribute {
  	struct attribute attr;
  	ssize_t (*show)(struct kmem_cache *s, char *buf);
  	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  };
  
  #define SLAB_ATTR_RO(_name) \
ab067e99d   Vasiliy Kulikov   mm: restrict acce...
4414
4415
  	static struct slab_attribute _name##_attr = \
  	__ATTR(_name, 0400, _name##_show, NULL)
81819f0fc   Christoph Lameter   SLUB core
4416
4417
4418
  
  #define SLAB_ATTR(_name) \
  	static struct slab_attribute _name##_attr =  \
ab067e99d   Vasiliy Kulikov   mm: restrict acce...
4419
  	__ATTR(_name, 0600, _name##_show, _name##_store)
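  
  /*
   * Every SLAB_ATTR()/SLAB_ATTR_RO() use below creates a <name>_attr sysfs
   * attribute wired to <name>_show() (and, for writable attributes,
   * <name>_store()), backing the per-cache files under /sys/kernel/slab/.
   */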
81819f0fc   Christoph Lameter   SLUB core
4420

81819f0fc   Christoph Lameter   SLUB core
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
  static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d\n", s->size);
  }
  SLAB_ATTR_RO(slab_size);
  
  static ssize_t align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d\n", s->align);
  }
  SLAB_ATTR_RO(align);
  
  static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d\n", s->objsize);
  }
  SLAB_ATTR_RO(object_size);
  
  static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4444
4445
  	return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4446
4447
  }
  SLAB_ATTR_RO(objs_per_slab);
06b285dc3   Christoph Lameter   slub: Make the or...
4448
4449
4450
  static ssize_t order_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4451
4452
4453
4454
4455
4456
  	unsigned long order;
  	int err;
  
  	err = strict_strtoul(buf, 10, &order);
  	if (err)
  		return err;
06b285dc3   Christoph Lameter   slub: Make the or...
4457
4458
4459
4460
4461
4462
4463
  
  	if (order > slub_max_order || order < slub_min_order)
  		return -EINVAL;
  
  	calculate_sizes(s, order);
  	return length;
  }
81819f0fc   Christoph Lameter   SLUB core
4464
4465
  static ssize_t order_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4466
4467
  	return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4468
  }
06b285dc3   Christoph Lameter   slub: Make the or...
4469
  SLAB_ATTR(order);
81819f0fc   Christoph Lameter   SLUB core
4470

  static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%lu
  ", s->min_partial);
  }
  
  static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long min;
  	int err;
  
  	err = strict_strtoul(buf, 10, &min);
  	if (err)
  		return err;
  	set_min_partial(s, min);
  	return length;
  }
  SLAB_ATTR(min_partial);
  static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%u
  ", s->cpu_partial);
  }
  
  static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long objects;
  	int err;
  
  	err = strict_strtoul(buf, 10, &objects);
  	if (err)
  		return err;
  	if (objects && kmem_cache_debug(s))
  		return -EINVAL;
  
  	s->cpu_partial = objects;
  	flush_all(s);
  	return length;
  }
  SLAB_ATTR(cpu_partial);
  static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  {
  	if (!s->ctor)
  		return 0;
  	return sprintf(buf, "%pS
  ", s->ctor);
  }
  SLAB_ATTR_RO(ctor);
  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->refcount - 1);
  }
  SLAB_ATTR_RO(aliases);
  static ssize_t partial_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_PARTIAL);
  }
  SLAB_ATTR_RO(partial);
  
  static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_CPU);
  }
  SLAB_ATTR_RO(cpu_slabs);
  
  static ssize_t objects_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
  }
  SLAB_ATTR_RO(objects);
  static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
  }
  SLAB_ATTR_RO(objects_partial);
  static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
  {
  	int objects = 0;
  	int pages = 0;
  	int cpu;
  	int len;
  
  	for_each_online_cpu(cpu) {
  		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
  
  		if (page) {
  			pages += page->pages;
  			objects += page->pobjects;
  		}
  	}
  
  	len = sprintf(buf, "%d(%d)", objects, pages);
  
  #ifdef CONFIG_SMP
  	for_each_online_cpu(cpu) {
		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
  
  		if (page && len < PAGE_SIZE - 20)
  			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
  				page->pobjects, page->pages);
  	}
  #endif
	return len + sprintf(buf + len, "\n");
  }
  SLAB_ATTR_RO(slabs_cpu_partial);
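
/*
 * Illustrative output of slabs_cpu_partial (format from the sprintf calls
 * above, values made up):
 *
 *	84(4) C0=21(1) C1=63(3)
 *
 * i.e. total objects(pages) on the per cpu partial lists, followed by a
 * per cpu breakdown when CONFIG_SMP is set.
 */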
  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  }
  
  static ssize_t reclaim_account_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  	if (buf[0] == '1')
  		s->flags |= SLAB_RECLAIM_ACCOUNT;
  	return length;
  }
  SLAB_ATTR(reclaim_account);
  
  static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_HWCACHE_ALIGN));
  }
  SLAB_ATTR_RO(hwcache_align);
  
  #ifdef CONFIG_ZONE_DMA
  static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_CACHE_DMA));
  }
  SLAB_ATTR_RO(cache_dma);
  #endif
  
  static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DESTROY_BY_RCU));
  }
  SLAB_ATTR_RO(destroy_by_rcu);
  static ssize_t reserved_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->reserved);
  }
  SLAB_ATTR_RO(reserved);
  #ifdef CONFIG_SLUB_DEBUG
  static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL);
  }
  SLAB_ATTR_RO(slabs);
  static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
  }
  SLAB_ATTR_RO(total_objects);
  static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DEBUG_FREE));
  }
  
  static ssize_t sanity_checks_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_DEBUG_FREE;
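	/*
	 * Turning a debug option on at runtime also clears __CMPXCHG_DOUBLE
	 * below; the debug checks need the slab lock, so the lockless
	 * cmpxchg_double fastpath can no longer be used for this cache.
	 */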
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_DEBUG_FREE;
	}
  	return length;
  }
  SLAB_ATTR(sanity_checks);
  
  static ssize_t trace_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_TRACE));
  }
  
  static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_TRACE;
	}
  	return length;
  }
  SLAB_ATTR(trace);
  static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RED_ZONE));
  }
  
  static ssize_t red_zone_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_RED_ZONE;
	}
	calculate_sizes(s, -1);
  	return length;
  }
  SLAB_ATTR(red_zone);
  
  static ssize_t poison_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_POISON));
  }
  
  static ssize_t poison_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_POISON;
	}
	calculate_sizes(s, -1);
  	return length;
  }
  SLAB_ATTR(poison);
  
  static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_STORE_USER));
  }
  
  static ssize_t store_user_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_STORE_USER;
	}
	calculate_sizes(s, -1);
  	return length;
  }
  SLAB_ATTR(store_user);
  static ssize_t validate_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t validate_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
  	int ret = -EINVAL;
  
  	if (buf[0] == '1') {
  		ret = validate_slab_cache(s);
  		if (ret >= 0)
  			ret = length;
  	}
  	return ret;
  }
  SLAB_ATTR(validate);
  
  static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_ALLOC);
  }
  SLAB_ATTR_RO(alloc_calls);
  
  static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_FREE);
  }
  SLAB_ATTR_RO(free_calls);
  #endif /* CONFIG_SLUB_DEBUG */
  
  #ifdef CONFIG_FAILSLAB
  static ssize_t failslab_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_FAILSLAB));
  }
  
  static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_FAILSLAB;
  	if (buf[0] == '1')
  		s->flags |= SLAB_FAILSLAB;
  	return length;
  }
  SLAB_ATTR(failslab);
  #endif

  static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t shrink_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
  	if (buf[0] == '1') {
  		int rc = kmem_cache_shrink(s);
  
  		if (rc)
  			return rc;
  	} else
  		return -EINVAL;
  	return length;
  }
  SLAB_ATTR(shrink);
  #ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long ratio;
	int err;

	err = strict_strtoul(buf, 10, &ratio);
	if (err)
		return err;

	if (ratio <= 100)
		s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
  #endif
  #ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum  = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
  }
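
/*
 * Illustrative output of a statistics attribute (format from show_stat()
 * above, values made up):
 *
 *	86512 C0=42001 C1=44511
 *
 * i.e. the sum over all cpus followed by a per cpu breakdown on SMP.
 */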
  static void clear_stat(struct kmem_cache *s, enum stat_item si)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
  		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
  }
  #define STAT_ATTR(si, text) 					\
  static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
  {								\
  	return show_stat(s, buf, si);				\
  }								\
  static ssize_t text##_store(struct kmem_cache *s,		\
  				const char *buf, size_t length)	\
  {								\
  	if (buf[0] != '0')					\
  		return -EINVAL;					\
  	clear_stat(s, si);					\
  	return length;						\
  }								\
  SLAB_ATTR(text);						\
  
  STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
  STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
  STAT_ATTR(FREE_FASTPATH, free_fastpath);
  STAT_ATTR(FREE_SLOWPATH, free_slowpath);
  STAT_ATTR(FREE_FROZEN, free_frozen);
  STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
  STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
  STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
  STAT_ATTR(ALLOC_SLAB, alloc_slab);
  STAT_ATTR(ALLOC_REFILL, alloc_refill);
  STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
  STAT_ATTR(FREE_SLAB, free_slab);
  STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
  STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
  STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
  STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
  STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
  STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
  STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
  STAT_ATTR(ORDER_FALLBACK, order_fallback);
  STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
  STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
  STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
  STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
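
/*
 * Usage sketch (illustrative): each counter above becomes its own sysfs
 * file, e.g.
 *
 *	cat /sys/kernel/slab/<cache>/alloc_fastpath
 *	echo 0 > /sys/kernel/slab/<cache>/alloc_fastpath
 *
 * Writing anything other than '0' is rejected by the generated store
 * handler; writing '0' clears the per cpu counters via clear_stat().
 */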
  #endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&reserved_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif
  	NULL
  };
  
  static struct attribute_group slab_attr_group = {
  	.attrs = slab_attrs,
  };
  
  static ssize_t slab_attr_show(struct kobject *kobj,
  				struct attribute *attr,
  				char *buf)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->show)
  		return -EIO;
  
  	err = attribute->show(s, buf);
  
  	return err;
  }
  
  static ssize_t slab_attr_store(struct kobject *kobj,
  				struct attribute *attr,
  				const char *buf, size_t len)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->store)
  		return -EIO;
  
  	err = attribute->store(s, buf, len);
  
  	return err;
  }
  static void kmem_cache_release(struct kobject *kobj)
  {
  	struct kmem_cache *s = to_slab(kobj);
  	kfree(s->name);
  	kfree(s);
  }
  static const struct sysfs_ops slab_sysfs_ops = {
  	.show = slab_attr_show,
  	.store = slab_attr_store,
  };
  
  static struct kobj_type slab_ktype = {
  	.sysfs_ops = &slab_sysfs_ops,
  	.release = kmem_cache_release
  };
  
  static int uevent_filter(struct kset *kset, struct kobject *kobj)
  {
  	struct kobj_type *ktype = get_ktype(kobj);
  
  	if (ktype == &slab_ktype)
  		return 1;
  	return 0;
  }
  static const struct kset_uevent_ops slab_uevent_ops = {
  	.filter = uevent_filter,
  };
  static struct kset *slab_kset;
  
  #define ID_STR_LENGTH 64
  
  /* Create a unique string id for a slab cache:
   *
   * Format	:[flags-]size
   */
  static char *create_unique_id(struct kmem_cache *s)
  {
  	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  	char *p = name;
  
  	BUG_ON(!name);
  
  	*p++ = ':';
  	/*
  	 * First flags affecting slabcache operations. We will only
  	 * get here for aliasable slabs so we do not need to support
  	 * too many flags. The flags here must cover all flags that
  	 * are matched during merging to guarantee that the id is
  	 * unique.
  	 */
  	if (s->flags & SLAB_CACHE_DMA)
  		*p++ = 'd';
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		*p++ = 'a';
  	if (s->flags & SLAB_DEBUG_FREE)
  		*p++ = 'F';
  	if (!(s->flags & SLAB_NOTRACK))
  		*p++ = 't';
  	if (p != name + 1)
  		*p++ = '-';
  	p += sprintf(p, "%07d", s->size);
  	BUG_ON(p > name + ID_STR_LENGTH - 1);
  	return name;
  }
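
/*
 * Illustration (made-up cache): a SLAB_CACHE_DMA | SLAB_RECLAIM_ACCOUNT
 * cache of size 192 without SLAB_NOTRACK set would get the id
 * ":dat-0000192", which becomes the sysfs directory name that the per
 * cache symlinks point at.
 */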
  
  static int sysfs_slab_add(struct kmem_cache *s)
  {
  	int err;
  	const char *name;
  	int unmergeable;
  
  	if (slab_state < SYSFS)
  		/* Defer until later */
  		return 0;
  
  	unmergeable = slab_unmergeable(s);
  	if (unmergeable) {
  		/*
  		 * Slabcache can never be merged so we can use the name proper.
  		 * This is typically the case for debug situations. In that
  		 * case we can catch duplicate names easily.
  		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
  	kobject_uevent(&s->kobj, KOBJ_ADD);
  	if (!unmergeable) {
  		/* Setup first alias */
  		sysfs_slab_alias(s, s->name);
  		kfree(name);
  	}
  	return 0;
  }
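
/*
 * Resulting layout (illustrative): a mergeable cache shows up under
 * /sys/kernel/slab/ as a directory named after the unique id created
 * above (e.g. ":t-0000192"), and its proper name is added as a symlink
 * to that directory via sysfs_slab_alias(). Unmergeable caches use
 * their proper name directly.
 */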
  
  static void sysfs_slab_remove(struct kmem_cache *s)
  {
  	if (slab_state < SYSFS)
  		/*
  		 * Sysfs has not been setup yet so no need to remove the
  		 * cache from sysfs.
  		 */
  		return;
  	kobject_uevent(&s->kobj, KOBJ_REMOVE);
  	kobject_del(&s->kobj);
  	kobject_put(&s->kobj);
  }
  
  /*
   * Need to buffer aliases during bootup until sysfs becomes
   * available lest we lose that information.
   */
  struct saved_alias {
  	struct kmem_cache *s;
  	const char *name;
  	struct saved_alias *next;
  };
  static struct saved_alias *alias_list;
  
  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  {
  	struct saved_alias *al;
  
  	if (slab_state == SYSFS) {
  		/*
  		 * If we have a leftover link then remove it.
  		 */
  		sysfs_remove_link(&slab_kset->kobj, name);
  		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
  	}
  
  	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  	if (!al)
  		return -ENOMEM;
  
  	al->s = s;
  	al->name = name;
  	al->next = alias_list;
  	alias_list = al;
  	return 0;
  }
  
  static int __init slab_sysfs_init(void)
  {
	struct kmem_cache *s;
	int err;

	down_write(&slub_lock);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		up_write(&slub_lock);
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	up_write(&slub_lock);

  	resiliency_test();
  	return 0;
  }
  
  __initcall(slab_sysfs_init);
  #endif /* CONFIG_SYSFS */
  
  /*
   * The /proc/slabinfo ABI
   */
  #ifdef CONFIG_SLABINFO
  static void print_slabinfo_header(struct seq_file *m)
  {
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
  }
  
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
  
  	down_read(&slub_lock);
  	if (!n)
  		print_slabinfo_header(m);
  
  	return seq_list_start(&slab_caches, *pos);
  }
  
  static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  
  static void s_stop(struct seq_file *m, void *p)
  {
  	up_read(&slub_lock);
  }
  
  static int s_show(struct seq_file *m, void *p)
  {
  	unsigned long nr_partials = 0;
  	unsigned long nr_slabs = 0;
  	unsigned long nr_inuse = 0;
  	unsigned long nr_objs = 0;
  	unsigned long nr_free = 0;
  	struct kmem_cache *s;
  	int node;
  
  	s = list_entry(p, struct kmem_cache, list);
  
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  
  		if (!n)
  			continue;
  
  		nr_partials += n->nr_partial;
  		nr_slabs += atomic_long_read(&n->nr_slabs);
  		nr_objs += atomic_long_read(&n->total_objects);
  		nr_free += count_partial(n, count_free);
  	}
  	nr_inuse = nr_objs - nr_free;
  
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
  		   nr_objs, s->size, oo_objects(s->oo),
  		   (1 << oo_order(s->oo)));
  	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
  		   0UL);
	seq_putc(m, '\n');
  	return 0;
  }
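
/*
 * Illustrative /proc/slabinfo line produced by s_show() (values made up):
 *
 * kmalloc-192       2088   2184    192   21    1 : tunables    0    0    0 : slabdata    104    104      0
 *
 * The tunables fields are always reported as 0 since SLUB does not use
 * the SLAB-style per cpu queues they describe.
 */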
  static const struct seq_operations slabinfo_op = {
  	.start = s_start,
  	.next = s_next,
  	.stop = s_stop,
  	.show = s_show,
  };
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
  	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
  	return 0;
  }
  module_init(slab_proc_init);
  #endif /* CONFIG_SLABINFO */