mm/slub.c
  /*
   * SLUB: A slab allocator that limits cache line use instead of queuing
   * objects in per cpu and per node lists.
   *
   * The allocator synchronizes using per slab locks or atomic operations
   * and only uses a centralized lock to manage a pool of partial slabs.
   *
   * (C) 2007 SGI, Christoph Lameter
   * (C) 2011 Linux Foundation, Christoph Lameter
   */
  
  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/module.h>
  #include <linux/bit_spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/bitops.h>
  #include <linux/slab.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/kmemcheck.h>
  #include <linux/cpu.h>
  #include <linux/cpuset.h>
  #include <linux/mempolicy.h>
  #include <linux/ctype.h>
  #include <linux/debugobjects.h>
  #include <linux/kallsyms.h>
  #include <linux/memory.h>
  #include <linux/math64.h>
  #include <linux/fault-inject.h>
  #include <linux/stacktrace.h>

  #include <trace/events/kmem.h>
  /*
   * Lock order:
   *   1. slub_lock (Global Semaphore)
   *   2. node->list_lock
   *   3. slab_lock(page) (Only on some arches and for debugging)
   *
   *   slub_lock
   *
   *   The role of the slub_lock is to protect the list of all the slabs
   *   and to synchronize major metadata changes to slab cache structures.
   *
   *   The slab_lock is only used for debugging and on arches that do not
   *   have the ability to do a cmpxchg_double. It only protects the second
   *   double word in the page struct. Meaning
   *	A. page->freelist	-> List of free objects in a page
   *	B. page->counters	-> Counters of objects
   *	C. page->frozen		-> frozen state
   *
   *   If a slab is frozen then it is exempt from list management. It is not
   *   on any list. The processor that froze the slab is the one who can
   *   perform list operations on the page. Other processors may put objects
   *   onto the freelist but the processor that froze the slab is the only
   *   one that can retrieve the objects from the page's freelist.
   *
   *   The list_lock protects the partial and full list on each node and
   *   the partial slab counter. If taken then no new slabs may be added or
   *   removed from the lists nor may the number of partial slabs be modified.
   *   (Note that the total number of slabs is an atomic value that may be
   *   modified without taking the list lock).
   *
   *   The list_lock is a centralized lock and thus we avoid taking it as
   *   much as possible. As long as SLUB does not have to handle partial
   *   slabs, operations can continue without any centralized lock. F.e.
   *   allocating a long series of objects that fill up slabs does not require
   *   the list lock.
   *   Interrupts are disabled during allocation and deallocation in order to
   *   make the slab allocator safe to use in the context of an irq. In addition
   *   interrupts are disabled to ensure that the processor does not change
   *   while handling per_cpu slabs, due to kernel preemption.
   *
   * SLUB assigns one slab for allocation to each processor.
   * Allocations only occur from these slabs called cpu slabs.
   *
   * Slabs with free elements are kept on a partial list and during regular
   * operations no list for full slabs is used. If an object in a full slab is
   * freed then the slab will show up again on the partial lists.
   * We track full slabs for debugging purposes though because otherwise we
   * cannot scan all objects.
   *
   * Slabs are freed when they become empty. Teardown and setup is
   * minimal so we rely on the page allocators per cpu caches for
   * fast frees and allocs.
   *
   * Overloading of page flags that are otherwise used for LRU management.
   *
   * PageActive 		The slab is frozen and exempt from list processing.
   * 			This means that the slab is dedicated to a purpose
   * 			such as satisfying allocations for a specific
   * 			processor. Objects may be freed in the slab while
   * 			it is frozen but slab_free will then skip the usual
   * 			list operations. It is up to the processor holding
   * 			the slab to integrate the slab into the slab lists
   * 			when the slab is no longer needed.
   *
   * 			One use of this flag is to mark slabs that are
   * 			used for allocations. Then such a slab becomes a cpu
   * 			slab. The cpu slab may be equipped with an additional
   * 			freelist that allows lockless access to
   * 			free objects in addition to the regular freelist
   * 			that requires the slab lock.
   *
   * PageError		Slab requires special handling due to debug
   * 			options set. This moves	slab handling out of
   * 			the fast path and disables lockless freelists.
   */
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DEBUG_FREE)
  
  static inline int kmem_cache_debug(struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
  #else
  	return 0;
  #endif
  }

  /*
   * Issues still to be resolved:
   *
   * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
   *
   * - Variable sizing of the per node arrays
   */
  
  /* Enable to test recovery from slab corruption on boot */
  #undef SLUB_RESILIENCY_TEST
  /* Enable to log cmpxchg failures */
  #undef SLUB_DEBUG_CMPXCHG
  /*
   * Minimum number of partial slabs. These will be left on the partial
   * lists even if they are empty. kmem_cache_shrink may reclaim them.
   */
  #define MIN_PARTIAL 5

  /*
   * Maximum number of desirable partial slabs.
   * The existence of more partial slabs makes kmem_cache_shrink
   * sort the partial list by the number of objects in use.
   */
  #define MAX_PARTIAL 10
  #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  				SLAB_POISON | SLAB_STORE_USER)

  /*
   * Debugging flags that require metadata to be stored in the slab.  These get
   * disabled when slub_debug=O is used and a cache's min order increases with
   * metadata.
   */
  #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  
  /*
   * Set of flags that will prevent slab merging
   */
  #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB)
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  		SLAB_CACHE_DMA | SLAB_NOTRACK)

  #define OO_SHIFT	16
  #define OO_MASK		((1 << OO_SHIFT) - 1)
  #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

  /* Internal SLUB flags */
  #define __OBJECT_POISON		0x80000000UL /* Poison object */
  #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
  
  static int kmem_size = sizeof(struct kmem_cache);
  
  #ifdef CONFIG_SMP
  static struct notifier_block slab_notifier;
  #endif
  
  static enum {
  	DOWN,		/* No slab functionality available */
  	PARTIAL,	/* Kmem_cache_node works */
  	UP,		/* Everything works but does not show up in sysfs */
  	SYSFS		/* Sysfs up */
  } slab_state = DOWN;
  
  /* A list of all slab caches on the system */
  static DECLARE_RWSEM(slub_lock);
  static LIST_HEAD(slab_caches);

  /*
   * Tracking user of a slab.
   */
  #define TRACK_ADDRS_COUNT 16
  struct track {
  	unsigned long addr;	/* Called from address */
  #ifdef CONFIG_STACKTRACE
  	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
  #endif
  	int cpu;		/* Was running on cpu */
  	int pid;		/* Pid context */
  	unsigned long when;	/* When did the operation occur */
  };
  
  enum track_item { TRACK_ALLOC, TRACK_FREE };
  #ifdef CONFIG_SYSFS
  static int sysfs_slab_add(struct kmem_cache *);
  static int sysfs_slab_alias(struct kmem_cache *, const char *);
  static void sysfs_slab_remove(struct kmem_cache *);

  #else
  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
  							{ return 0; }
  static inline void sysfs_slab_remove(struct kmem_cache *s)
  {
  	kfree(s->name);
  	kfree(s);
  }

  #endif
  static inline void stat(const struct kmem_cache *s, enum stat_item si)
  {
  #ifdef CONFIG_SLUB_STATS
  	__this_cpu_inc(s->cpu_slab->stat[si]);
  #endif
  }
  /********************************************************************
   * 			Core slab cache functions
   *******************************************************************/
  
  int slab_is_available(void)
  {
  	return slab_state >= UP;
  }
  
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  /* Verify that a pointer has an address that is valid within a slab page */
  static inline int check_valid_pointer(struct kmem_cache *s,
  				struct page *page, const void *object)
  {
  	void *base;
  	if (!object)
  		return 1;
  	base = page_address(page);
  	if (object < base || object >= base + page->objects * s->size ||
  		(object - base) % s->size) {
  		return 0;
  	}
  
  	return 1;
  }
  static inline void *get_freepointer(struct kmem_cache *s, void *object)
  {
  	return *(void **)(object + s->offset);
  }
  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
  {
  	void *p;
  
  #ifdef CONFIG_DEBUG_PAGEALLOC
  	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
  #else
  	p = get_freepointer(s, object);
  #endif
  	return p;
  }
  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  {
  	*(void **)(object + s->offset) = fp;
  }
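  
  /*
   * Illustrative example (simplified; the real allocation and free paths
   * update the pair through cmpxchg_double_slab): the free objects of a
   * slab form a singly linked list threaded through the objects
   * themselves.  The link to the next free object is stored at offset
   * s->offset inside each free object, so walking the list and pushing an
   * object back onto it look roughly like:
   *
   *	void *p;
   *	unsigned long nr_free = 0;
   *
   *	for (p = page->freelist; p; p = get_freepointer(s, p))
   *		nr_free++;
   *
   *	set_freepointer(s, object, page->freelist);
   *	page->freelist = object;
   *
   * get_map() below walks the freelist in exactly this way.
   */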
  
  /* Loop over all objects in a slab */
  #define for_each_object(__p, __s, __addr, __objects) \
  	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
  			__p += (__s)->size)
  /* Determine object index from a given position */
  static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  {
  	return (p - addr) / s->size;
  }
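  
  /*
   * Worked example (illustrative): for a slab whose objects start at addr
   * and a cache with s->size == 128, for_each_object() visits addr,
   * addr + 128, addr + 256, ... up to addr + (objects - 1) * 128, and the
   * object at addr + 384 has slab_index() == 384 / 128 == 3.
   */
  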
  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->objsize;
  
  #endif
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  }
  static inline int order_objects(int order, unsigned long size, int reserved)
  {
  	return ((PAGE_SIZE << order) - reserved) / size;
  }
  static inline struct kmem_cache_order_objects oo_make(int order,
  		unsigned long size, int reserved)
  {
  	struct kmem_cache_order_objects x = {
  		(order << OO_SHIFT) + order_objects(order, size, reserved)
  	};
  
  	return x;
  }
  
  static inline int oo_order(struct kmem_cache_order_objects x)
  {
  	return x.x >> OO_SHIFT;
  }
  
  static inline int oo_objects(struct kmem_cache_order_objects x)
  {
  	return x.x & OO_MASK;
  }
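  
  /*
   * Worked example (illustrative, assuming PAGE_SIZE == 4096): for order 1,
   * size 256 and no reserved bytes, order_objects() yields
   * (4096 << 1) / 256 == 32 objects per slab.  oo_make() packs this as
   * (1 << OO_SHIFT) + 32, from which oo_order() and oo_objects() recover
   * 1 and 32 again.
   */
  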
  /*
   * Per slab locking using the pagelock
   */
  static __always_inline void slab_lock(struct page *page)
  {
  	bit_spin_lock(PG_locked, &page->flags);
  }
  
  static __always_inline void slab_unlock(struct page *page)
  {
  	__bit_spin_unlock(PG_locked, &page->flags);
  }
  /* Interrupts must be disabled (for the fallback code to work right) */
  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  	VM_BUG_ON(!irqs_disabled());
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			return 1;
  		}
  		slab_unlock(page);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
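  
  /*
   * Caller pattern (illustrative sketch only): the usual way to use
   * __cmpxchg_double_slab() is to read the current freelist/counters pair,
   * derive replacement values and retry until the pair is swapped
   * atomically:
   *
   *	do {
   *		old_freelist = page->freelist;
   *		old_counters = page->counters;
   *		... compute new_freelist / new_counters ...
   *	} while (!__cmpxchg_double_slab(s, page,
   *			old_freelist, old_counters,
   *			new_freelist, new_counters, "some caller"));
   */
  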
  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			local_irq_restore(flags);
  			return 1;
  		}
  		slab_unlock(page);
  		local_irq_restore(flags);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
  #ifdef CONFIG_SLUB_DEBUG
  /*
   * Determine a map of objects in use on a page.
   *
   * Node listlock must be held to guarantee that the page does
   * not vanish from under us.
   */
  static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
  {
  	void *p;
  	void *addr = page_address(page);
  
  	for (p = page->freelist; p; p = get_freepointer(s, p))
  		set_bit(slab_index(p, s, addr), map);
  }
  /*
   * Debug settings:
   */
  #ifdef CONFIG_SLUB_DEBUG_ON
  static int slub_debug = DEBUG_DEFAULT_FLAGS;
  #else
  static int slub_debug;
  #endif
  
  static char *slub_debug_slabs;
  static int disable_higher_order_debug;

  /*
   * Object debugging
   */
  static void print_section(char *text, u8 *addr, unsigned int length)
  {
  	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
  			length, 1);
  }
  static struct track *get_track(struct kmem_cache *s, void *object,
  	enum track_item alloc)
  {
  	struct track *p;
  
  	if (s->offset)
  		p = object + s->offset + sizeof(void *);
  	else
  		p = object + s->inuse;
  
  	return p + alloc;
  }
  
  static void set_track(struct kmem_cache *s, void *object,
  			enum track_item alloc, unsigned long addr)
  {
  	struct track *p = get_track(s, object, alloc);

  	if (addr) {
  #ifdef CONFIG_STACKTRACE
  		struct stack_trace trace;
  		int i;
  
  		trace.nr_entries = 0;
  		trace.max_entries = TRACK_ADDRS_COUNT;
  		trace.entries = p->addrs;
  		trace.skip = 3;
  		save_stack_trace(&trace);
  
  		/* See rant in lockdep.c */
  		if (trace.nr_entries != 0 &&
  		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
  			trace.nr_entries--;
  
  		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
  			p->addrs[i] = 0;
  #endif
  		p->addr = addr;
  		p->cpu = smp_processor_id();
  		p->pid = current->pid;
  		p->when = jiffies;
  	} else
  		memset(p, 0, sizeof(struct track));
  }
  static void init_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	set_track(s, object, TRACK_FREE, 0UL);
  	set_track(s, object, TRACK_ALLOC, 0UL);
  }
  
  static void print_track(const char *s, struct track *t)
  {
  	if (!t->addr)
  		return;
  	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
  		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
  #ifdef CONFIG_STACKTRACE
  	{
  		int i;
  		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
  			if (t->addrs[i])
  				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
  			else
  				break;
  	}
  #endif
  }
  
  static void print_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  
  	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  	print_track("Freed", get_track(s, object, TRACK_FREE));
  }
  
  static void print_page_info(struct page *page)
  {
  	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
  		page, page->objects, page->inuse, page->freelist, page->flags);
  
  }
  
  static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "========================================"
  			"=====================================\n");
  	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
  	printk(KERN_ERR "----------------------------------------"
  			"-------------------------------------\n\n");
  }
  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
  }
  
  static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned int off;	/* Offset of last byte */
  	u8 *addr = page_address(page);
  
  	print_tracking(s, p);
  
  	print_page_info(page);
  
  	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
  			p, p - addr, get_freepointer(s, p));
  
  	if (p > addr + 16)
  		print_section("Bytes b4 ", p - 16, 16);

  	print_section("Object ", p, min_t(unsigned long, s->objsize,
  				PAGE_SIZE));
  	if (s->flags & SLAB_RED_ZONE)
  		print_section("Redzone ", p + s->objsize,
  			s->inuse - s->objsize);
  	if (s->offset)
  		off = s->offset + sizeof(void *);
  	else
  		off = s->inuse;
  	if (s->flags & SLAB_STORE_USER)
  		off += 2 * sizeof(struct track);
  
  	if (off != s->size)
  		/* Beginning of the filler is the free pointer */
  		print_section("Padding ", p + off, s->size - off);
  
  	dump_stack();
  }
  
  static void object_err(struct kmem_cache *s, struct page *page,
  			u8 *object, char *reason)
  {
  	slab_bug(s, "%s", reason);
  	print_trailer(s, page, object);
  }
  static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	slab_bug(s, "%s", buf);
  	print_page_info(page);
  	dump_stack();
  }
  static void init_object(struct kmem_cache *s, void *object, u8 val)
  {
  	u8 *p = object;
  
  	if (s->flags & __OBJECT_POISON) {
  		memset(p, POISON_FREE, s->objsize - 1);
  		p[s->objsize - 1] = POISON_END;
  	}
  
  	if (s->flags & SLAB_RED_ZONE)
  		memset(p + s->objsize, val, s->inuse - s->objsize);
  }
  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  						void *from, void *to)
  {
  	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
  	memset(from, data, to - from);
  }
  
  static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  			u8 *object, char *what,
  			u8 *start, unsigned int value, unsigned int bytes)
  {
  	u8 *fault;
  	u8 *end;
  	fault = memchr_inv(start, value, bytes);
  	if (!fault)
  		return 1;
  
  	end = start + bytes;
  	while (end > fault && end[-1] == value)
  		end--;
  
  	slab_bug(s, "%s overwritten", what);
  	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
  					fault, end - 1, fault[0], value);
  	print_trailer(s, page, object);
  
  	restore_bytes(s, what, value, fault, end);
  	return 0;
  }
  /*
   * Object layout:
   *
   * object address
   * 	Bytes of the object to be managed.
   * 	If the freepointer may overlay the object then the free
   * 	pointer is the first word of the object.
   *
   * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
   * 	0xa5 (POISON_END)
   *
   * object + s->objsize
   * 	Padding to reach word boundary. This is also used for Redzoning.
   * 	Padding is extended by another word if Redzoning is enabled and
   * 	objsize == inuse.
   *
   * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
   * 	0xcc (RED_ACTIVE) for objects in use.
   *
   * object + s->inuse
   * 	Meta data starts here.
   *
   * 	A. Free pointer (if we cannot overwrite object on free)
   * 	B. Tracking data for SLAB_STORE_USER
   * 	C. Padding to reach required alignment boundary or at minimum
   * 		one word if debugging is on to be able to detect writes
   * 		before the word boundary.
   *
   *	Padding is done using 0x5a (POISON_INUSE)
   *
   * object + s->size
   * 	Nothing is used beyond s->size.
   *
   * If slabcaches are merged then the objsize and inuse boundaries are mostly
   * ignored. And therefore no slab options that rely on these boundaries
   * may be used with merged slabcaches.
   */
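  /*
   * Worked example (illustrative): for a cache with s->inuse == 64 whose
   * free pointer is kept outside the object (s->offset != 0) and which has
   * SLAB_STORE_USER set, the metadata ends 64 + sizeof(void *) +
   * 2 * sizeof(struct track) bytes into the object; everything from there
   * up to s->size is POISON_INUSE padding, which is what check_pad_bytes()
   * below verifies.
   */
  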
  static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned long off = s->inuse;	/* The end of info */
  
  	if (s->offset)
  		/* Freepointer is placed after the object. */
  		off += sizeof(void *);
  
  	if (s->flags & SLAB_STORE_USER)
  		/* We also have user information there */
  		off += 2 * sizeof(struct track);
  
  	if (s->size == off)
  		return 1;
  	return check_bytes_and_report(s, page, p, "Object padding",
  				p + off, POISON_INUSE, s->size - off);
  }
  /* Check the pad bytes at the end of a slab page */
  static int slab_pad_check(struct kmem_cache *s, struct page *page)
  {
  	u8 *start;
  	u8 *fault;
  	u8 *end;
  	int length;
  	int remainder;
  
  	if (!(s->flags & SLAB_POISON))
  		return 1;
  	start = page_address(page);
  	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
  	end = start + length;
  	remainder = length % s->size;
  	if (!remainder)
  		return 1;
  	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
  	if (!fault)
  		return 1;
  	while (end > fault && end[-1] == POISON_INUSE)
  		end--;
  
  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
  	print_section("Padding ", end - remainder, remainder);

  	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
  	return 0;
  }
  
  static int check_object(struct kmem_cache *s, struct page *page,
  					void *object, u8 val)
  {
  	u8 *p = object;
  	u8 *endobject = object + s->objsize;
  
  	if (s->flags & SLAB_RED_ZONE) {
  		if (!check_bytes_and_report(s, page, object, "Redzone",
  			endobject, val, s->inuse - s->objsize))
  			return 0;
  	} else {
  		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
  			check_bytes_and_report(s, page, p, "Alignment padding",
  				endobject, POISON_INUSE, s->inuse - s->objsize);
  		}
  	}
  
  	if (s->flags & SLAB_POISON) {
  		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
  			(!check_bytes_and_report(s, page, p, "Poison", p,
  					POISON_FREE, s->objsize - 1) ||
  			 !check_bytes_and_report(s, page, p, "Poison",
  				p + s->objsize - 1, POISON_END, 1)))
  			return 0;
  		/*
  		 * check_pad_bytes cleans up on its own.
  		 */
  		check_pad_bytes(s, page, p);
  	}
  	if (!s->offset && val == SLUB_RED_ACTIVE)
  		/*
  		 * Object and freepointer overlap. Cannot check
  		 * freepointer while object is allocated.
  		 */
  		return 1;
  
  	/* Check free pointer validity */
  	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  		object_err(s, page, p, "Freepointer corrupt");
  		/*
  		 * No choice but to zap it and thus lose the remainder
  		 * of the free objects in this slab. May cause
  		 * another error because the object count is now wrong.
  		 */
  		set_freepointer(s, p, NULL);
  		return 0;
  	}
  	return 1;
  }
  
  static int check_slab(struct kmem_cache *s, struct page *page)
  {
  	int maxobj;
  	VM_BUG_ON(!irqs_disabled());
  
  	if (!PageSlab(page)) {
  		slab_err(s, page, "Not a valid slab page");
  		return 0;
  	}

  	maxobj = order_objects(compound_order(page), s->size, s->reserved);
  	if (page->objects > maxobj) {
  		slab_err(s, page, "objects %u > max %u",
  			page->objects, maxobj);
  		return 0;
  	}
  	if (page->inuse > page->objects) {
  		slab_err(s, page, "inuse %u > max %u",
  			page->inuse, page->objects);
  		return 0;
  	}
  	/* Slab_pad_check fixes things up after itself */
  	slab_pad_check(s, page);
  	return 1;
  }
  
  /*
   * Determine if a certain object on a page is on the freelist. Must hold the
   * slab lock to guarantee that the chains are in a consistent state.
   */
  static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  {
  	int nr = 0;
  	void *fp;
  	void *object = NULL;
  	unsigned long max_objects;

  	fp = page->freelist;
  	while (fp && nr <= page->objects) {
  		if (fp == search)
  			return 1;
  		if (!check_valid_pointer(s, page, fp)) {
  			if (object) {
  				object_err(s, page, object,
  					"Freechain corrupt");
  				set_freepointer(s, object, NULL);
  				break;
  			} else {
  				slab_err(s, page, "Freepointer corrupt");
  				page->freelist = NULL;
  				page->inuse = page->objects;
  				slab_fix(s, "Freelist cleared");
  				return 0;
  			}
  			break;
  		}
  		object = fp;
  		fp = get_freepointer(s, object);
  		nr++;
  	}
  	max_objects = order_objects(compound_order(page), s->size, s->reserved);
  	if (max_objects > MAX_OBJS_PER_PAGE)
  		max_objects = MAX_OBJS_PER_PAGE;
  
  	if (page->objects != max_objects) {
  		slab_err(s, page, "Wrong number of objects. Found %d but "
  			"should be %d", page->objects, max_objects);
  		page->objects = max_objects;
  		slab_fix(s, "Number of objects adjusted.");
  	}
  	if (page->inuse != page->objects - nr) {
  		slab_err(s, page, "Wrong object count. Counter is %d but "
  			"counted were %d", page->inuse, page->objects - nr);
  		page->inuse = page->objects - nr;
  		slab_fix(s, "Object count adjusted.");
  	}
  	return search == NULL;
  }
  static void trace(struct kmem_cache *s, struct page *page, void *object,
  								int alloc)
  {
  	if (s->flags & SLAB_TRACE) {
  		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  			s->name,
  			alloc ? "alloc" : "free",
  			object, page->inuse,
  			page->freelist);
  
  		if (!alloc)
  			print_section("Object ", (void *)object, s->objsize);
  
  		dump_stack();
  	}
  }
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
   */
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  	lockdep_trace_alloc(flags);
  	might_sleep_if(flags & __GFP_WAIT);
  
  	return should_failslab(s->objsize, flags, s->flags);
  }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
  {
  	flags &= gfp_allowed_mask;
  	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
  	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
  }
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x)
  {
  	kmemleak_free_recursive(x, s->flags);

  	/*
  	 * Trouble is that we may no longer disable interrupts in the fast
  	 * path, so in order to make the debug calls that expect irqs to be
  	 * disabled we need to disable interrupts temporarily.
  	 */
  #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		kmemcheck_slab_free(s, x, s->objsize);
  		debug_check_no_locks_freed(x, s->objsize);
  		local_irq_restore(flags);
  	}
  #endif
  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
  		debug_check_no_obj_freed(x, s->objsize);
  }
  
  /*
   * Tracking of fully allocated slabs for debugging purposes.
   *
   * list_lock must be held.
   */
  static void add_full(struct kmem_cache *s,
  	struct kmem_cache_node *n, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_add(&page->lru, &n->full);
  }
  /*
   * list_lock must be held.
   */
  static void remove_full(struct kmem_cache *s, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_del(&page->lru);
  }
  /* Tracking of the number of slabs for debugging purposes */
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  {
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	/*
  	 * May be called early in order to allocate a slab for the
  	 * kmem_cache_node structure. Solve the chicken-egg
  	 * dilemma by deferring the increment of the count during
  	 * bootstrap (see early_kmem_cache_node_alloc).
  	 */
  	if (n) {
  		atomic_long_inc(&n->nr_slabs);
  		atomic_long_add(objects, &n->total_objects);
  	}
  }
  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	atomic_long_dec(&n->nr_slabs);
  	atomic_long_sub(objects, &n->total_objects);
  }
  
  /* Object debug checks for alloc/free paths */
  static void setup_object_debug(struct kmem_cache *s, struct page *page,
  								void *object)
  {
  	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  		return;
  	init_object(s, object, SLUB_RED_INACTIVE);
  	init_tracking(s, object);
  }
  static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
  					void *object, unsigned long addr)
  {
  	if (!check_slab(s, page))
  		goto bad;
  	if (!check_valid_pointer(s, page, object)) {
  		object_err(s, page, object, "Freelist Pointer check fails");
  		goto bad;
  	}
  	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
  		goto bad;

  	/* Success. Perform special debug activities for allocs */
  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_ALLOC, addr);
  	trace(s, page, object, 1);
  	init_object(s, object, SLUB_RED_ACTIVE);
  	return 1;

  bad:
  	if (PageSlab(page)) {
  		/*
  		 * If this is a slab page then lets do the best we can
  		 * to avoid issues in the future. Marking all objects
  		 * as used avoids touching the remaining objects.
  		 */
  		slab_fix(s, "Marking all objects used");
  		page->inuse = page->objects;
  		page->freelist = NULL;
  	}
  	return 0;
  }
  static noinline int free_debug_processing(struct kmem_cache *s,
  		 struct page *page, void *object, unsigned long addr)
  {
  	unsigned long flags;
  	int rc = 0;
  
  	local_irq_save(flags);
  	slab_lock(page);
  	if (!check_slab(s, page))
  		goto fail;
  
  	if (!check_valid_pointer(s, page, object)) {
  		slab_err(s, page, "Invalid object pointer 0x%p", object);
  		goto fail;
  	}
  
  	if (on_freelist(s, page, object)) {
  		object_err(s, page, object, "Object already free");
  		goto fail;
  	}
  	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
  		goto out;
  
  	if (unlikely(s != page->slab)) {
  		if (!PageSlab(page)) {
  			slab_err(s, page, "Attempt to free object(0x%p) "
  				"outside of slab", object);
  		} else if (!page->slab) {
  			printk(KERN_ERR
  				"SLUB <none>: no slab for object 0x%p.\n",
  						object);
  			dump_stack();
  		} else
  			object_err(s, page, object,
  					"page slab pointer corrupt.");
  		goto fail;
  	}

  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_FREE, addr);
  	trace(s, page, object, 0);
  	init_object(s, object, SLUB_RED_INACTIVE);
  	rc = 1;
  out:
  	slab_unlock(page);
  	local_irq_restore(flags);
  	return rc;

  fail:
  	slab_fix(s, "Object at 0x%p not freed", object);
  	goto out;
  }
  static int __init setup_slub_debug(char *str)
  {
  	slub_debug = DEBUG_DEFAULT_FLAGS;
  	if (*str++ != '=' || !*str)
  		/*
  		 * No options specified. Switch on full debugging.
  		 */
  		goto out;
  
  	if (*str == ',')
  		/*
  		 * No options but restriction on slabs. This means full
  		 * debugging for slabs matching a pattern.
  		 */
  		goto check_slabs;
  	if (tolower(*str) == 'o') {
  		/*
  		 * Avoid enabling debugging on caches if its minimum order
  		 * would increase as a result.
  		 */
  		disable_higher_order_debug = 1;
  		goto out;
  	}
  	slub_debug = 0;
  	if (*str == '-')
  		/*
  		 * Switch off all debugging measures.
  		 */
  		goto out;
  
  	/*
  	 * Determine which debug features should be switched on
  	 */
  	for (; *str && *str != ','; str++) {
  		switch (tolower(*str)) {
  		case 'f':
  			slub_debug |= SLAB_DEBUG_FREE;
  			break;
  		case 'z':
  			slub_debug |= SLAB_RED_ZONE;
  			break;
  		case 'p':
  			slub_debug |= SLAB_POISON;
  			break;
  		case 'u':
  			slub_debug |= SLAB_STORE_USER;
  			break;
  		case 't':
  			slub_debug |= SLAB_TRACE;
  			break;
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
1136
1137
1138
  		case 'a':
  			slub_debug |= SLAB_FAILSLAB;
  			break;
f0630fff5   Christoph Lameter   SLUB: support slu...
1139
1140
  		default:
  			printk(KERN_ERR "slub_debug option '%c' "
064287807   Pekka Enberg   SLUB: Fix coding ...
1141
1142
  				"unknown. skipped
  ", *str);
f0630fff5   Christoph Lameter   SLUB: support slu...
1143
  		}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1144
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
1145
  check_slabs:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1146
1147
  	if (*str == ',')
  		slub_debug_slabs = str + 1;
f0630fff5   Christoph Lameter   SLUB: support slu...
1148
  out:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1149
1150
1151
1152
  	return 1;
  }
  
  __setup("slub_debug", setup_slub_debug);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1153
1154
  static unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1155
  	void (*ctor)(void *))
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1156
1157
  {
  	/*
e153362a5   Christoph Lameter   slub: Remove objs...
1158
  	 * Enable debugging if selected on the kernel commandline.
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1159
  	 */
e153362a5   Christoph Lameter   slub: Remove objs...
1160
  	if (slub_debug && (!slub_debug_slabs ||
3de472138   David Rientjes   slub: use size an...
1161
1162
  		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
  		flags |= slub_debug;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1163
1164
  
  	return flags;
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1165
1166
  }
  #else
3ec097421   Christoph Lameter   SLUB: Simplify de...
1167
1168
  static inline void setup_object_debug(struct kmem_cache *s,
  			struct page *page, void *object) {}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1169

3ec097421   Christoph Lameter   SLUB: Simplify de...
1170
  static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1171
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1172

3ec097421   Christoph Lameter   SLUB: Simplify de...
1173
  static inline int free_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1174
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1175

41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1176
1177
1178
  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  			{ return 1; }
  static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb19336   Christoph Lameter   SLUB: Pass active...
1179
  			void *object, u8 val) { return 1; }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1180
1181
  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
  					struct page *page) {}
2cfb7455d   Christoph Lameter   slub: Rework allo...
1182
  static inline void remove_full(struct kmem_cache *s, struct page *page) {}
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1183
1184
  static inline unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1185
  	void (*ctor)(void *))
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1186
1187
1188
  {
  	return flags;
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1189
  #define slub_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1190

fdaa45e95   Ingo Molnar   slub: Fix build e...
1191
  #define disable_higher_order_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1192
1193
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  							{ return 0; }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1194
1195
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  							{ return 0; }
205ab99dd   Christoph Lameter   slub: Update stat...
1196
1197
1198
1199
  static inline void inc_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
  static inline void dec_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
7d550c56a   Christoph Lameter   slub: Add dummy f...
1200
1201
1202
1203
1204
1205
1206
1207
  
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  							{ return 0; }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  		void *object) {}
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
1208
  #endif /* CONFIG_SLUB_DEBUG */
205ab99dd   Christoph Lameter   slub: Update stat...
1209

81819f0fc   Christoph Lameter   SLUB core
1210
1211
1212
  /*
   * Slab allocation and freeing
   */
65c3376aa   Christoph Lameter   slub: Fallback to...
1213
1214
1215
1216
  static inline struct page *alloc_slab_page(gfp_t flags, int node,
  					struct kmem_cache_order_objects oo)
  {
  	int order = oo_order(oo);
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1217
  	flags |= __GFP_NOTRACK;
2154a3363   Christoph Lameter   slub: Use a const...
1218
  	if (node == NUMA_NO_NODE)
65c3376aa   Christoph Lameter   slub: Fallback to...
1219
1220
  		return alloc_pages(flags, order);
  	else
6b65aaf30   Minchan Kim   slub: Use alloc_p...
1221
  		return alloc_pages_exact_node(node, flags, order);
65c3376aa   Christoph Lameter   slub: Fallback to...
1222
  }
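  
  /*
   * Illustrative note (example values, not from the original source):
   * struct kmem_cache_order_objects packs the page order and the object
   * count per slab into one word, unpacked by oo_order()/oo_objects()
   * above. Ignoring alignment and debug metadata, a cache of 64 byte
   * objects backed by order-1 slabs on 4K pages would have
   * oo_order(oo) == 1 and oo_objects(oo) == 8192 / 64 == 128.
   */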
81819f0fc   Christoph Lameter   SLUB core
1223
1224
  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
1225
  	struct page *page;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1226
  	struct kmem_cache_order_objects oo = s->oo;
ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1227
  	gfp_t alloc_gfp;
81819f0fc   Christoph Lameter   SLUB core
1228

7e0528dad   Christoph Lameter   slub: Push irq di...
1229
1230
1231
1232
  	flags &= gfp_allowed_mask;
  
  	if (flags & __GFP_WAIT)
  		local_irq_enable();
b7a49f0d4   Christoph Lameter   slub: Determine g...
1233
  	flags |= s->allocflags;
e12ba74d8   Mel Gorman   Group short-lived...
1234

ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1235
1236
1237
1238
1239
1240
1241
  	/*
  	 * Let the initial higher-order allocation fail under memory pressure
  	 * so we fall back to the minimum order allocation.
  	 */
  	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
  
  	page = alloc_slab_page(alloc_gfp, node, oo);
65c3376aa   Christoph Lameter   slub: Fallback to...
1242
1243
1244
1245
1246
1247
1248
  	if (unlikely(!page)) {
  		oo = s->min;
  		/*
  		 * Allocation may have failed due to fragmentation.
  		 * Try a lower order alloc if possible
  		 */
  		page = alloc_slab_page(flags, node, oo);
81819f0fc   Christoph Lameter   SLUB core
1249

7e0528dad   Christoph Lameter   slub: Push irq di...
1250
1251
  		if (page)
  			stat(s, ORDER_FALLBACK);
65c3376aa   Christoph Lameter   slub: Fallback to...
1252
  	}
5a896d9e7   Vegard Nossum   slub: add hooks f...
1253

7e0528dad   Christoph Lameter   slub: Push irq di...
1254
1255
1256
1257
1258
  	if (flags & __GFP_WAIT)
  		local_irq_disable();
  
  	if (!page)
  		return NULL;
5a896d9e7   Vegard Nossum   slub: add hooks f...
1259
  	if (kmemcheck_enabled
5086c389c   Amerigo Wang   SLUB: Fix some co...
1260
  		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
  		int pages = 1 << oo_order(oo);
  
  		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
  
  		/*
  		 * Objects from caches that have a constructor don't get
  		 * cleared when they're allocated, so we need to do it here.
  		 */
  		if (s->ctor)
  			kmemcheck_mark_uninitialized_pages(page, pages);
  		else
  			kmemcheck_mark_unallocated_pages(page, pages);
5a896d9e7   Vegard Nossum   slub: add hooks f...
1273
  	}
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1274
  	page->objects = oo_objects(oo);
81819f0fc   Christoph Lameter   SLUB core
1275
1276
1277
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
65c3376aa   Christoph Lameter   slub: Fallback to...
1278
  		1 << oo_order(oo));
81819f0fc   Christoph Lameter   SLUB core
1279
1280
1281
1282
1283
1284
1285
  
  	return page;
  }
  
  static void setup_object(struct kmem_cache *s, struct page *page,
  				void *object)
  {
3ec097421   Christoph Lameter   SLUB: Simplify de...
1286
  	setup_object_debug(s, page, object);
4f1049345   Christoph Lameter   slab allocators: ...
1287
  	if (unlikely(s->ctor))
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1288
  		s->ctor(object);
81819f0fc   Christoph Lameter   SLUB core
1289
1290
1291
1292
1293
  }
  
  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
1294
  	void *start;
81819f0fc   Christoph Lameter   SLUB core
1295
1296
  	void *last;
  	void *p;
6cb062296   Christoph Lameter   Categorize GFP flags
1297
  	BUG_ON(flags & GFP_SLAB_BUG_MASK);
81819f0fc   Christoph Lameter   SLUB core
1298

6cb062296   Christoph Lameter   Categorize GFP flags
1299
1300
  	page = allocate_slab(s,
  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
81819f0fc   Christoph Lameter   SLUB core
1301
1302
  	if (!page)
  		goto out;
205ab99dd   Christoph Lameter   slub: Update stat...
1303
  	inc_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1304
1305
  	page->slab = s;
  	page->flags |= 1 << PG_slab;
81819f0fc   Christoph Lameter   SLUB core
1306
1307
  
  	start = page_address(page);
81819f0fc   Christoph Lameter   SLUB core
1308
1309
  
  	if (unlikely(s->flags & SLAB_POISON))
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1310
  		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
81819f0fc   Christoph Lameter   SLUB core
1311
1312
  
  	last = start;
224a88be4   Christoph Lameter   slub: for_each_ob...
1313
  	for_each_object(p, s, start, page->objects) {
81819f0fc   Christoph Lameter   SLUB core
1314
1315
1316
1317
1318
  		setup_object(s, page, last);
  		set_freepointer(s, last, p);
  		last = p;
  	}
  	setup_object(s, page, last);
a973e9dd1   Christoph Lameter   Revert "unique en...
1319
  	set_freepointer(s, last, NULL);
81819f0fc   Christoph Lameter   SLUB core
1320
1321
  
  	page->freelist = start;
e6e82ea11   Christoph Lameter   slub: Prepare inu...
1322
  	page->inuse = page->objects;
8cb0a5068   Christoph Lameter   slub: Move page->...
1323
  	page->frozen = 1;
81819f0fc   Christoph Lameter   SLUB core
1324
  out:
81819f0fc   Christoph Lameter   SLUB core
1325
1326
1327
1328
1329
  	return page;
  }
  
  static void __free_slab(struct kmem_cache *s, struct page *page)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1330
1331
  	int order = compound_order(page);
  	int pages = 1 << order;
81819f0fc   Christoph Lameter   SLUB core
1332

af537b0a6   Christoph Lameter   slub: Use kmem_ca...
1333
  	if (kmem_cache_debug(s)) {
81819f0fc   Christoph Lameter   SLUB core
1334
1335
1336
  		void *p;
  
  		slab_pad_check(s, page);
224a88be4   Christoph Lameter   slub: for_each_ob...
1337
1338
  		for_each_object(p, s, page_address(page),
  						page->objects)
f7cb19336   Christoph Lameter   SLUB: Pass active...
1339
  			check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0fc   Christoph Lameter   SLUB core
1340
  	}
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1341
  	kmemcheck_free_shadow(page, compound_order(page));
5a896d9e7   Vegard Nossum   slub: add hooks f...
1342

81819f0fc   Christoph Lameter   SLUB core
1343
1344
1345
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
064287807   Pekka Enberg   SLUB: Fix coding ...
1346
  		-pages);
81819f0fc   Christoph Lameter   SLUB core
1347

49bd5221c   Christoph Lameter   slub: Move map/fl...
1348
1349
  	__ClearPageSlab(page);
  	reset_page_mapcount(page);
1eb5ac646   Nick Piggin   mm: SLUB fix recl...
1350
1351
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += pages;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1352
  	__free_pages(page, order);
81819f0fc   Christoph Lameter   SLUB core
1353
  }
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1354
1355
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
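  
  /*
   * Descriptive note: when struct rcu_head no longer fits over page->lru,
   * need_reserve_slab_rcu is true and free_slab() below stores the
   * rcu_head in the s->reserved bytes kept at the end of each slab
   * instead of overlaying it on the page's lru field.
   */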
81819f0fc   Christoph Lameter   SLUB core
1356
1357
1358
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct page *page;
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1359
1360
1361
1362
  	if (need_reserve_slab_rcu)
  		page = virt_to_head_page(h);
  	else
  		page = container_of((struct list_head *)h, struct page, lru);
81819f0fc   Christoph Lameter   SLUB core
1363
1364
1365
1366
1367
1368
  	__free_slab(page->slab, page);
  }
  
  static void free_slab(struct kmem_cache *s, struct page *page)
  {
  	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
  		struct rcu_head *head;
  
  		if (need_reserve_slab_rcu) {
  			int order = compound_order(page);
  			int offset = (PAGE_SIZE << order) - s->reserved;
  
  			VM_BUG_ON(s->reserved != sizeof(*head));
  			head = page_address(page) + offset;
  		} else {
  			/*
  			 * RCU free overloads the RCU head over the LRU
  			 */
  			head = (void *)&page->lru;
  		}
81819f0fc   Christoph Lameter   SLUB core
1383
1384
1385
1386
1387
1388
1389
1390
  
  		call_rcu(head, rcu_free_slab);
  	} else
  		__free_slab(s, page);
  }
  
  static void discard_slab(struct kmem_cache *s, struct page *page)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
1391
  	dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1392
1393
1394
1395
  	free_slab(s, page);
  }
  
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1396
1397
1398
   * Management of partially allocated slabs.
   *
   * list_lock must be held.
81819f0fc   Christoph Lameter   SLUB core
1399
   */
5cc6eee8a   Christoph Lameter   slub: explicit li...
1400
  static inline void add_partial(struct kmem_cache_node *n,
7c2e132c5   Christoph Lameter   Add parameter to ...
1401
  				struct page *page, int tail)
81819f0fc   Christoph Lameter   SLUB core
1402
  {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1403
  	n->nr_partial++;
136333d10   Shaohua Li   slub: explicitly ...
1404
  	if (tail == DEACTIVATE_TO_TAIL)
7c2e132c5   Christoph Lameter   Add parameter to ...
1405
1406
1407
  		list_add_tail(&page->lru, &n->partial);
  	else
  		list_add(&page->lru, &n->partial);
81819f0fc   Christoph Lameter   SLUB core
1408
  }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1409
1410
1411
1412
  /*
   * list_lock must be held.
   */
  static inline void remove_partial(struct kmem_cache_node *n,
62e346a83   Christoph Lameter   slub: extract com...
1413
1414
1415
1416
1417
  					struct page *page)
  {
  	list_del(&page->lru);
  	n->nr_partial--;
  }
81819f0fc   Christoph Lameter   SLUB core
1418
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1419
1420
   * Lock slab, remove from the partial list and put the object into the
   * per cpu freelist.
81819f0fc   Christoph Lameter   SLUB core
1421
   *
497b66f2e   Christoph Lameter   slub: return obje...
1422
1423
   * Returns a list of objects or NULL if it fails.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1424
   * Must hold list_lock.
81819f0fc   Christoph Lameter   SLUB core
1425
   */
497b66f2e   Christoph Lameter   slub: return obje...
1426
  static inline void *acquire_slab(struct kmem_cache *s,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1427
  		struct kmem_cache_node *n, struct page *page,
49e225858   Christoph Lameter   slub: per cpu cac...
1428
  		int mode)
81819f0fc   Christoph Lameter   SLUB core
1429
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1430
1431
1432
  	void *freelist;
  	unsigned long counters;
  	struct page new;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1433
1434
1435
1436
1437
1438
1439
1440
1441
  	/*
  	 * Zap the freelist and set the frozen bit.
  	 * The old freelist is the list of objects for the
  	 * per cpu allocation list.
  	 */
  	do {
  		freelist = page->freelist;
  		counters = page->counters;
  		new.counters = counters;
49e225858   Christoph Lameter   slub: per cpu cac...
1442
1443
  		if (mode)
  			new.inuse = page->objects;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1444
1445
1446
  
  		VM_BUG_ON(new.frozen);
  		new.frozen = 1;
1d07171c5   Christoph Lameter   slub: disable int...
1447
  	} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1448
1449
1450
1451
1452
  			freelist, counters,
  			NULL, new.counters,
  			"lock and freeze"));
  
  	remove_partial(n, page);
49e225858   Christoph Lameter   slub: per cpu cac...
1453
  	return freelist;
81819f0fc   Christoph Lameter   SLUB core
1454
  }
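  
  /*
   * Descriptive note: acquire_slab() relies on __cmpxchg_double_slab() to
   * clear page->freelist and set the frozen bit in one atomic step. Once
   * that succeeds the slab is exempt from list management (see the lock
   * ordering comment at the top of this file) and remove_partial() takes
   * it off the node's partial list.
   */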
49e225858   Christoph Lameter   slub: per cpu cac...
1455
  static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
81819f0fc   Christoph Lameter   SLUB core
1456
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1457
   * Try to allocate a partial slab from a specific node.
81819f0fc   Christoph Lameter   SLUB core
1458
   */
497b66f2e   Christoph Lameter   slub: return obje...
1459
  static void *get_partial_node(struct kmem_cache *s,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1460
  		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1461
  {
49e225858   Christoph Lameter   slub: per cpu cac...
1462
1463
  	struct page *page, *page2;
  	void *object = NULL;
81819f0fc   Christoph Lameter   SLUB core
1464
1465
1466
1467
  
  	/*
  	 * Racy check. If we mistakenly see no partial slabs then we
  	 * just allocate an empty slab. If we mistakenly try to get a
672bba3a4   Christoph Lameter   SLUB: update comm...
1468
1469
  	 * partial slab and there is none available then get_partial_node()
  	 * will return NULL.
81819f0fc   Christoph Lameter   SLUB core
1470
1471
1472
1473
1474
  	 */
  	if (!n || !n->nr_partial)
  		return NULL;
  
  	spin_lock(&n->list_lock);
49e225858   Christoph Lameter   slub: per cpu cac...
1475
  	list_for_each_entry_safe(page, page2, &n->partial, lru) {
12d79634f   Alex,Shi   slub: Code optimi...
1476
  		void *t = acquire_slab(s, n, page, object == NULL);
49e225858   Christoph Lameter   slub: per cpu cac...
1477
1478
1479
1480
  		int available;
  
  		if (!t)
  			break;
12d79634f   Alex,Shi   slub: Code optimi...
1481
  		if (!object) {
49e225858   Christoph Lameter   slub: per cpu cac...
1482
1483
1484
  			c->page = page;
  			c->node = page_to_nid(page);
  			stat(s, ALLOC_FROM_PARTIAL);
49e225858   Christoph Lameter   slub: per cpu cac...
1485
1486
1487
1488
1489
1490
1491
1492
  			object = t;
  			available =  page->objects - page->inuse;
  		} else {
  			page->freelist = t;
  			available = put_cpu_partial(s, page, 0);
  		}
  		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
  			break;
497b66f2e   Christoph Lameter   slub: return obje...
1493
  	}
81819f0fc   Christoph Lameter   SLUB core
1494
  	spin_unlock(&n->list_lock);
497b66f2e   Christoph Lameter   slub: return obje...
1495
  	return object;
81819f0fc   Christoph Lameter   SLUB core
1496
1497
1498
  }
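  
  /*
   * Illustrative example (the value 30 is hypothetical): with
   * s->cpu_partial == 30, the loop above keeps taking partial slabs from
   * the node (the first refills the cpu slab, later ones are stashed via
   * put_cpu_partial()) until the objects gathered exceed 30 / 2 == 15 or
   * the list is exhausted. With debugging enabled only one slab is taken.
   */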
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1499
   * Get a page from somewhere. Search in increasing NUMA distances.
81819f0fc   Christoph Lameter   SLUB core
1500
   */
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1501
1502
  static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
  		struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1503
1504
1505
  {
  #ifdef CONFIG_NUMA
  	struct zonelist *zonelist;
dd1a239f6   Mel Gorman   mm: have zonelist...
1506
  	struct zoneref *z;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1507
1508
  	struct zone *zone;
  	enum zone_type high_zoneidx = gfp_zone(flags);
497b66f2e   Christoph Lameter   slub: return obje...
1509
  	void *object;
81819f0fc   Christoph Lameter   SLUB core
1510
1511
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1512
1513
1514
1515
  	 * The defrag ratio allows a configuration of the tradeoffs between
  	 * inter node defragmentation and node local allocations. A lower
  	 * defrag_ratio increases the tendency to do local allocations
  	 * instead of attempting to obtain partial slabs from other nodes.
81819f0fc   Christoph Lameter   SLUB core
1516
  	 *
672bba3a4   Christoph Lameter   SLUB: update comm...
1517
1518
1519
1520
  	 * If the defrag_ratio is set to 0 then kmalloc() always
  	 * returns node local objects. If the ratio is higher then kmalloc()
  	 * may return off node objects because partial slabs are obtained
  	 * from other nodes and filled up.
81819f0fc   Christoph Lameter   SLUB core
1521
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
1522
  	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
672bba3a4   Christoph Lameter   SLUB: update comm...
1523
1524
1525
1526
1527
  	 * defrag_ratio = 1000) then every (well almost) allocation will
  	 * first attempt to defrag slab caches on other nodes. This means
  	 * scanning over all nodes to look for partial slabs which may be
  	 * expensive if we do it every time we are trying to find a slab
  	 * with available objects.
81819f0fc   Christoph Lameter   SLUB core
1528
  	 */
9824601ea   Christoph Lameter   SLUB: rename defr...
1529
1530
  	if (!s->remote_node_defrag_ratio ||
  			get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0fc   Christoph Lameter   SLUB core
1531
  		return NULL;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1532
  	get_mems_allowed();
0e88460da   Mel Gorman   mm: introduce nod...
1533
  	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
54a6eb5c4   Mel Gorman   mm: use two zonel...
1534
  	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
81819f0fc   Christoph Lameter   SLUB core
1535
  		struct kmem_cache_node *n;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1536
  		n = get_node(s, zone_to_nid(zone));
81819f0fc   Christoph Lameter   SLUB core
1537

54a6eb5c4   Mel Gorman   mm: use two zonel...
1538
  		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
3b89d7d88   David Rientjes   slub: move min_pa...
1539
  				n->nr_partial > s->min_partial) {
497b66f2e   Christoph Lameter   slub: return obje...
1540
1541
  			object = get_partial_node(s, n, c);
  			if (object) {
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1542
  				put_mems_allowed();
497b66f2e   Christoph Lameter   slub: return obje...
1543
  				return object;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1544
  			}
81819f0fc   Christoph Lameter   SLUB core
1545
1546
  		}
  	}
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1547
  	put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1548
1549
1550
1551
1552
1553
1554
  #endif
  	return NULL;
  }
  
  /*
   * Get a partial page, lock it and return it.
   */
497b66f2e   Christoph Lameter   slub: return obje...
1555
  static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1556
  		struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1557
  {
497b66f2e   Christoph Lameter   slub: return obje...
1558
  	void *object;
2154a3363   Christoph Lameter   slub: Use a const...
1559
  	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
81819f0fc   Christoph Lameter   SLUB core
1560

497b66f2e   Christoph Lameter   slub: return obje...
1561
1562
1563
  	object = get_partial_node(s, get_node(s, searchnode), c);
  	if (object || node != NUMA_NO_NODE)
  		return object;
81819f0fc   Christoph Lameter   SLUB core
1564

acd19fd1a   Christoph Lameter   slub: pass kmem_c...
1565
  	return get_any_partial(s, flags, c);
81819f0fc   Christoph Lameter   SLUB core
1566
  }
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
  #ifdef CONFIG_PREEMPT
  /*
   * Calculate the next globally unique transaction for disambiguation
   * during cmpxchg. The transactions start with the cpu number and are then
   * incremented by CONFIG_NR_CPUS.
   */
  #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
  #else
  /*
   * No preemption supported therefore also no need to check for
   * different cpus.
   */
  #define TID_STEP 1
  #endif
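  
  /*
   * Illustrative example (assuming CONFIG_PREEMPT and CONFIG_NR_CPUS == 4,
   * so TID_STEP == 4): cpu 2 starts at tid 2 and advances 2 -> 6 -> 10.
   * tid_to_cpu() always yields 2 while tid_to_event() counts 0, 1, 2, so a
   * mismatch tells note_cmpxchg_failure() below whether the cpu changed or
   * another operation ran on this cpu in between.
   */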
  
  static inline unsigned long next_tid(unsigned long tid)
  {
  	return tid + TID_STEP;
  }
  
  static inline unsigned int tid_to_cpu(unsigned long tid)
  {
  	return tid % TID_STEP;
  }
  
  static inline unsigned long tid_to_event(unsigned long tid)
  {
  	return tid / TID_STEP;
  }
  
  static inline unsigned int init_tid(int cpu)
  {
  	return cpu;
  }
  
  static inline void note_cmpxchg_failure(const char *n,
  		const struct kmem_cache *s, unsigned long tid)
  {
  #ifdef SLUB_DEBUG_CMPXCHG
  	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
  
  	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
  
  #ifdef CONFIG_PREEMPT
  	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
  		printk("due to cpu change %d -> %d
  ",
  			tid_to_cpu(tid), tid_to_cpu(actual_tid));
  	else
  #endif
  	if (tid_to_event(tid) != tid_to_event(actual_tid))
  		printk("due to cpu running other code. Event %ld->%ld
  ",
  			tid_to_event(tid), tid_to_event(actual_tid));
  	else
  		printk("for unknown reason: actual=%lx was=%lx target=%lx
  ",
  			actual_tid, tid, next_tid(tid));
  #endif
4fdccdfbb   Christoph Lameter   slub: Add statist...
1626
  	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1627
  }
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1628
1629
  void init_kmem_cache_cpus(struct kmem_cache *s)
  {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1630
1631
1632
1633
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1634
  }
2cfb7455d   Christoph Lameter   slub: Rework allo...
1635
1636
1637
1638
  
  /*
   * Remove the cpu slab
   */
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1639
  static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1640
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1641
  	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1642
  	struct page *page = c->page;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1643
1644
1645
1646
1647
  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  	int lock = 0;
  	enum slab_modes l = M_NONE, m = M_NONE;
  	void *freelist;
  	void *nextfree;
136333d10   Shaohua Li   slub: explicitly ...
1648
  	int tail = DEACTIVATE_TO_HEAD;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1649
1650
1651
1652
  	struct page new;
  	struct page old;
  
  	if (page->freelist) {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1653
  		stat(s, DEACTIVATE_REMOTE_FREES);
136333d10   Shaohua Li   slub: explicitly ...
1654
  		tail = DEACTIVATE_TO_TAIL;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1655
1656
1657
1658
1659
1660
  	}
  
  	c->tid = next_tid(c->tid);
  	c->page = NULL;
  	freelist = c->freelist;
  	c->freelist = NULL;
894b8788d   Christoph Lameter   slub: support con...
1661
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
  	 * Stage one: Free all available per cpu objects back
  	 * to the page freelist while it is still frozen. Leave the
  	 * last one.
  	 *
  	 * There is no need to take the list->lock because the page
  	 * is still frozen.
  	 */
  	while (freelist && (nextfree = get_freepointer(s, freelist))) {
  		void *prior;
  		unsigned long counters;
  
  		do {
  			prior = page->freelist;
  			counters = page->counters;
  			set_freepointer(s, freelist, prior);
  			new.counters = counters;
  			new.inuse--;
  			VM_BUG_ON(!new.frozen);
1d07171c5   Christoph Lameter   slub: disable int...
1680
  		} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1681
1682
1683
1684
1685
1686
  			prior, counters,
  			freelist, new.counters,
  			"drain percpu freelist"));
  
  		freelist = nextfree;
  	}
894b8788d   Christoph Lameter   slub: support con...
1687
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
  	 * Stage two: Ensure that the page is unfrozen while the
  	 * list presence reflects the actual number of objects
  	 * during unfreeze.
  	 *
  	 * We setup the list membership and then perform a cmpxchg
  	 * with the count. If there is a mismatch then the page
  	 * is not unfrozen but the page is on the wrong list.
  	 *
  	 * Then we restart the process which may have to remove
  	 * the page from the list that we just put it on again
  	 * because the number of objects in the slab may have
  	 * changed.
894b8788d   Christoph Lameter   slub: support con...
1700
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
1701
  redo:
894b8788d   Christoph Lameter   slub: support con...
1702

2cfb7455d   Christoph Lameter   slub: Rework allo...
1703
1704
1705
  	old.freelist = page->freelist;
  	old.counters = page->counters;
  	VM_BUG_ON(!old.frozen);
7c2e132c5   Christoph Lameter   Add parameter to ...
1706

2cfb7455d   Christoph Lameter   slub: Rework allo...
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
  	/* Determine target state of the slab */
  	new.counters = old.counters;
  	if (freelist) {
  		new.inuse--;
  		set_freepointer(s, freelist, old.freelist);
  		new.freelist = freelist;
  	} else
  		new.freelist = old.freelist;
  
  	new.frozen = 0;
81107188f   Christoph Lameter   slub: Fix partial...
1717
  	if (!new.inuse && n->nr_partial > s->min_partial)
2cfb7455d   Christoph Lameter   slub: Rework allo...
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
  		m = M_FREE;
  	else if (new.freelist) {
  		m = M_PARTIAL;
  		if (!lock) {
  			lock = 1;
  			/*
  			 * Taking the spinlock removes the possibility
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
  			spin_lock(&n->list_lock);
  		}
  	} else {
  		m = M_FULL;
  		if (kmem_cache_debug(s) && !lock) {
  			lock = 1;
  			/*
  			 * This also ensures that the scanning of full
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
  			spin_lock(&n->list_lock);
  		}
  	}
  
  	if (l != m) {
  
  		if (l == M_PARTIAL)
  
  			remove_partial(n, page);
  
  		else if (l == M_FULL)
894b8788d   Christoph Lameter   slub: support con...
1750

2cfb7455d   Christoph Lameter   slub: Rework allo...
1751
1752
1753
1754
1755
  			remove_full(s, page);
  
  		if (m == M_PARTIAL) {
  
  			add_partial(n, page, tail);
136333d10   Shaohua Li   slub: explicitly ...
1756
  			stat(s, tail);
2cfb7455d   Christoph Lameter   slub: Rework allo...
1757
1758
  
  		} else if (m == M_FULL) {
894b8788d   Christoph Lameter   slub: support con...
1759

2cfb7455d   Christoph Lameter   slub: Rework allo...
1760
1761
1762
1763
1764
1765
1766
  			stat(s, DEACTIVATE_FULL);
  			add_full(s, n, page);
  
  		}
  	}
  
  	l = m;
1d07171c5   Christoph Lameter   slub: disable int...
1767
  	if (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1768
1769
1770
1771
  				old.freelist, old.counters,
  				new.freelist, new.counters,
  				"unfreezing slab"))
  		goto redo;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1772
1773
1774
1775
1776
1777
1778
  	if (lock)
  		spin_unlock(&n->list_lock);
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
  		discard_slab(s, page);
  		stat(s, FREE_SLAB);
894b8788d   Christoph Lameter   slub: support con...
1779
  	}
81819f0fc   Christoph Lameter   SLUB core
1780
  }
49e225858   Christoph Lameter   slub: per cpu cac...
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
  /* Unfreeze all the cpu partial slabs */
  static void unfreeze_partials(struct kmem_cache *s)
  {
  	struct kmem_cache_node *n = NULL;
  	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
  	struct page *page;
  
  	while ((page = c->partial)) {
  		enum slab_modes { M_PARTIAL, M_FREE };
  		enum slab_modes l, m;
  		struct page new;
  		struct page old;
  
  		c->partial = page->next;
  		l = M_FREE;
  
  		do {
  
  			old.freelist = page->freelist;
  			old.counters = page->counters;
  			VM_BUG_ON(!old.frozen);
  
  			new.counters = old.counters;
  			new.freelist = old.freelist;
  
  			new.frozen = 0;
dcc3be6a5   Alex Shi   slub: Discard sla...
1807
  			if (!new.inuse && (!n || n->nr_partial > s->min_partial))
49e225858   Christoph Lameter   slub: per cpu cac...
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
  				m = M_FREE;
  			else {
  				struct kmem_cache_node *n2 = get_node(s,
  							page_to_nid(page));
  
  				m = M_PARTIAL;
  				if (n != n2) {
  					if (n)
  						spin_unlock(&n->list_lock);
  
  					n = n2;
  					spin_lock(&n->list_lock);
  				}
  			}
  
  			if (l != m) {
  				if (l == M_PARTIAL)
  					remove_partial(n, page);
  				else
  					add_partial(n, page, 1);
  
  				l = m;
  			}
  
  		} while (!cmpxchg_double_slab(s, page,
  				old.freelist, old.counters,
  				new.freelist, new.counters,
  				"unfreezing slab"));
  
  		if (m == M_FREE) {
  			stat(s, DEACTIVATE_EMPTY);
  			discard_slab(s, page);
  			stat(s, FREE_SLAB);
  		}
  	}
  
  	if (n)
  		spin_unlock(&n->list_lock);
  }
  
  /*
   * Put a page that was just frozen (in __slab_free) into a partial page
   * slot if available. This is done without interrupts disabled and without
   * preemption disabled. The cmpxchg is racy and may put the partial page
   * onto a random cpu's partial slot.
   *
   * If we did not find a slot then simply move all the partials to the
   * per node partial list.
   */
  int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
  {
  	struct page *oldpage;
  	int pages;
  	int pobjects;
  
  	do {
  		pages = 0;
  		pobjects = 0;
  		oldpage = this_cpu_read(s->cpu_slab->partial);
  
  		if (oldpage) {
  			pobjects = oldpage->pobjects;
  			pages = oldpage->pages;
  			if (drain && pobjects > s->cpu_partial) {
  				unsigned long flags;
  				/*
  				 * partial array is full. Move the existing
  				 * set to the per node partial list.
  				 */
  				local_irq_save(flags);
  				unfreeze_partials(s);
  				local_irq_restore(flags);
  				pobjects = 0;
  				pages = 0;
  			}
  		}
  
  		pages++;
  		pobjects += page->objects - page->inuse;
  
  		page->pages = pages;
  		page->pobjects = pobjects;
  		page->next = oldpage;
  
  	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
  	stat(s, CPU_PARTIAL_FREE);
  	return pobjects;
  }
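  
  /*
   * Descriptive note: the cmpxchg loop above only publishes a new head for
   * the per cpu partial list; when the accumulated pobjects count already
   * exceeds s->cpu_partial, the existing set is first moved to the node
   * partial lists by unfreeze_partials() with interrupts disabled.
   */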
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1896
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1897
  {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1898
  	stat(s, CPUSLAB_FLUSH);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1899
  	deactivate_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1900
1901
1902
1903
  }
  
  /*
   * Flush cpu slab.
6446faa2f   Christoph Lameter   slub: Fix up comm...
1904
   *
81819f0fc   Christoph Lameter   SLUB core
1905
1906
   * Called from IPI handler with interrupts disabled.
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
1907
  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0fc   Christoph Lameter   SLUB core
1908
  {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1909
  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0fc   Christoph Lameter   SLUB core
1910

49e225858   Christoph Lameter   slub: per cpu cac...
1911
1912
1913
1914
1915
1916
  	if (likely(c)) {
  		if (c->page)
  			flush_slab(s, c);
  
  		unfreeze_partials(s);
  	}
81819f0fc   Christoph Lameter   SLUB core
1917
1918
1919
1920
1921
  }
  
  static void flush_cpu_slab(void *d)
  {
  	struct kmem_cache *s = d;
81819f0fc   Christoph Lameter   SLUB core
1922

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1923
  	__flush_cpu_slab(s, smp_processor_id());
81819f0fc   Christoph Lameter   SLUB core
1924
1925
1926
1927
  }
  
  static void flush_all(struct kmem_cache *s)
  {
15c8b6c1a   Jens Axboe   on_each_cpu(): ki...
1928
  	on_each_cpu(flush_cpu_slab, s, 1);
81819f0fc   Christoph Lameter   SLUB core
1929
1930
1931
  }
  
  /*
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1932
1933
1934
1935
1936
1937
   * Check if the objects in a per cpu structure fit numa
   * locality expectations.
   */
  static inline int node_match(struct kmem_cache_cpu *c, int node)
  {
  #ifdef CONFIG_NUMA
2154a3363   Christoph Lameter   slub: Use a const...
1938
  	if (node != NUMA_NO_NODE && c->node != node)
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1939
1940
1941
1942
  		return 0;
  #endif
  	return 1;
  }
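  
  /*
   * Descriptive note: node_match() only rejects the cpu slab when a
   * specific node was requested; NUMA_NO_NODE (and every request on
   * !CONFIG_NUMA builds) always matches, so ordinary kmalloc()-style
   * allocations keep using the current cpu slab.
   */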
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
  static int count_free(struct page *page)
  {
  	return page->objects - page->inuse;
  }
  
  static unsigned long count_partial(struct kmem_cache_node *n,
  					int (*get_count)(struct page *))
  {
  	unsigned long flags;
  	unsigned long x = 0;
  	struct page *page;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  	list_for_each_entry(page, &n->partial, lru)
  		x += get_count(page);
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return x;
  }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1961
1962
1963
1964
1965
1966
1967
1968
  static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return atomic_long_read(&n->total_objects);
  #else
  	return 0;
  #endif
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
  static noinline void
  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
  {
  	int node;
  
  	printk(KERN_WARNING
  		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)
  ",
  		nid, gfpflags);
  	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
  		"default order: %d, min order: %d
  ", s->name, s->objsize,
  		s->size, oo_order(s->oo), oo_order(s->min));
fa5ec8a1f   David Rientjes   slub: add option ...
1982
1983
1984
1985
  	if (oo_order(s->min) > get_order(s->objsize))
  		printk(KERN_WARNING "  %s debugging increased min order, use "
  		       "slub_debug=O to disable.
  ", s->name);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1986
1987
1988
1989
1990
1991
1992
1993
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long nr_slabs;
  		unsigned long nr_objs;
  		unsigned long nr_free;
  
  		if (!n)
  			continue;
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1994
1995
1996
  		nr_free  = count_partial(n, count_free);
  		nr_slabs = node_nr_slabs(n);
  		nr_objs  = node_nr_objs(n);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1997
1998
1999
2000
2001
2002
2003
  
  		printk(KERN_WARNING
  			"  node %d: slabs: %ld, objs: %ld, free: %ld
  ",
  			node, nr_slabs, nr_objs, nr_free);
  	}
  }
497b66f2e   Christoph Lameter   slub: return obje...
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
  static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
  			int node, struct kmem_cache_cpu **pc)
  {
  	void *object;
  	struct kmem_cache_cpu *c;
  	struct page *page = new_slab(s, flags, node);
  
  	if (page) {
  		c = __this_cpu_ptr(s->cpu_slab);
  		if (c->page)
  			flush_slab(s, c);
  
  		/*
  		 * No other reference to the page yet so we can
  		 * muck around with it freely without cmpxchg
  		 */
  		object = page->freelist;
  		page->freelist = NULL;
  
  		stat(s, ALLOC_SLAB);
  		c->node = page_to_nid(page);
  		c->page = page;
  		*pc = c;
  	} else
  		object = NULL;
  
  	return object;
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2032
  /*
894b8788d   Christoph Lameter   slub: support con...
2033
2034
2035
   * Slow path. The lockless freelist is empty or we need to perform
   * debugging duties.
   *
894b8788d   Christoph Lameter   slub: support con...
2036
2037
2038
   * Processing is still very fast if new objects have been freed to the
   * regular freelist. In that case we simply take over the regular freelist
   * as the lockless freelist and zap the regular freelist.
81819f0fc   Christoph Lameter   SLUB core
2039
   *
894b8788d   Christoph Lameter   slub: support con...
2040
2041
2042
   * If that is not working then we fall back to the partial lists. We take the
   * first element of the freelist as the object to allocate now and move the
   * rest of the freelist to the lockless freelist.
81819f0fc   Christoph Lameter   SLUB core
2043
   *
894b8788d   Christoph Lameter   slub: support con...
2044
   * And if we were unable to get a new slab from the partial slab lists then
6446faa2f   Christoph Lameter   slub: Fix up comm...
2045
2046
   * we need to allocate a new slab. This is the slowest path since it involves
   * a call to the page allocator and the setup of a new slab.
81819f0fc   Christoph Lameter   SLUB core
2047
   */
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2048
2049
  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  			  unsigned long addr, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
2050
  {
81819f0fc   Christoph Lameter   SLUB core
2051
  	void **object;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2052
  	unsigned long flags;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2053
2054
  	struct page new;
  	unsigned long counters;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
  	/*
  	 * We may have been preempted and rescheduled on a different
  	 * cpu before disabling interrupts. Need to reload cpu area
  	 * pointer.
  	 */
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
81819f0fc   Christoph Lameter   SLUB core
2065

497b66f2e   Christoph Lameter   slub: return obje...
2066
  	if (!c->page)
81819f0fc   Christoph Lameter   SLUB core
2067
  		goto new_slab;
49e225858   Christoph Lameter   slub: per cpu cac...
2068
  redo:
fc59c0530   Christoph Lameter   slub: Get rid of ...
2069
  	if (unlikely(!node_match(c, node))) {
e36a2652d   Christoph Lameter   slub: Add statist...
2070
  		stat(s, ALLOC_NODE_MISMATCH);
fc59c0530   Christoph Lameter   slub: Get rid of ...
2071
2072
2073
  		deactivate_slab(s, c);
  		goto new_slab;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2074

2cfb7455d   Christoph Lameter   slub: Rework allo...
2075
2076
2077
  	stat(s, ALLOC_SLOWPATH);
  
  	do {
497b66f2e   Christoph Lameter   slub: return obje...
2078
2079
  		object = c->page->freelist;
  		counters = c->page->counters;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2080
  		new.counters = counters;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2081
  		VM_BUG_ON(!new.frozen);
03e404af2   Christoph Lameter   slub: fast releas...
2082
2083
2084
2085
2086
2087
2088
2089
  		/*
  		 * If there is no object left then we use this loop to
  		 * deactivate the slab which is simple since no objects
  		 * are left in the slab and therefore we do not need to
  		 * put the page back onto the partial list.
  		 *
  		 * If there are objects left then we retrieve them
  		 * and use them to refill the per cpu queue.
497b66f2e   Christoph Lameter   slub: return obje...
2090
  		 */
03e404af2   Christoph Lameter   slub: fast releas...
2091

497b66f2e   Christoph Lameter   slub: return obje...
2092
  		new.inuse = c->page->objects;
03e404af2   Christoph Lameter   slub: fast releas...
2093
  		new.frozen = object != NULL;
497b66f2e   Christoph Lameter   slub: return obje...
2094
  	} while (!__cmpxchg_double_slab(s, c->page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
2095
2096
2097
  			object, counters,
  			NULL, new.counters,
  			"__slab_alloc"));
6446faa2f   Christoph Lameter   slub: Fix up comm...
2098

49e225858   Christoph Lameter   slub: per cpu cac...
2099
  	if (!object) {
03e404af2   Christoph Lameter   slub: fast releas...
2100
2101
  		c->page = NULL;
  		stat(s, DEACTIVATE_BYPASS);
fc59c0530   Christoph Lameter   slub: Get rid of ...
2102
  		goto new_slab;
03e404af2   Christoph Lameter   slub: fast releas...
2103
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2104

84e554e68   Christoph Lameter   SLUB: Make slub s...
2105
  	stat(s, ALLOC_REFILL);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2106

894b8788d   Christoph Lameter   slub: support con...
2107
  load_freelist:
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2108
  	c->freelist = get_freepointer(s, object);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2109
2110
  	c->tid = next_tid(c->tid);
  	local_irq_restore(flags);
81819f0fc   Christoph Lameter   SLUB core
2111
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2112
  new_slab:
2cfb7455d   Christoph Lameter   slub: Rework allo...
2113

49e225858   Christoph Lameter   slub: per cpu cac...
2114
2115
2116
2117
2118
2119
2120
  	if (c->partial) {
  		c->page = c->partial;
  		c->partial = c->page->next;
  		c->node = page_to_nid(c->page);
  		stat(s, CPU_PARTIAL_ALLOC);
  		c->freelist = NULL;
  		goto redo;
81819f0fc   Christoph Lameter   SLUB core
2121
  	}
49e225858   Christoph Lameter   slub: per cpu cac...
2122
  	/* Then do expensive stuff like retrieving pages from the partial lists */
497b66f2e   Christoph Lameter   slub: return obje...
2123
  	object = get_partial(s, gfpflags, node, c);
b811c202a   Christoph Lameter   SLUB: simplify IR...
2124

497b66f2e   Christoph Lameter   slub: return obje...
2125
  	if (unlikely(!object)) {
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2126

497b66f2e   Christoph Lameter   slub: return obje...
2127
  		object = new_slab_objects(s, gfpflags, node, &c);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2128

497b66f2e   Christoph Lameter   slub: return obje...
2129
2130
2131
  		if (unlikely(!object)) {
  			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
  				slab_out_of_memory(s, gfpflags, node);
9e577e8b4   Christoph Lameter   slub: When alloca...
2132

497b66f2e   Christoph Lameter   slub: return obje...
2133
2134
2135
  			local_irq_restore(flags);
  			return NULL;
  		}
81819f0fc   Christoph Lameter   SLUB core
2136
  	}
2cfb7455d   Christoph Lameter   slub: Rework allo...
2137

497b66f2e   Christoph Lameter   slub: return obje...
2138
  	if (likely(!kmem_cache_debug(s)))
4b6f07504   Christoph Lameter   SLUB: Define func...
2139
  		goto load_freelist;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2140

497b66f2e   Christoph Lameter   slub: return obje...
2141
2142
2143
  	/* Only entered in the debug case */
  	if (!alloc_debug_processing(s, c->page, object, addr))
  		goto new_slab;	/* Slab failed checks. Next slab needed */
894b8788d   Christoph Lameter   slub: support con...
2144

2cfb7455d   Christoph Lameter   slub: Rework allo...
2145
  	c->freelist = get_freepointer(s, object);
442b06bce   Christoph Lameter   slub: Remove node...
2146
  	deactivate_slab(s, c);
15b7c5142   Pekka Enberg   SLUB: Optimize sl...
2147
  	c->node = NUMA_NO_NODE;
a71ae47a2   Christoph Lameter   slub: Fix double ...
2148
2149
  	local_irq_restore(flags);
  	return object;
894b8788d   Christoph Lameter   slub: support con...
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
  }
  
  /*
   * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
   * have the fastpath folded into their functions. So no function call
   * overhead for requests that can be satisfied on the fastpath.
   *
   * The fastpath works by first checking if the lockless freelist can be used.
   * If not then __slab_alloc is called for slow processing.
   *
   * Otherwise we can simply pick the next object from the lockless free list.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2162
  static __always_inline void *slab_alloc(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2163
  		gfp_t gfpflags, int node, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2164
  {
894b8788d   Christoph Lameter   slub: support con...
2165
  	void **object;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2166
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2167
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2168

c016b0bde   Christoph Lameter   slub: Extract hoo...
2169
  	if (slab_pre_alloc_hook(s, gfpflags))
773ff60e8   Akinobu Mita   SLUB: failslab su...
2170
  		return NULL;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2171

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2172
  redo:
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2173
2174
2175
2176
2177
2178
2179
  
  	/*
  	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
  	 * enabled. We may switch back and forth between cpus while
  	 * reading from one cpu area. That does not matter as long
  	 * as we end up on the original cpu again when doing the cmpxchg.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2180
  	c = __this_cpu_ptr(s->cpu_slab);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2181

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2182
2183
2184
2185
2186
2187
2188
2189
  	/*
  	 * The transaction ids are globally unique per cpu and per operation on
  	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
  	 * occurs on the right processor and that there was no operation on the
  	 * linked list in between.
  	 */
  	tid = c->tid;
  	barrier();
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2190

9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2191
  	object = c->freelist;
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2192
  	if (unlikely(!object || !node_match(c, node)))
894b8788d   Christoph Lameter   slub: support con...
2193

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2194
  		object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788d   Christoph Lameter   slub: support con...
2195
2196
  
  	else {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2197
  		/*
25985edce   Lucas De Marchi   Fix common misspe...
2198
  		 * The cmpxchg will only match if there was no additional
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
  		 * operation and if we are on the right processor.
  		 *
  		 * The cmpxchg does the following atomically (without lock semantics!)
  		 * 1. Relocate first pointer to the current per cpu area.
  		 * 2. Verify that tid and freelist have not been changed
  		 * 3. If they were not changed replace tid and freelist
  		 *
  		 * Since this is without lock semantics the protection is only against
  		 * code executing on this cpu *not* from access by other cpus.
  		 */
30106b8ce   Thomas Gleixner   slub: Fix the loc...
2209
  		if (unlikely(!irqsafe_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2210
2211
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				object, tid,
1393d9a18   Christoph Lameter   slub: Make CONFIG...
2212
  				get_freepointer_safe(s, object), next_tid(tid)))) {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2213
2214
2215
2216
  
  			note_cmpxchg_failure("slab_alloc", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2217
  		stat(s, ALLOC_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2218
  	}
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2219

74e2134ff   Pekka Enberg   SLUB: Fix __GFP_Z...
2220
  	if (unlikely(gfpflags & __GFP_ZERO) && object)
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2221
  		memset(object, 0, s->objsize);
d07dbea46   Christoph Lameter   Slab allocators: ...
2222

c016b0bde   Christoph Lameter   slub: Extract hoo...
2223
  	slab_post_alloc_hook(s, gfpflags, object);
5a896d9e7   Vegard Nossum   slub: add hooks f...
2224

894b8788d   Christoph Lameter   slub: support con...
2225
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2226
2227
2228
2229
  }
  
  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  {
2154a3363   Christoph Lameter   slub: Use a const...
2230
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2231

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2232
  	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2233
2234
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2235
2236
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
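  
  /*
   * Illustrative usage (hypothetical caller, not part of this file):
   *
   *	struct kmem_cache *foo_cache =
   *		kmem_cache_create("foo", sizeof(struct foo), 0,
   *				  SLAB_HWCACHE_ALIGN, NULL);
   *	struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
   *	...
   *	kmem_cache_free(foo_cache, p);
   */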
0f24f1287   Li Zefan   tracing, slab: De...
2237
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2238
2239
2240
2241
2242
2243
2244
2245
2246
  void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
  {
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_alloc_trace);
  
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2247
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2248
2249
2250
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2251
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2252
  EXPORT_SYMBOL(kmalloc_order_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2253
  #endif
81819f0fc   Christoph Lameter   SLUB core
2254
2255
2256
  #ifdef CONFIG_NUMA
  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2257
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2258
2259
  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
  				    s->objsize, s->size, gfpflags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2260
2261
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2262
2263
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
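  /*
   * Illustrative example, not part of this file: allocating an object close
   * to a given NUMA node with kmem_cache_alloc_node(). The cache pointer
   * "foo_cache" and the helper name are hypothetical.
   */
  #if 0
  static void *foo_alloc_on_node(struct kmem_cache *foo_cache, int node)
  {
  	/* The allocator may fall back to other nodes if @node is exhausted. */
  	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, node);
  }
  #endif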
81819f0fc   Christoph Lameter   SLUB core
2264

0f24f1287   Li Zefan   tracing, slab: De...
2265
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2266
  void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2267
  				    gfp_t gfpflags,
4a92379bd   Richard Kennedy   slub tracing: mov...
2268
  				    int node, size_t size)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2269
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2270
2271
2272
2273
2274
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
  
  	trace_kmalloc_node(_RET_IP_, ret,
  			   size, s->size, gfpflags, node);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2275
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2276
  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2277
  #endif
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
2278
  #endif
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2279

81819f0fc   Christoph Lameter   SLUB core
2280
  /*
894b8788d   Christoph Lameter   slub: support con...
2281
2282
   * Slow path handling. This may still be called frequently since objects
   * have a longer lifetime than the cpu slabs in most processing loads.
81819f0fc   Christoph Lameter   SLUB core
2283
   *
894b8788d   Christoph Lameter   slub: support con...
2284
2285
2286
   * So we still attempt to reduce cache line usage. Just take the slab
   * lock and free the item. If there is no additional partial page
   * handling required then we can return immediately.
81819f0fc   Christoph Lameter   SLUB core
2287
   */
894b8788d   Christoph Lameter   slub: support con...
2288
  static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2289
  			void *x, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
2290
2291
2292
  {
  	void *prior;
  	void **object = (void *)x;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2293
2294
2295
2296
2297
  	int was_frozen;
  	int inuse;
  	struct page new;
  	unsigned long counters;
  	struct kmem_cache_node *n = NULL;
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
2298
  	unsigned long uninitialized_var(flags);
81819f0fc   Christoph Lameter   SLUB core
2299

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2300
  	stat(s, FREE_SLOWPATH);
81819f0fc   Christoph Lameter   SLUB core
2301

8dc16c6c0   Christoph Lameter   slub: Move debug ...
2302
  	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
80f08c191   Christoph Lameter   slub: Avoid disab...
2303
  		return;
6446faa2f   Christoph Lameter   slub: Fix up comm...
2304

2cfb7455d   Christoph Lameter   slub: Rework allo...
2305
2306
2307
2308
2309
2310
2311
2312
  	do {
  		prior = page->freelist;
  		counters = page->counters;
  		set_freepointer(s, object, prior);
  		new.counters = counters;
  		was_frozen = new.frozen;
  		new.inuse--;
  		if ((!new.inuse || !prior) && !was_frozen && !n) {
49e225858   Christoph Lameter   slub: per cpu cac...
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
  
  			if (!kmem_cache_debug(s) && !prior)
  
  				/*
  				 * Slab was on no list before and will be partially empty.
  				 * We can defer the list move and instead freeze it.
  				 */
  				new.frozen = 1;
  
  			else { /* Needs to be taken off a list */
  
  				n = get_node(s, page_to_nid(page));
  				/*
  				 * Speculatively acquire the list_lock.
  				 * If the cmpxchg does not succeed then we may
  				 * drop the list_lock without any processing.
  				 *
  				 * Otherwise the list_lock will synchronize with
  				 * other processors updating the list of slabs.
  				 */
  				spin_lock_irqsave(&n->list_lock, flags);
  
  			}
2cfb7455d   Christoph Lameter   slub: Rework allo...
2336
2337
  		}
  		inuse = new.inuse;
81819f0fc   Christoph Lameter   SLUB core
2338

2cfb7455d   Christoph Lameter   slub: Rework allo...
2339
2340
2341
2342
  	} while (!cmpxchg_double_slab(s, page,
  		prior, counters,
  		object, new.counters,
  		"__slab_free"));
81819f0fc   Christoph Lameter   SLUB core
2343

2cfb7455d   Christoph Lameter   slub: Rework allo...
2344
  	if (likely(!n)) {
49e225858   Christoph Lameter   slub: per cpu cac...
2345
2346
2347
2348
2349
2350
2351
2352
2353
  
  		/*
  		 * If we just froze the page then put it onto the
  		 * per cpu partial list.
  		 */
  		if (new.frozen && !was_frozen)
  			put_cpu_partial(s, page, 1);
  
  		/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
2354
2355
2356
2357
2358
  		 * The list lock was not taken, therefore no list
  		 * activity can be necessary.
  		 */
  		if (was_frozen)
  			stat(s, FREE_FROZEN);
80f08c191   Christoph Lameter   slub: Avoid disab...
2359
  		return;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2360
  	}
81819f0fc   Christoph Lameter   SLUB core
2361
2362
  
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
2363
2364
  	 * was_frozen may have been set after we acquired the list_lock in
  	 * an earlier loop. So we need to check it here again.
81819f0fc   Christoph Lameter   SLUB core
2365
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
2366
2367
2368
2369
2370
  	if (was_frozen)
  		stat(s, FREE_FROZEN);
  	else {
  		if (unlikely(!inuse && n->nr_partial > s->min_partial))
  			goto slab_empty;
81819f0fc   Christoph Lameter   SLUB core
2371

2cfb7455d   Christoph Lameter   slub: Rework allo...
2372
2373
2374
2375
2376
2377
  		/*
  		 * Objects left in the slab. If it was not on the partial list before
  		 * then add it.
  		 */
  		if (unlikely(!prior)) {
  			remove_full(s, page);
136333d10   Shaohua Li   slub: explicitly ...
2378
  			add_partial(n, page, DEACTIVATE_TO_TAIL);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2379
2380
  			stat(s, FREE_ADD_PARTIAL);
  		}
8ff12cfc0   Christoph Lameter   SLUB: Support for...
2381
  	}
80f08c191   Christoph Lameter   slub: Avoid disab...
2382
  	spin_unlock_irqrestore(&n->list_lock, flags);
81819f0fc   Christoph Lameter   SLUB core
2383
2384
2385
  	return;
  
  slab_empty:
a973e9dd1   Christoph Lameter   Revert "unique en...
2386
  	if (prior) {
81819f0fc   Christoph Lameter   SLUB core
2387
  		/*
6fbabb20f   Christoph Lameter   slub: Fix full li...
2388
  		 * Slab on the partial list.
81819f0fc   Christoph Lameter   SLUB core
2389
  		 */
5cc6eee8a   Christoph Lameter   slub: explicit li...
2390
  		remove_partial(n, page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2391
  		stat(s, FREE_REMOVE_PARTIAL);
6fbabb20f   Christoph Lameter   slub: Fix full li...
2392
2393
2394
  	} else
  		/* Slab must be on the full list */
  		remove_full(s, page);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2395

80f08c191   Christoph Lameter   slub: Avoid disab...
2396
  	spin_unlock_irqrestore(&n->list_lock, flags);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2397
  	stat(s, FREE_SLAB);
81819f0fc   Christoph Lameter   SLUB core
2398
  	discard_slab(s, page);
81819f0fc   Christoph Lameter   SLUB core
2399
  }
894b8788d   Christoph Lameter   slub: support con...
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
  /*
   * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
   * can perform fastpath freeing without additional function calls.
   *
   * The fastpath is only possible if we are freeing to the current cpu slab
   * of this processor. This is typically the case if we have just allocated
   * the item before.
   *
   * If fastpath is not possible then fall back to __slab_free where we deal
   * with all sorts of special processing.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2411
  static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2412
  			struct page *page, void *x, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2413
2414
  {
  	void **object = (void *)x;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2415
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2416
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2417

c016b0bde   Christoph Lameter   slub: Extract hoo...
2418
  	slab_free_hook(s, x);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2419
2420
2421
2422
2423
2424
2425
  redo:
  	/*
  	 * Determine the current cpu's per cpu slab.
  	 * The cpu may change afterward. However that does not matter since
  	 * data is retrieved via this pointer. If we are on the same cpu
  	 * during the cmpxchg then the free will succeed.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2426
  	c = __this_cpu_ptr(s->cpu_slab);
c016b0bde   Christoph Lameter   slub: Extract hoo...
2427

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2428
2429
  	tid = c->tid;
  	barrier();
c016b0bde   Christoph Lameter   slub: Extract hoo...
2430

442b06bce   Christoph Lameter   slub: Remove node...
2431
  	if (likely(page == c->page)) {
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2432
  		set_freepointer(s, object, c->freelist);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2433

30106b8ce   Thomas Gleixner   slub: Fix the loc...
2434
  		if (unlikely(!irqsafe_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2435
2436
2437
2438
2439
2440
2441
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				c->freelist, tid,
  				object, next_tid(tid)))) {
  
  			note_cmpxchg_failure("slab_free", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2442
  		stat(s, FREE_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2443
  	} else
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2444
  		__slab_free(s, page, x, addr);
894b8788d   Christoph Lameter   slub: support con...
2445

894b8788d   Christoph Lameter   slub: support con...
2446
  }
81819f0fc   Christoph Lameter   SLUB core
2447
2448
  void kmem_cache_free(struct kmem_cache *s, void *x)
  {
77c5e2d01   Christoph Lameter   slub: fix object ...
2449
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
2450

b49af68ff   Christoph Lameter   Add virt_to_head_...
2451
  	page = virt_to_head_page(x);
81819f0fc   Christoph Lameter   SLUB core
2452

ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2453
  	slab_free(s, page, x, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2454

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2455
  	trace_kmem_cache_free(_RET_IP_, x);
81819f0fc   Christoph Lameter   SLUB core
2456
2457
  }
  EXPORT_SYMBOL(kmem_cache_free);
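  /*
   * Illustrative example, not part of this file: a matched allocation and
   * free on a cache. Passing __GFP_ZERO makes slab_alloc() above memset()
   * the object before it is returned. "foo_cache" is hypothetical.
   */
  #if 0
  static int foo_example(struct kmem_cache *foo_cache)
  {
  	void *object = kmem_cache_alloc(foo_cache, GFP_KERNEL | __GFP_ZERO);

  	if (!object)
  		return -ENOMEM;
  	/* ... use the zeroed object ... */
  	kmem_cache_free(foo_cache, object);
  	return 0;
  }
  #endif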
81819f0fc   Christoph Lameter   SLUB core
2458
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2459
2460
2461
2462
   * Object placement in a slab is made very easy because we always start at
   * offset 0. If we tune the size of the object to the alignment then we can
   * get the required alignment by putting one properly sized object after
   * another.
81819f0fc   Christoph Lameter   SLUB core
2463
2464
2465
2466
   *
   * Notice that the allocation order determines the sizes of the per cpu
   * caches. Each processor always has one slab available for allocations.
   * Increasing the allocation order reduces the number of times that slabs
672bba3a4   Christoph Lameter   SLUB: update comm...
2467
   * must be moved on and off the partial lists and is therefore a factor in
81819f0fc   Christoph Lameter   SLUB core
2468
   * locking overhead.
81819f0fc   Christoph Lameter   SLUB core
2469
2470
2471
2472
2473
2474
2475
2476
2477
   */
  
  /*
   * Minimum / Maximum order of slab pages. This influences locking overhead
   * and slab fragmentation. A higher order reduces the number of partial slabs
   * and increases the number of allocations possible without having to
   * take the list_lock.
   */
  static int slub_min_order;
114e9e89e   Christoph Lameter   slub: Drop DEFAUL...
2478
  static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2479
  static int slub_min_objects;
81819f0fc   Christoph Lameter   SLUB core
2480
2481
2482
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
672bba3a4   Christoph Lameter   SLUB: update comm...
2483
   * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0fc   Christoph Lameter   SLUB core
2484
2485
2486
2487
   */
  static int slub_nomerge;
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2488
2489
   * Calculate the order of allocation given a slab object size.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2490
2491
2492
2493
   * The order of allocation has significant impact on performance and other
   * system components. Generally order 0 allocations should be preferred since
   * order 0 does not cause fragmentation in the page allocator. Larger objects
   * can be problematic to put into order 0 slabs because there may be too much
c124f5b54   Christoph Lameter   slub: pack object...
2494
   * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a4   Christoph Lameter   SLUB: update comm...
2495
2496
2497
2498
2499
2500
   * would be wasted.
   *
   * In order to reach satisfactory performance we must ensure that a minimum
   * number of objects is in one slab. Otherwise we may generate too much
   * activity on the partial lists which requires taking the list_lock. This is
   * less of a concern for large slabs though, which are rarely used.
81819f0fc   Christoph Lameter   SLUB core
2501
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2502
2503
2504
2505
   * slub_max_order specifies the order where we begin to stop considering the
   * number of objects in a slab as critical. If we reach slub_max_order then
   * we try to keep the page order as low as possible. So we accept more waste
   * of space in favor of a small page order.
81819f0fc   Christoph Lameter   SLUB core
2506
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2507
2508
2509
2510
   * Higher order allocations also allow the placement of more objects in a
   * slab and thereby reduce object handling overhead. If the user has
   * requested a higher minimum order then we start with that one instead of
   * the smallest order which will fit the object.
81819f0fc   Christoph Lameter   SLUB core
2511
   */
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2512
  static inline int slab_order(int size, int min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2513
  				int max_order, int fract_leftover, int reserved)
81819f0fc   Christoph Lameter   SLUB core
2514
2515
2516
  {
  	int order;
  	int rem;
6300ea750   Christoph Lameter   SLUB: ensure that...
2517
  	int min_order = slub_min_order;
81819f0fc   Christoph Lameter   SLUB core
2518

ab9a0f196   Lai Jiangshan   slub: automatical...
2519
  	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
2520
  		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b264641   Christoph Lameter   slub: Store max n...
2521

6300ea750   Christoph Lameter   SLUB: ensure that...
2522
  	for (order = max(min_order,
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2523
2524
  				fls(min_objects * size - 1) - PAGE_SHIFT);
  			order <= max_order; order++) {
81819f0fc   Christoph Lameter   SLUB core
2525

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2526
  		unsigned long slab_size = PAGE_SIZE << order;
81819f0fc   Christoph Lameter   SLUB core
2527

ab9a0f196   Lai Jiangshan   slub: automatical...
2528
  		if (slab_size < min_objects * size + reserved)
81819f0fc   Christoph Lameter   SLUB core
2529
  			continue;
ab9a0f196   Lai Jiangshan   slub: automatical...
2530
  		rem = (slab_size - reserved) % size;
81819f0fc   Christoph Lameter   SLUB core
2531

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2532
  		if (rem <= slab_size / fract_leftover)
81819f0fc   Christoph Lameter   SLUB core
2533
2534
2535
  			break;
  
  	}
672bba3a4   Christoph Lameter   SLUB: update comm...
2536

81819f0fc   Christoph Lameter   SLUB core
2537
2538
  	return order;
  }
ab9a0f196   Lai Jiangshan   slub: automatical...
2539
  static inline int calculate_order(int size, int reserved)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2540
2541
2542
2543
  {
  	int order;
  	int min_objects;
  	int fraction;
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2544
  	int max_objects;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
  
  	/*
  	 * Attempt to find best configuration for a slab. This
  	 * works by first attempting to generate a layout with
  	 * the best configuration and backing off gradually.
  	 *
  	 * First we increase the acceptable waste in a slab. Then
  	 * we reduce the minimum objects required in a slab.
  	 */
  	min_objects = slub_min_objects;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2555
2556
  	if (!min_objects)
  		min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f196   Lai Jiangshan   slub: automatical...
2557
  	max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2558
  	min_objects = min(min_objects, max_objects);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2559
  	while (min_objects > 1) {
c124f5b54   Christoph Lameter   slub: pack object...
2560
  		fraction = 16;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2561
2562
  		while (fraction >= 4) {
  			order = slab_order(size, min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2563
  					slub_max_order, fraction, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2564
2565
2566
2567
  			if (order <= slub_max_order)
  				return order;
  			fraction /= 2;
  		}
5086c389c   Amerigo Wang   SLUB: Fix some co...
2568
  		min_objects--;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2569
2570
2571
2572
2573
2574
  	}
  
  	/*
  	 * We were unable to place multiple objects in a slab. Now
  	 * let's see if we can place a single object there.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2575
  	order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2576
2577
2578
2579
2580
2581
  	if (order <= slub_max_order)
  		return order;
  
  	/*
  	 * Doh this slab cannot be placed using slub_max_order.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2582
  	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf5909   David Rientjes   slub: enforce MAX...
2583
  	if (order < MAX_ORDER)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2584
2585
2586
  		return order;
  	return -ENOSYS;
  }
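  /*
   * Worked example (illustrative, assuming 4KB pages, slub_min_order = 0,
   * slub_max_order = PAGE_ALLOC_COSTLY_ORDER = 3, reserved = 0 and
   * nr_cpu_ids = 4, so min_objects = 4 * (fls(4) + 1) = 16):
   *
   *	calculate_order(256, 0) calls slab_order(256, 16, 3, 16, 0).
   *	The loop starts at order fls(16 * 256 - 1) - PAGE_SHIFT = 0.
   *	An order 0 slab holds 4096 / 256 = 16 objects with remainder 0,
   *	and 0 <= 4096 / 16, so order 0 is accepted and returned.
   *
   * Only if no order up to slub_max_order keeps the waste below 1/16th of
   * the slab does the code retry with 1/8th and 1/4th, and finally with
   * fewer objects per slab.
   */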
81819f0fc   Christoph Lameter   SLUB core
2587
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2588
   * Figure out what the alignment of the objects will be.
81819f0fc   Christoph Lameter   SLUB core
2589
2590
2591
2592
2593
   */
  static unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
2594
2595
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
81819f0fc   Christoph Lameter   SLUB core
2596
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
2597
2598
  	 * The hardware cache alignment cannot override the specified
  	 * alignment though. If that is greater then use it.
81819f0fc   Christoph Lameter   SLUB core
2599
  	 */
b62103867   Nick Piggin   slub: Do not cros...
2600
2601
2602
2603
2604
2605
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
81819f0fc   Christoph Lameter   SLUB core
2606
2607
  
  	if (align < ARCH_SLAB_MINALIGN)
b62103867   Nick Piggin   slub: Do not cros...
2608
  		align = ARCH_SLAB_MINALIGN;
81819f0fc   Christoph Lameter   SLUB core
2609
2610
2611
  
  	return ALIGN(align, sizeof(void *));
  }
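  /*
   * Worked example (illustrative, assuming cache_line_size() = 64 and
   * ARCH_SLAB_MINALIGN <= sizeof(void *) = 8):
   *
   *	calculate_alignment(SLAB_HWCACHE_ALIGN, 8, 24):
   *	ralign starts at 64 and is halved while size <= ralign / 2, so
   *	24 <= 32 gives ralign = 32, then 24 <= 16 fails. The result is
   *	ALIGN(max(8, 32), sizeof(void *)) = 32.
   *
   * A 24 byte object is therefore packed into a 32 byte slot, while any
   * object larger than half a cache line gets full cache line alignment.
   */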
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2612
2613
  static void
  init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2614
2615
  {
  	n->nr_partial = 0;
81819f0fc   Christoph Lameter   SLUB core
2616
2617
  	spin_lock_init(&n->list_lock);
  	INIT_LIST_HEAD(&n->partial);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2618
  #ifdef CONFIG_SLUB_DEBUG
0f389ec63   Christoph Lameter   slub: No need for...
2619
  	atomic_long_set(&n->nr_slabs, 0);
02b71b701   Salman Qazi   slub: fixed unini...
2620
  	atomic_long_set(&n->total_objects, 0);
643b11384   Christoph Lameter   slub: enable trac...
2621
  	INIT_LIST_HEAD(&n->full);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2622
  #endif
81819f0fc   Christoph Lameter   SLUB core
2623
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2624
  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2625
  {
6c182dc0d   Christoph Lameter   slub: Remove stat...
2626
2627
  	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
  			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2628

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2629
  	/*
d4d84fef6   Chris Metcalf   slub: always alig...
2630
2631
  	 * Must align to double word boundary for the double cmpxchg
  	 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2632
  	 */
d4d84fef6   Chris Metcalf   slub: always alig...
2633
2634
  	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
  				     2 * sizeof(void *));
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2635
2636
2637
2638
2639
  
  	if (!s->cpu_slab)
  		return 0;
  
  	init_kmem_cache_cpus(s);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2640

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2641
  	return 1;
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2642
  }
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2643

51df11428   Christoph Lameter   slub: Dynamically...
2644
  static struct kmem_cache *kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
2645
2646
2647
2648
2649
2650
  /*
   * No kmalloc_node yet so do it by hand. We know that this is the first
   * slab on the node for this slabcache. There are no concurrent accesses
   * possible.
   *
   * Note that this function only works on the kmalloc_node_cache
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2651
2652
   * when allocating for the kmalloc_node_cache. This is used for bootstrapping
   * memory on a fresh node that has no slab structures yet.
81819f0fc   Christoph Lameter   SLUB core
2653
   */
55136592f   Christoph Lameter   slub: Remove dyna...
2654
  static void early_kmem_cache_node_alloc(int node)
81819f0fc   Christoph Lameter   SLUB core
2655
2656
2657
  {
  	struct page *page;
  	struct kmem_cache_node *n;
51df11428   Christoph Lameter   slub: Dynamically...
2658
  	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0fc   Christoph Lameter   SLUB core
2659

51df11428   Christoph Lameter   slub: Dynamically...
2660
  	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0fc   Christoph Lameter   SLUB core
2661
2662
  
  	BUG_ON(!page);
a2f92ee7e   Christoph Lameter   SLUB: do not fail...
2663
2664
2665
2666
2667
2668
2669
2670
  	if (page_to_nid(page) != node) {
  		printk(KERN_ERR "SLUB: Unable to allocate memory from "
  				"node %d
  ", node);
  		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
  				"in order to be able to continue
  ");
  	}
81819f0fc   Christoph Lameter   SLUB core
2671
2672
  	n = page->freelist;
  	BUG_ON(!n);
51df11428   Christoph Lameter   slub: Dynamically...
2673
  	page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea11   Christoph Lameter   slub: Prepare inu...
2674
  	page->inuse = 1;
8cb0a5068   Christoph Lameter   slub: Move page->...
2675
  	page->frozen = 0;
51df11428   Christoph Lameter   slub: Dynamically...
2676
  	kmem_cache_node->node[node] = n;
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2677
  #ifdef CONFIG_SLUB_DEBUG
f7cb19336   Christoph Lameter   SLUB: Pass active...
2678
  	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df11428   Christoph Lameter   slub: Dynamically...
2679
  	init_tracking(kmem_cache_node, n);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2680
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
2681
2682
  	init_kmem_cache_node(n, kmem_cache_node);
  	inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2683

136333d10   Shaohua Li   slub: explicitly ...
2684
  	add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0fc   Christoph Lameter   SLUB core
2685
2686
2687
2688
2689
  }
  
  static void free_kmem_cache_nodes(struct kmem_cache *s)
  {
  	int node;
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2690
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2691
  		struct kmem_cache_node *n = s->node[node];
51df11428   Christoph Lameter   slub: Dynamically...
2692

73367bd8e   Alexander Duyck   slub: move kmem_c...
2693
  		if (n)
51df11428   Christoph Lameter   slub: Dynamically...
2694
  			kmem_cache_free(kmem_cache_node, n);
81819f0fc   Christoph Lameter   SLUB core
2695
2696
2697
  		s->node[node] = NULL;
  	}
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2698
  static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2699
2700
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
2701

f64dc58c5   Christoph Lameter   Memoryless nodes:...
2702
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2703
  		struct kmem_cache_node *n;
73367bd8e   Alexander Duyck   slub: move kmem_c...
2704
  		if (slab_state == DOWN) {
55136592f   Christoph Lameter   slub: Remove dyna...
2705
  			early_kmem_cache_node_alloc(node);
73367bd8e   Alexander Duyck   slub: move kmem_c...
2706
2707
  			continue;
  		}
51df11428   Christoph Lameter   slub: Dynamically...
2708
  		n = kmem_cache_alloc_node(kmem_cache_node,
55136592f   Christoph Lameter   slub: Remove dyna...
2709
  						GFP_KERNEL, node);
81819f0fc   Christoph Lameter   SLUB core
2710

73367bd8e   Alexander Duyck   slub: move kmem_c...
2711
2712
2713
  		if (!n) {
  			free_kmem_cache_nodes(s);
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
2714
  		}
73367bd8e   Alexander Duyck   slub: move kmem_c...
2715

81819f0fc   Christoph Lameter   SLUB core
2716
  		s->node[node] = n;
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2717
  		init_kmem_cache_node(n, s);
81819f0fc   Christoph Lameter   SLUB core
2718
2719
2720
  	}
  	return 1;
  }
81819f0fc   Christoph Lameter   SLUB core
2721

c0bdb232b   David Rientjes   slub: rename calc...
2722
  static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d88   David Rientjes   slub: move min_pa...
2723
2724
2725
2726
2727
2728
2729
  {
  	if (min < MIN_PARTIAL)
  		min = MIN_PARTIAL;
  	else if (min > MAX_PARTIAL)
  		min = MAX_PARTIAL;
  	s->min_partial = min;
  }
81819f0fc   Christoph Lameter   SLUB core
2730
2731
2732
2733
  /*
   * calculate_sizes() determines the order and the distribution of data within
   * a slab object.
   */
06b285dc3   Christoph Lameter   slub: Make the or...
2734
  static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0fc   Christoph Lameter   SLUB core
2735
2736
2737
2738
  {
  	unsigned long flags = s->flags;
  	unsigned long size = s->objsize;
  	unsigned long align = s->align;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2739
  	int order;
81819f0fc   Christoph Lameter   SLUB core
2740
2741
  
  	/*
d8b42bf54   Christoph Lameter   slub: Rearrange #...
2742
2743
2744
2745
2746
2747
2748
2749
  	 * Round up object size to the next word boundary. We can only
  	 * place the free pointer at word boundaries and this determines
  	 * the possible location of the free pointer.
  	 */
  	size = ALIGN(size, sizeof(void *));
  
  #ifdef CONFIG_SLUB_DEBUG
  	/*
81819f0fc   Christoph Lameter   SLUB core
2750
2751
2752
2753
2754
  	 * Determine if we can poison the object itself. If the user of
  	 * the slab may touch the object after free or before allocation
  	 * then we should never poison the object itself.
  	 */
  	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f2   Christoph Lameter   Slab allocators: ...
2755
  			!s->ctor)
81819f0fc   Christoph Lameter   SLUB core
2756
2757
2758
  		s->flags |= __OBJECT_POISON;
  	else
  		s->flags &= ~__OBJECT_POISON;
81819f0fc   Christoph Lameter   SLUB core
2759
2760
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2761
  	 * If we are Redzoning then check if there is some space between the
81819f0fc   Christoph Lameter   SLUB core
2762
  	 * end of the object and the free pointer. If not then add an
672bba3a4   Christoph Lameter   SLUB: update comm...
2763
  	 * additional word to have some bytes to store Redzone information.
81819f0fc   Christoph Lameter   SLUB core
2764
2765
2766
  	 */
  	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2767
  #endif
81819f0fc   Christoph Lameter   SLUB core
2768
2769
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2770
2771
  	 * With that we have determined the number of bytes in actual use
  	 * by the object. This is the potential offset to the free pointer.
81819f0fc   Christoph Lameter   SLUB core
2772
2773
2774
2775
  	 */
  	s->inuse = size;
  
  	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f2   Christoph Lameter   Slab allocators: ...
2776
  		s->ctor)) {
81819f0fc   Christoph Lameter   SLUB core
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
  		/*
  		 * Relocate free pointer after the object if it is not
  		 * permitted to overwrite the first word of the object on
  		 * kmem_cache_free.
  		 *
  		 * This is the case if we do RCU, have a constructor or
  		 * destructor or are poisoning the objects.
  		 */
  		s->offset = size;
  		size += sizeof(void *);
  	}
c12b3c625   Christoph Lameter   SLUB Debug: Fix o...
2788
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
2789
2790
2791
2792
2793
2794
  	if (flags & SLAB_STORE_USER)
  		/*
  		 * Need to store information about allocs and frees after
  		 * the object.
  		 */
  		size += 2 * sizeof(struct track);
be7b3fbce   Christoph Lameter   SLUB: after objec...
2795
  	if (flags & SLAB_RED_ZONE)
81819f0fc   Christoph Lameter   SLUB core
2796
2797
2798
2799
  		/*
  		 * Add some empty padding so that we can catch
  		 * overwrites from earlier objects rather than let
  		 * tracking information or the free pointer be
0211a9c85   Frederik Schwarzer   trivial: fix an -...
2800
  		 * corrupted if a user writes before the start
81819f0fc   Christoph Lameter   SLUB core
2801
2802
2803
  		 * of the object.
  		 */
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2804
  #endif
672bba3a4   Christoph Lameter   SLUB: update comm...
2805

81819f0fc   Christoph Lameter   SLUB core
2806
2807
  	/*
  	 * Determine the alignment based on various parameters that the
65c02d4cf   Christoph Lameter   SLUB: add support...
2808
2809
  	 * user specified and the dynamic determination of cache line size
  	 * on bootup.
81819f0fc   Christoph Lameter   SLUB core
2810
2811
  	 */
  	align = calculate_alignment(flags, align, s->objsize);
dcb0ce1bd   Zhang, Yanmin   slub: change kmem...
2812
  	s->align = align;
81819f0fc   Christoph Lameter   SLUB core
2813
2814
2815
2816
2817
2818
2819
2820
  
  	/*
  	 * SLUB stores one object immediately after another beginning from
  	 * offset 0. In order to align the objects we have to simply size
  	 * each object to conform to the alignment.
  	 */
  	size = ALIGN(size, align);
  	s->size = size;
06b285dc3   Christoph Lameter   slub: Make the or...
2821
2822
2823
  	if (forced_order >= 0)
  		order = forced_order;
  	else
ab9a0f196   Lai Jiangshan   slub: automatical...
2824
  		order = calculate_order(size, s->reserved);
81819f0fc   Christoph Lameter   SLUB core
2825

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2826
  	if (order < 0)
81819f0fc   Christoph Lameter   SLUB core
2827
  		return 0;
b7a49f0d4   Christoph Lameter   slub: Determine g...
2828
  	s->allocflags = 0;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2829
  	if (order)
b7a49f0d4   Christoph Lameter   slub: Determine g...
2830
2831
2832
2833
2834
2835
2836
  		s->allocflags |= __GFP_COMP;
  
  	if (s->flags & SLAB_CACHE_DMA)
  		s->allocflags |= SLUB_DMA;
  
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		s->allocflags |= __GFP_RECLAIMABLE;
81819f0fc   Christoph Lameter   SLUB core
2837
2838
2839
  	/*
  	 * Determine the number of objects per slab
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2840
2841
  	s->oo = oo_make(order, size, s->reserved);
  	s->min = oo_make(get_order(size), size, s->reserved);
205ab99dd   Christoph Lameter   slub: Update stat...
2842
2843
  	if (oo_objects(s->oo) > oo_objects(s->max))
  		s->max = s->oo;
81819f0fc   Christoph Lameter   SLUB core
2844

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2845
  	return !!oo_objects(s->oo);
81819f0fc   Christoph Lameter   SLUB core
2846
2847
  
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2848
  static int kmem_cache_open(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
2849
2850
  		const char *name, size_t size,
  		size_t align, unsigned long flags,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
2851
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
2852
2853
2854
2855
  {
  	memset(s, 0, kmem_size);
  	s->name = name;
  	s->ctor = ctor;
81819f0fc   Christoph Lameter   SLUB core
2856
  	s->objsize = size;
81819f0fc   Christoph Lameter   SLUB core
2857
  	s->align = align;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
2858
  	s->flags = kmem_cache_flags(size, flags, name, ctor);
ab9a0f196   Lai Jiangshan   slub: automatical...
2859
  	s->reserved = 0;
81819f0fc   Christoph Lameter   SLUB core
2860

da9a638c6   Lai Jiangshan   slub,rcu: don't a...
2861
2862
  	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
  		s->reserved = sizeof(struct rcu_head);
81819f0fc   Christoph Lameter   SLUB core
2863

06b285dc3   Christoph Lameter   slub: Make the or...
2864
  	if (!calculate_sizes(s, -1))
81819f0fc   Christoph Lameter   SLUB core
2865
  		goto error;
3de472138   David Rientjes   slub: use size an...
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
  	if (disable_higher_order_debug) {
  		/*
  		 * Disable debugging flags that store metadata if the min slab
  		 * order increased.
  		 */
  		if (get_order(s->size) > get_order(s->objsize)) {
  			s->flags &= ~DEBUG_METADATA_FLAGS;
  			s->offset = 0;
  			if (!calculate_sizes(s, -1))
  				goto error;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
2878

b789ef518   Christoph Lameter   slub: Add cmpxchg...
2879
2880
2881
2882
2883
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
  		/* Enable fast mode */
  		s->flags |= __CMPXCHG_DOUBLE;
  #endif
3b89d7d88   David Rientjes   slub: move min_pa...
2884
2885
2886
2887
  	/*
  	 * The larger the object size is, the more pages we want on the partial
  	 * list to avoid pounding the page allocator excessively.
  	 */
49e225858   Christoph Lameter   slub: per cpu cac...
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
  	set_min_partial(s, ilog2(s->size) / 2);
  
  	/*
  	 * cpu_partial determines the maximum number of objects kept in the
  	 * per cpu partial lists of a processor.
  	 *
  	 * Per cpu partial lists mainly contain slabs that just have one
  	 * object freed. If they are used for allocation then they can be
  	 * filled up again with minimal effort. The slab will never hit the
  	 * per node partial lists and therefore no locking will be required.
  	 *
  	 * This setting also determines
  	 *
  	 * A) The number of objects from per cpu partial slabs dumped to the
  	 *    per node list when we reach the limit.
9f2649041   Alex Shi   slub: correct com...
2903
  	 * B) The number of objects in cpu partial slabs to extract from the
49e225858   Christoph Lameter   slub: per cpu cac...
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
  	 *    per node list when we run out of per cpu objects. We only fetch 50%
  	 *    to keep some capacity around for frees.
  	 */
  	if (s->size >= PAGE_SIZE)
  		s->cpu_partial = 2;
  	else if (s->size >= 1024)
  		s->cpu_partial = 6;
  	else if (s->size >= 256)
  		s->cpu_partial = 13;
  	else
  		s->cpu_partial = 30;
81819f0fc   Christoph Lameter   SLUB core
2915
2916
  	s->refcount = 1;
  #ifdef CONFIG_NUMA
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
2917
  	s->remote_node_defrag_ratio = 1000;
81819f0fc   Christoph Lameter   SLUB core
2918
  #endif
55136592f   Christoph Lameter   slub: Remove dyna...
2919
  	if (!init_kmem_cache_nodes(s))
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2920
  		goto error;
81819f0fc   Christoph Lameter   SLUB core
2921

55136592f   Christoph Lameter   slub: Remove dyna...
2922
  	if (alloc_kmem_cache_cpus(s))
81819f0fc   Christoph Lameter   SLUB core
2923
  		return 1;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2924

4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2925
  	free_kmem_cache_nodes(s);
81819f0fc   Christoph Lameter   SLUB core
2926
2927
2928
2929
2930
  error:
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slab %s size=%lu realsize=%u "
  			"order=%u offset=%u flags=%lx
  ",
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2931
  			s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0fc   Christoph Lameter   SLUB core
2932
2933
2934
  			s->offset, flags);
  	return 0;
  }
81819f0fc   Christoph Lameter   SLUB core
2935
2936
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2937
2938
2939
2940
2941
2942
2943
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->objsize;
  }
  EXPORT_SYMBOL(kmem_cache_size);
33b12c381   Christoph Lameter   slub: Dump list o...
2944
2945
2946
2947
2948
2949
  static void list_slab_objects(struct kmem_cache *s, struct page *page,
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	void *addr = page_address(page);
  	void *p;
a5dd5c117   Namhyung Kim   slub: Fix signedn...
2950
2951
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
  				     sizeof(long), GFP_ATOMIC);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2952
2953
  	if (!map)
  		return;
33b12c381   Christoph Lameter   slub: Dump list o...
2954
2955
  	slab_err(s, page, "%s", text);
  	slab_lock(page);
33b12c381   Christoph Lameter   slub: Dump list o...
2956

5f80b13ae   Christoph Lameter   slub: get_map() f...
2957
  	get_map(s, page, map);
33b12c381   Christoph Lameter   slub: Dump list o...
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
  	for_each_object(p, s, addr, page->objects) {
  
  		if (!test_bit(slab_index(p, s, addr), map)) {
  			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu
  ",
  							p, p - addr);
  			print_tracking(s, p);
  		}
  	}
  	slab_unlock(page);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2968
  	kfree(map);
33b12c381   Christoph Lameter   slub: Dump list o...
2969
2970
  #endif
  }
81819f0fc   Christoph Lameter   SLUB core
2971
  /*
599870b17   Christoph Lameter   slub: free_list()...
2972
   * Attempt to free all partial slabs on a node.
69cb8e6b7   Christoph Lameter   slub: free slabs ...
2973
2974
   * This is called from kmem_cache_close(). We must be the last thread
   * using the cache and therefore we do not need to lock anymore.
81819f0fc   Christoph Lameter   SLUB core
2975
   */
599870b17   Christoph Lameter   slub: free_list()...
2976
  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0fc   Christoph Lameter   SLUB core
2977
  {
81819f0fc   Christoph Lameter   SLUB core
2978
  	struct page *page, *h;
33b12c381   Christoph Lameter   slub: Dump list o...
2979
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0fc   Christoph Lameter   SLUB core
2980
  		if (!page->inuse) {
5cc6eee8a   Christoph Lameter   slub: explicit li...
2981
  			remove_partial(n, page);
81819f0fc   Christoph Lameter   SLUB core
2982
  			discard_slab(s, page);
33b12c381   Christoph Lameter   slub: Dump list o...
2983
2984
2985
  		} else {
  			list_slab_objects(s, page,
  				"Objects remaining on kmem_cache_close()");
599870b17   Christoph Lameter   slub: free_list()...
2986
  		}
33b12c381   Christoph Lameter   slub: Dump list o...
2987
  	}
81819f0fc   Christoph Lameter   SLUB core
2988
2989
2990
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2991
   * Release all resources used by a slab cache.
81819f0fc   Christoph Lameter   SLUB core
2992
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
2993
  static inline int kmem_cache_close(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2994
2995
2996
2997
  {
  	int node;
  
  	flush_all(s);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2998
  	free_percpu(s->cpu_slab);
81819f0fc   Christoph Lameter   SLUB core
2999
  	/* Attempt to free all objects */
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3000
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
3001
  		struct kmem_cache_node *n = get_node(s, node);
599870b17   Christoph Lameter   slub: free_list()...
3002
3003
  		free_partial(s, n);
  		if (n->nr_partial || slabs_node(s, node))
81819f0fc   Christoph Lameter   SLUB core
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
  			return 1;
  	}
  	free_kmem_cache_nodes(s);
  	return 0;
  }
  
  /*
   * Close a cache and release the kmem_cache structure
   * (must be used for caches created using kmem_cache_create)
   */
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	down_write(&slub_lock);
  	s->refcount--;
  	if (!s->refcount) {
  		list_del(&s->list);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3020
  		up_write(&slub_lock);
d629d8195   Pekka Enberg   slub: improve kme...
3021
3022
3023
3024
3025
3026
  		if (kmem_cache_close(s)) {
  			printk(KERN_ERR "SLUB %s: %s called for cache that "
  				"still has objects.
  ", s->name, __func__);
  			dump_stack();
  		}
d76b1590e   Eric Dumazet   slub: Fix kmem_ca...
3027
3028
  		if (s->flags & SLAB_DESTROY_BY_RCU)
  			rcu_barrier();
81819f0fc   Christoph Lameter   SLUB core
3029
  		sysfs_slab_remove(s);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3030
3031
  	} else
  		up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3032
3033
3034
3035
3036
3037
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
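  /*
   * Illustrative example, not part of this file: the usual life cycle of a
   * cache as seen from a user of this API. The cache name, object type and
   * function names are hypothetical.
   */
  #if 0
  struct foo {
  	int id;
  	struct list_head list;
  };

  static struct kmem_cache *foo_cache;

  static int __init foo_init(void)
  {
  	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
  					0, SLAB_HWCACHE_ALIGN, NULL);
  	return foo_cache ? 0 : -ENOMEM;
  }

  static void __exit foo_exit(void)
  {
  	/* All objects must have been freed before the cache is destroyed. */
  	kmem_cache_destroy(foo_cache);
  }
  #endif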
  
  /********************************************************************
   *		Kmalloc subsystem
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
3038
  struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
81819f0fc   Christoph Lameter   SLUB core
3039
  EXPORT_SYMBOL(kmalloc_caches);
51df11428   Christoph Lameter   slub: Dynamically...
3040
  static struct kmem_cache *kmem_cache;
55136592f   Christoph Lameter   slub: Remove dyna...
3041
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
3042
  static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
55136592f   Christoph Lameter   slub: Remove dyna...
3043
  #endif
81819f0fc   Christoph Lameter   SLUB core
3044
3045
  static int __init setup_slub_min_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3046
  	get_option(&str, &slub_min_order);
81819f0fc   Christoph Lameter   SLUB core
3047
3048
3049
3050
3051
3052
3053
3054
  
  	return 1;
  }
  
  __setup("slub_min_order=", setup_slub_min_order);
  
  static int __init setup_slub_max_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3055
  	get_option(&str, &slub_max_order);
818cf5909   David Rientjes   slub: enforce MAX...
3056
  	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0fc   Christoph Lameter   SLUB core
3057
3058
3059
3060
3061
3062
3063
3064
  
  	return 1;
  }
  
  __setup("slub_max_order=", setup_slub_max_order);
  
  static int __init setup_slub_min_objects(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
3065
  	get_option(&str, &slub_min_objects);
81819f0fc   Christoph Lameter   SLUB core
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
  
  	return 1;
  }
  
  __setup("slub_min_objects=", setup_slub_min_objects);
  
  static int __init setup_slub_nomerge(char *str)
  {
  	slub_nomerge = 1;
  	return 1;
  }
  
  __setup("slub_nomerge", setup_slub_nomerge);
51df11428   Christoph Lameter   slub: Dynamically...
3079
3080
  static struct kmem_cache *__init create_kmalloc_cache(const char *name,
  						int size, unsigned int flags)
81819f0fc   Christoph Lameter   SLUB core
3081
  {
51df11428   Christoph Lameter   slub: Dynamically...
3082
3083
3084
  	struct kmem_cache *s;
  
  	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
83b519e8b   Pekka Enberg   slab: setup alloc...
3085
3086
3087
3088
  	/*
  	 * This function is called with IRQs disabled during early-boot on
  	 * single CPU so there's no need to take slub_lock here.
  	 */
55136592f   Christoph Lameter   slub: Remove dyna...
3089
  	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
319d1e240   Christoph Lameter   slub: Drop fallba...
3090
  								flags, NULL))
81819f0fc   Christoph Lameter   SLUB core
3091
3092
3093
  		goto panic;
  
  	list_add(&s->list, &slab_caches);
51df11428   Christoph Lameter   slub: Dynamically...
3094
  	return s;
81819f0fc   Christoph Lameter   SLUB core
3095
3096
3097
3098
  
  panic:
  	panic("Creation of kmalloc slab %s size=%d failed.
  ", name, size);
51df11428   Christoph Lameter   slub: Dynamically...
3099
  	return NULL;
81819f0fc   Christoph Lameter   SLUB core
3100
  }
f1b263393   Christoph Lameter   SLUB: faster more...
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
  /*
   * Conversion table for small slab sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3133
3134
3135
3136
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
81819f0fc   Christoph Lameter   SLUB core
3137
3138
  static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  {
f1b263393   Christoph Lameter   SLUB: faster more...
3139
  	int index;
81819f0fc   Christoph Lameter   SLUB core
3140

f1b263393   Christoph Lameter   SLUB: faster more...
3141
3142
3143
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
81819f0fc   Christoph Lameter   SLUB core
3144

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3145
  		index = size_index[size_index_elem(size)];
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3146
  	} else
f1b263393   Christoph Lameter   SLUB: faster more...
3147
  		index = fls(size - 1);
81819f0fc   Christoph Lameter   SLUB core
3148
3149
  
  #ifdef CONFIG_ZONE_DMA
f1b263393   Christoph Lameter   SLUB: faster more...
3150
  	if (unlikely((flags & SLUB_DMA)))
51df11428   Christoph Lameter   slub: Dynamically...
3151
  		return kmalloc_dma_caches[index];
f1b263393   Christoph Lameter   SLUB: faster more...
3152

81819f0fc   Christoph Lameter   SLUB core
3153
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
3154
  	return kmalloc_caches[index];
81819f0fc   Christoph Lameter   SLUB core
3155
3156
3157
3158
  }
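  /*
   * Worked examples of the lookup above (illustrative):
   *
   *	kmalloc(100, ...): 100 <= 192, so index = size_index[(100 - 1) / 8]
   *	= size_index[12] = 7 and the object comes from kmalloc_caches[7],
   *	the 128 byte cache.
   *
   *	kmalloc(600, ...): 600 > 192, so index = fls(599) = 10 and the
   *	object comes from kmalloc_caches[10], the 1024 byte cache.
   */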
  
  void *__kmalloc(size_t size, gfp_t flags)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3159
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3160
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3161

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3162
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3163
  		return kmalloc_large(size, flags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3164
3165
3166
3167
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3168
  		return s;
2154a3363   Christoph Lameter   slub: Use a const...
3169
  	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3170

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3171
  	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3172
3173
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3174
3175
  }
  EXPORT_SYMBOL(__kmalloc);
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3176
  #ifdef CONFIG_NUMA
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3177
3178
  static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
  {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
3179
  	struct page *page;
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3180
  	void *ptr = NULL;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3181

b1eeab676   Vegard Nossum   kmemcheck: add ho...
3182
3183
  	flags |= __GFP_COMP | __GFP_NOTRACK;
  	page = alloc_pages_node(node, flags, get_order(size));
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3184
  	if (page)
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3185
3186
3187
3188
  		ptr = page_address(page);
  
  	kmemleak_alloc(ptr, size, 1, flags);
  	return ptr;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3189
  }
81819f0fc   Christoph Lameter   SLUB core
3190
3191
  void *__kmalloc_node(size_t size, gfp_t flags, int node)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3192
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3193
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3194

057685cf5   Ingo Molnar   Merge branch 'for...
3195
  	if (unlikely(size > SLUB_MAX_SIZE)) {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3196
  		ret = kmalloc_large_node(size, flags, node);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3197
3198
3199
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3200
3201
3202
  
  		return ret;
  	}
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3203
3204
3205
3206
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3207
  		return s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3208
  	ret = slab_alloc(s, flags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3209
  	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3210
3211
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3212
3213
3214
3215
3216
3217
  }
  EXPORT_SYMBOL(__kmalloc_node);
  #endif
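  /*
   * Illustrative example, not part of this file: a per node buffer allocated
   * with kmalloc_node(), which ends up in __kmalloc_node() above on NUMA
   * builds. The size and the helper name are hypothetical.
   */
  #if 0
  static void *alloc_node_buffer(int node)
  {
  	return kmalloc_node(4096, GFP_KERNEL | __GFP_ZERO, node);
  }
  #endif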
  
  size_t ksize(const void *object)
  {
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3218
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
3219

ef8b4520b   Christoph Lameter   Slab allocators: ...
3220
  	if (unlikely(object == ZERO_SIZE_PTR))
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3221
  		return 0;
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3222
  	page = virt_to_head_page(object);
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3223

76994412f   Pekka Enberg   slub: ksize() abu...
3224
3225
  	if (unlikely(!PageSlab(page))) {
  		WARN_ON(!PageCompound(page));
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3226
  		return PAGE_SIZE << compound_order(page);
76994412f   Pekka Enberg   slub: ksize() abu...
3227
  	}
81819f0fc   Christoph Lameter   SLUB core
3228

b3d41885d   Eric Dumazet   slub: fix kmemche...
3229
  	return slab_ksize(page->slab);
81819f0fc   Christoph Lameter   SLUB core
3230
  }
b1aabecd5   Kirill A. Shutemov   mm: Export symbol...
3231
  EXPORT_SYMBOL(ksize);
81819f0fc   Christoph Lameter   SLUB core
3232

d18a90dd8   Ben Greear   slub: Add method ...
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
  #ifdef CONFIG_SLUB_DEBUG
  bool verify_mem_not_deleted(const void *x)
  {
  	struct page *page;
  	void *object = (void *)x;
  	unsigned long flags;
  	bool rv;
  
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
  		return false;
  
  	local_irq_save(flags);
  
  	page = virt_to_head_page(x);
  	if (unlikely(!PageSlab(page))) {
  		/* maybe it was from stack? */
  		rv = true;
  		goto out_unlock;
  	}
  
  	slab_lock(page);
  	if (on_freelist(page->slab, page, object)) {
  		object_err(page->slab, page, object, "Object is on free-list");
  		rv = false;
  	} else {
  		rv = true;
  	}
  	slab_unlock(page);
  
  out_unlock:
  	local_irq_restore(flags);
  	return rv;
  }
  EXPORT_SYMBOL(verify_mem_not_deleted);
  #endif
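  /*
   * Illustrative example, not part of this file: using
   * verify_mem_not_deleted() (only available with CONFIG_SLUB_DEBUG) as a
   * debugging aid when a use after free is suspected. "obj" is hypothetical.
   */
  #if 0
  static void foo_check_object(const void *obj)
  {
  	if (!verify_mem_not_deleted(obj))
  		pr_err("suspected use of freed object %p\n", obj);
  }
  #endif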
81819f0fc   Christoph Lameter   SLUB core
3268
3269
  void kfree(const void *x)
  {
81819f0fc   Christoph Lameter   SLUB core
3270
  	struct page *page;
5bb983b0c   Christoph Lameter   SLUB: Deal with a...
3271
  	void *object = (void *)x;
81819f0fc   Christoph Lameter   SLUB core
3272

2121db74b   Pekka Enberg   kmemtrace: trace ...
3273
  	trace_kfree(_RET_IP_, x);
2408c5503   Satyam Sharma   {slub, slob}: use...
3274
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0fc   Christoph Lameter   SLUB core
3275
  		return;
b49af68ff   Christoph Lameter   Add virt_to_head_...
3276
  	page = virt_to_head_page(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3277
  	if (unlikely(!PageSlab(page))) {
0937502af   Christoph Lameter   slub: Add check f...
3278
  		BUG_ON(!PageCompound(page));
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3279
  		kmemleak_free(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3280
3281
3282
  		put_page(page);
  		return;
  	}
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3283
  	slab_free(page->slab, page, object, _RET_IP_);
81819f0fc   Christoph Lameter   SLUB core
3284
3285
  }
  EXPORT_SYMBOL(kfree);
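  /*
   * Illustrative example, not part of this file: kmalloc()/ksize()/kfree()
   * as used by callers of this allocator. The requested 100 bytes are served
   * from the 128 byte kmalloc cache, which ksize() typically reports.
   */
  #if 0
  static void kmalloc_example(void)
  {
  	char *buf = kmalloc(100, GFP_KERNEL);

  	if (!buf)
  		return;
  	pr_info("usable size %zu\n", ksize(buf));	/* typically 128 */
  	kfree(buf);
  }
  #endif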
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3286
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3287
3288
3289
3290
3291
3292
3293
3294
   * kmem_cache_shrink removes empty slabs from the partial lists and sorts
   * the remaining slabs by the number of items in use. The slabs with the
   * most items in use come first. New allocations will then fill those up
   * and thus they can be removed from the partial lists.
   *
   * The slabs with the least items are placed last. This results in them
   * being allocated from last, increasing the chance that the last objects
   * are freed in them.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3295
3296
3297
3298
3299
3300
3301
3302
   */
  int kmem_cache_shrink(struct kmem_cache *s)
  {
  	int node;
  	int i;
  	struct kmem_cache_node *n;
  	struct page *page;
  	struct page *t;
205ab99dd   Christoph Lameter   slub: Update stat...
3303
  	int objects = oo_objects(s->max);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3304
  	struct list_head *slabs_by_inuse =
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3305
  		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3306
3307
3308
3309
3310
3311
  	unsigned long flags;
  
  	if (!slabs_by_inuse)
  		return -ENOMEM;
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3312
  	for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3313
3314
3315
3316
  		n = get_node(s, node);
  
  		if (!n->nr_partial)
  			continue;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3317
  		for (i = 0; i < objects; i++)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3318
3319
3320
3321
3322
  			INIT_LIST_HEAD(slabs_by_inuse + i);
  
  		spin_lock_irqsave(&n->list_lock, flags);
  
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3323
  		 * Build lists indexed by the items in use in each slab.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3324
  		 *
672bba3a4   Christoph Lameter   SLUB: update comm...
3325
3326
  		 * Note that concurrent frees may occur while we hold the
  		 * list_lock. page->inuse here is the upper limit.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3327
3328
  		 */
  		list_for_each_entry_safe(page, t, &n->partial, lru) {
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3329
3330
3331
  			list_move(&page->lru, slabs_by_inuse + page->inuse);
  			if (!page->inuse)
  				n->nr_partial--;
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3332
  		}
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3333
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3334
3335
  		 * Rebuild the partial list with the slabs filled up most
  		 * first and the least used slabs at the end.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3336
  		 */
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3337
  		for (i = objects - 1; i > 0; i--)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3338
  			list_splice(slabs_by_inuse + i, n->partial.prev);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3339
  		spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b7   Christoph Lameter   slub: free slabs ...
3340
3341
3342
3343
  
  		/* Release empty slabs */
  		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
  			discard_slab(s, page);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3344
3345
3346
3347
3348
3349
  	}
  
  	kfree(slabs_by_inuse);
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
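  /*
   * Illustrative usage sketch (not part of this file; "foo_cache" is a
   * made-up name): a cache owner that has just released a large batch of
   * objects can hand fully empty slabs back to the page allocator with
   *
   *	kmem_cache_shrink(foo_cache);
   *
   * The memory hotplug offline callback below does exactly this for every
   * registered cache.
   */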
92a5bbc11   Pekka Enberg   SLUB: Fix memory ...
3350
  #if defined(CONFIG_MEMORY_HOTPLUG)
b9049e234   Yasunori Goto   memory hotplug: m...
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
  static int slab_mem_going_offline_callback(void *arg)
  {
  	struct kmem_cache *s;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list)
  		kmem_cache_shrink(s);
  	up_read(&slub_lock);
  
  	return 0;
  }
  
  static void slab_mem_offline_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int offline_node;
  
  	offline_node = marg->status_change_nid;
  
  	/*
  	 * If the node still has available memory, we still need its
  	 * kmem_cache_node, so there is nothing to tear down here.
  	 */
  	if (offline_node < 0)
  		return;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		n = get_node(s, offline_node);
  		if (n) {
  			/*
  			 * if n->nr_slabs > 0, slabs still exist on the node
  			 * that is going down. We were unable to free them,
c9404c9c3   Adam Buchbinder   Fix misspelling o...
3386
  			 * and offline_pages() function shouldn't call this
b9049e234   Yasunori Goto   memory hotplug: m...
3387
3388
  			 * callback. So, we must fail.
  			 */
0f389ec63   Christoph Lameter   slub: No need for...
3389
  			BUG_ON(slabs_node(s, offline_node));
b9049e234   Yasunori Goto   memory hotplug: m...
3390
3391
  
  			s->node[offline_node] = NULL;
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3392
  			kmem_cache_free(kmem_cache_node, n);
b9049e234   Yasunori Goto   memory hotplug: m...
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
  		}
  	}
  	up_read(&slub_lock);
  }
  
  static int slab_mem_going_online_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int nid = marg->status_change_nid;
  	int ret = 0;
  
  	/*
  	 * If the node's memory is already available, then kmem_cache_node is
  	 * already created. Nothing to do.
  	 */
  	if (nid < 0)
  		return 0;
  
  	/*
0121c619d   Christoph Lameter   slub: Whitespace ...
3414
  	 * We are bringing a node online. No memory is available yet. We must
b9049e234   Yasunori Goto   memory hotplug: m...
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
  	 * allocate a kmem_cache_node structure in order to bring the node
  	 * online.
  	 */
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		/*
  		 * XXX: kmem_cache_alloc_node will fall back to other nodes
  		 *      since memory is not yet available from the node that
  		 *      is brought up.
  		 */
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3425
  		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e234   Yasunori Goto   memory hotplug: m...
3426
3427
3428
3429
  		if (!n) {
  			ret = -ENOMEM;
  			goto out;
  		}
5595cffc8   Pekka Enberg   SLUB: dynamic per...
3430
  		init_kmem_cache_node(n, s);
b9049e234   Yasunori Goto   memory hotplug: m...
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
  		s->node[nid] = n;
  	}
  out:
  	up_read(&slub_lock);
  	return ret;
  }
  
  static int slab_memory_callback(struct notifier_block *self,
  				unsigned long action, void *arg)
  {
  	int ret = 0;
  
  	switch (action) {
  	case MEM_GOING_ONLINE:
  		ret = slab_mem_going_online_callback(arg);
  		break;
  	case MEM_GOING_OFFLINE:
  		ret = slab_mem_going_offline_callback(arg);
  		break;
  	case MEM_OFFLINE:
  	case MEM_CANCEL_ONLINE:
  		slab_mem_offline_callback(arg);
  		break;
  	case MEM_ONLINE:
  	case MEM_CANCEL_OFFLINE:
  		break;
  	}
dc19f9db3   KAMEZAWA Hiroyuki   memcg: memory hot...
3458
3459
3460
3461
  	if (ret)
  		ret = notifier_from_errno(ret);
  	else
  		ret = NOTIFY_OK;
b9049e234   Yasunori Goto   memory hotplug: m...
3462
3463
3464
3465
  	return ret;
  }
  
  #endif /* CONFIG_MEMORY_HOTPLUG */
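  /*
   * Summary of the memory hotplug handling above: MEM_GOING_ONLINE allocates
   * a kmem_cache_node for the new node in every cache, MEM_GOING_OFFLINE
   * shrinks all caches so that empty slabs are released before the node goes
   * away, and MEM_OFFLINE / MEM_CANCEL_ONLINE free the per node structures
   * again.
   */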
81819f0fc   Christoph Lameter   SLUB core
3466
3467
3468
  /********************************************************************
   *			Basic setup of slabs
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
  /*
   * Used for early kmem_cache structures that were allocated using
   * the page allocator
   */
  
  static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
  {
  	int node;
  
  	list_add(&s->list, &slab_caches);
  	s->refcount = -1;
  
  	for_each_node_state(node, N_NORMAL_MEMORY) {
  		struct kmem_cache_node *n = get_node(s, node);
  		struct page *p;
  
  		if (n) {
  			list_for_each_entry(p, &n->partial, lru)
  				p->slab = s;
607bf324a   Li Zefan   slub: Fix a typo ...
3488
  #ifdef CONFIG_SLUB_DEBUG
51df11428   Christoph Lameter   slub: Dynamically...
3489
3490
3491
3492
3493
3494
  			list_for_each_entry(p, &n->full, lru)
  				p->slab = s;
  #endif
  		}
  	}
  }
81819f0fc   Christoph Lameter   SLUB core
3495
3496
3497
  void __init kmem_cache_init(void)
  {
  	int i;
4b356be01   Christoph Lameter   SLUB: minimum ali...
3498
  	int caches = 0;
51df11428   Christoph Lameter   slub: Dynamically...
3499
3500
  	struct kmem_cache *temp_kmem_cache;
  	int order;
51df11428   Christoph Lameter   slub: Dynamically...
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
  	struct kmem_cache *temp_kmem_cache_node;
  	unsigned long kmalloc_size;
  
  	kmem_size = offsetof(struct kmem_cache, node) +
  				nr_node_ids * sizeof(struct kmem_cache_node *);
  
  	/* Allocate two kmem_caches from the page allocator */
  	kmalloc_size = ALIGN(kmem_size, cache_line_size());
  	order = get_order(2 * kmalloc_size);
  	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
81819f0fc   Christoph Lameter   SLUB core
3511
3512
  	/*
  	 * Must first have the slab cache available for the allocations of the
672bba3a4   Christoph Lameter   SLUB: update comm...
3513
  	 * struct kmem_cache_node's. There is special bootstrap code in
81819f0fc   Christoph Lameter   SLUB core
3514
3515
  	 * kmem_cache_open for slab_state == DOWN.
  	 */
51df11428   Christoph Lameter   slub: Dynamically...
3516
3517
3518
3519
3520
  	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
  
  	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
  		sizeof(struct kmem_cache_node),
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
b9049e234   Yasunori Goto   memory hotplug: m...
3521

0c40ba4fd   Nadia Derbey   ipc: define the s...
3522
  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0fc   Christoph Lameter   SLUB core
3523
3524
3525
  
  	/* Able to allocate the per node structures */
  	slab_state = PARTIAL;
51df11428   Christoph Lameter   slub: Dynamically...
3526
3527
3528
3529
3530
  	temp_kmem_cache = kmem_cache;
  	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
  	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
81819f0fc   Christoph Lameter   SLUB core
3531

51df11428   Christoph Lameter   slub: Dynamically...
3532
3533
3534
3535
3536
3537
  	/*
  	 * Allocate kmem_cache_node properly from the kmem_cache slab.
  	 * kmem_cache_node is separately allocated so no need to
  	 * update any list pointers.
  	 */
  	temp_kmem_cache_node = kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
3538

51df11428   Christoph Lameter   slub: Dynamically...
3539
3540
3541
3542
3543
3544
  	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
  
  	kmem_cache_bootstrap_fixup(kmem_cache_node);
  
  	caches++;
51df11428   Christoph Lameter   slub: Dynamically...
3545
3546
3547
3548
3549
3550
  	kmem_cache_bootstrap_fixup(kmem_cache);
  	caches++;
  	/* Free temporary boot structure */
  	free_pages((unsigned long)temp_kmem_cache, order);
  
  	/* Now we can use the kmem_cache to allocate kmalloc slabs */
f1b263393   Christoph Lameter   SLUB: faster more...
3551
3552
3553
3554
  
  	/*
  	 * Patch up the size_index table if we have strange large alignment
  	 * requirements for the kmalloc array. This is only the case for
6446faa2f   Christoph Lameter   slub: Fix up comm...
3555
  	 * MIPS it seems. The standard arches will not generate any code here.
f1b263393   Christoph Lameter   SLUB: faster more...
3556
3557
3558
3559
3560
3561
3562
3563
3564
  	 *
  	 * Largest permitted alignment is 256 bytes due to the way we
  	 * handle the index determination for the smaller caches.
  	 *
  	 * Make sure that nothing crazy happens if someone starts tinkering
  	 * around with ARCH_KMALLOC_MINALIGN
  	 */
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3565
3566
3567
3568
3569
3570
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
f1b263393   Christoph Lameter   SLUB: faster more...
3571

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3572
3573
3574
3575
3576
3577
3578
3579
  	if (KMALLOC_MIN_SIZE == 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 byte.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  	} else if (KMALLOC_MIN_SIZE == 128) {
41d54d3bf   Christoph Lameter   slub: Do not use ...
3580
3581
3582
3583
3584
3585
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3586
  			size_index[size_index_elem(i)] = 8;
41d54d3bf   Christoph Lameter   slub: Do not use ...
3587
  	}
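  	/*
  	 * Worked example of the redirection above (illustrative only): with
  	 * KMALLOC_MIN_SIZE == 128 a kmalloc(136) request falls in the
  	 * 129..192 range, so size_index maps it to cache index 8 and the
  	 * allocation is served from the 256 byte kmalloc cache instead of a
  	 * 192 byte one.
  	 */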
51df11428   Christoph Lameter   slub: Dynamically...
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
  	/* Caches that are not of the two-to-the-power-of size */
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
  		caches++;
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
  		caches++;
  	}
  
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
  		caches++;
  	}
81819f0fc   Christoph Lameter   SLUB core
3603
3604
3605
  	slab_state = UP;
  
  	/* Provide the correct kmalloc names now that the caches are up */
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3606
3607
3608
3609
3610
3611
3612
3613
3614
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[1]->name);
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[2]->name);
  	}
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3615
3616
3617
3618
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
  
  		BUG_ON(!s);
51df11428   Christoph Lameter   slub: Dynamically...
3619
  		kmalloc_caches[i]->name = s;
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3620
  	}
81819f0fc   Christoph Lameter   SLUB core
3621
3622
3623
  
  #ifdef CONFIG_SMP
  	register_cpu_notifier(&slab_notifier);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3624
  #endif
81819f0fc   Christoph Lameter   SLUB core
3625

55136592f   Christoph Lameter   slub: Remove dyna...
3626
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
3627
3628
  	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
55136592f   Christoph Lameter   slub: Remove dyna...
3629

51df11428   Christoph Lameter   slub: Dynamically...
3630
  		if (s && s->size) {
55136592f   Christoph Lameter   slub: Remove dyna...
3631
3632
3633
3634
  			char *name = kasprintf(GFP_NOWAIT,
  				 "dma-kmalloc-%d", s->objsize);
  
  			BUG_ON(!name);
51df11428   Christoph Lameter   slub: Dynamically...
3635
3636
  			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
  				s->objsize, SLAB_CACHE_DMA);
55136592f   Christoph Lameter   slub: Remove dyna...
3637
3638
3639
  		}
  	}
  #endif
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3640
3641
  	printk(KERN_INFO
  		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be01   Christoph Lameter   SLUB: minimum ali...
3642
3643
3644
  		" CPUs=%d, Nodes=%d
  ",
  		caches, cache_line_size(),
81819f0fc   Christoph Lameter   SLUB core
3645
3646
3647
  		slub_min_order, slub_max_order, slub_min_objects,
  		nr_cpu_ids, nr_node_ids);
  }
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3648
3649
  void __init kmem_cache_init_late(void)
  {
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3650
  }
81819f0fc   Christoph Lameter   SLUB core
3651
3652
3653
3654
3655
3656
3657
  /*
   * Find a mergeable slab cache
   */
  static int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  		return 1;
c59def9f2   Christoph Lameter   Slab allocators: ...
3658
  	if (s->ctor)
81819f0fc   Christoph Lameter   SLUB core
3659
  		return 1;
8ffa68755   Christoph Lameter   SLUB: Fix NUMA / ...
3660
3661
3662
3663
3664
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
81819f0fc   Christoph Lameter   SLUB core
3665
3666
3667
3668
  	return 0;
  }
  
  static struct kmem_cache *find_mergeable(size_t size,
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3669
  		size_t align, unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3670
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3671
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3672
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
3673
3674
3675
  
  	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  		return NULL;
c59def9f2   Christoph Lameter   Slab allocators: ...
3676
  	if (ctor)
81819f0fc   Christoph Lameter   SLUB core
3677
3678
3679
3680
3681
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3682
  	flags = kmem_cache_flags(size, flags, name, NULL);
81819f0fc   Christoph Lameter   SLUB core
3683

5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3684
  	list_for_each_entry(s, &slab_caches, list) {
81819f0fc   Christoph Lameter   SLUB core
3685
3686
3687
3688
3689
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3690
  		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0fc   Christoph Lameter   SLUB core
3691
3692
3693
3694
3695
  			continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
064287807   Pekka Enberg   SLUB: Fix coding ...
3696
  		if ((s->size & ~(align - 1)) != s->size)
81819f0fc   Christoph Lameter   SLUB core
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  
  		return s;
  	}
  	return NULL;
  }
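  /*
   * Illustrative example of merging (not from this file): two caches created
   * with compatible flags, no constructor and object sizes that round up to
   * the same aligned size (say 52 and 56 bytes with sizeof(void *) == 8) can
   * end up sharing one struct kmem_cache; the later creation only shows up
   * as a sysfs alias.
   */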
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3708
  		size_t align, unsigned long flags, void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3709
3710
  {
  	struct kmem_cache *s;
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3711
  	char *n;
81819f0fc   Christoph Lameter   SLUB core
3712

fe1ff49d0   Benjamin Herrenschmidt   mm: kmem_cache_cr...
3713
3714
  	if (WARN_ON(!name))
  		return NULL;
81819f0fc   Christoph Lameter   SLUB core
3715
  	down_write(&slub_lock);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3716
  	s = find_mergeable(size, align, flags, name, ctor);
81819f0fc   Christoph Lameter   SLUB core
3717
3718
3719
3720
3721
3722
3723
3724
  	if (s) {
  		s->refcount++;
  		/*
  		 * Adjust the object sizes so that we clear
  		 * the complete object on kzalloc.
  		 */
  		s->objsize = max(s->objsize, (int)size);
  		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2f   Christoph Lameter   slub: Fix up comm...
3725

7b8f3b66d   David Rientjes   slub: avoid leaki...
3726
  		if (sysfs_slab_alias(s, name)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3727
  			s->refcount--;
81819f0fc   Christoph Lameter   SLUB core
3728
  			goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3729
  		}
2bce64858   Christoph Lameter   slub: Allow remov...
3730
  		up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3731
3732
  		return s;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
3733

84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3734
3735
3736
  	n = kstrdup(name, GFP_KERNEL);
  	if (!n)
  		goto err;
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3737
3738
  	s = kmalloc(kmem_size, GFP_KERNEL);
  	if (s) {
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3739
  		if (kmem_cache_open(s, n,
c59def9f2   Christoph Lameter   Slab allocators: ...
3740
  				size, align, flags, ctor)) {
81819f0fc   Christoph Lameter   SLUB core
3741
  			list_add(&s->list, &slab_caches);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3742
  			if (sysfs_slab_add(s)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3743
  				list_del(&s->list);
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3744
  				kfree(n);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3745
  				kfree(s);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3746
  				goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3747
  			}
2bce64858   Christoph Lameter   slub: Allow remov...
3748
  			up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3749
3750
  			return s;
  		}
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3751
  		kfree(n);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3752
  		kfree(s);
81819f0fc   Christoph Lameter   SLUB core
3753
  	}
68cee4f11   Pavel Emelyanov   slub: Fix slub_lo...
3754
  err:
81819f0fc   Christoph Lameter   SLUB core
3755
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3756

81819f0fc   Christoph Lameter   SLUB core
3757
3758
3759
3760
3761
3762
3763
3764
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slabcache %s
  ", name);
  	else
  		s = NULL;
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
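  /*
   * Illustrative usage sketch (not part of this file; "struct foo" and
   * "foo_cache" are made-up names):
   *
   *	static struct kmem_cache *foo_cache;
   *
   *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
   *				      SLAB_HWCACHE_ALIGN, NULL);
   *	if (!foo_cache)
   *		return -ENOMEM;
   *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
   */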
81819f0fc   Christoph Lameter   SLUB core
3765
  #ifdef CONFIG_SMP
27390bc33   Christoph Lameter   SLUB: fix locking...
3766
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3767
3768
   * Use the cpu notifier to insure that the cpu slabs are flushed when
   * necessary.
81819f0fc   Christoph Lameter   SLUB core
3769
3770
3771
3772
3773
   */
  static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  		unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3774
3775
  	struct kmem_cache *s;
  	unsigned long flags;
81819f0fc   Christoph Lameter   SLUB core
3776
3777
3778
  
  	switch (action) {
  	case CPU_UP_CANCELED:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3779
  	case CPU_UP_CANCELED_FROZEN:
81819f0fc   Christoph Lameter   SLUB core
3780
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3781
  	case CPU_DEAD_FROZEN:
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3782
3783
3784
3785
3786
3787
3788
  		down_read(&slub_lock);
  		list_for_each_entry(s, &slab_caches, list) {
  			local_irq_save(flags);
  			__flush_cpu_slab(s, cpu);
  			local_irq_restore(flags);
  		}
  		up_read(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3789
3790
3791
3792
3793
3794
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
064287807   Pekka Enberg   SLUB: Fix coding ...
3795
  static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3796
  	.notifier_call = slab_cpuup_callback
064287807   Pekka Enberg   SLUB: Fix coding ...
3797
  };
81819f0fc   Christoph Lameter   SLUB core
3798
3799
  
  #endif
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3800
  void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3801
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3802
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3803
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3804

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3805
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3806
  		return kmalloc_large(size, gfpflags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3807
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3808

2408c5503   Satyam Sharma   {slub, slob}: use...
3809
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3810
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3811

2154a3363   Christoph Lameter   slub: Use a const...
3812
  	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3813

25985edce   Lucas De Marchi   Fix common misspe...
3814
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3815
  	trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3816
3817
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3818
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3819
  #ifdef CONFIG_NUMA
81819f0fc   Christoph Lameter   SLUB core
3820
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3821
  					int node, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3822
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3823
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3824
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3825

d3e14aa33   Xiaotian Feng   slub: __kmalloc_n...
3826
3827
3828
3829
3830
3831
3832
3833
3834
  	if (unlikely(size > SLUB_MAX_SIZE)) {
  		ret = kmalloc_large_node(size, gfpflags, node);
  
  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   gfpflags, node);
  
  		return ret;
  	}
eada35efc   Pekka Enberg   slub: kmalloc pag...
3835

aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3836
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3837

2408c5503   Satyam Sharma   {slub, slob}: use...
3838
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3839
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3840

94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3841
  	ret = slab_alloc(s, gfpflags, node, caller);
25985edce   Lucas De Marchi   Fix common misspe...
3842
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3843
  	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3844
3845
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3846
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3847
  #endif
81819f0fc   Christoph Lameter   SLUB core
3848

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3849
  #ifdef CONFIG_SYSFS
205ab99dd   Christoph Lameter   slub: Update stat...
3850
3851
3852
3853
3854
3855
3856
3857
3858
  static int count_inuse(struct page *page)
  {
  	return page->inuse;
  }
  
  static int count_total(struct page *page)
  {
  	return page->objects;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3859
  #endif
205ab99dd   Christoph Lameter   slub: Update stat...
3860

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3861
  #ifdef CONFIG_SLUB_DEBUG
434e245dd   Christoph Lameter   SLUB: Do not allo...
3862
3863
  static int validate_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3864
3865
  {
  	void *p;
a973e9dd1   Christoph Lameter   Revert "unique en...
3866
  	void *addr = page_address(page);
53e15af03   Christoph Lameter   slub: validation ...
3867
3868
3869
3870
3871
3872
  
  	if (!check_slab(s, page) ||
  			!on_freelist(s, page, NULL))
  		return 0;
  
  	/* Now we know that a valid freelist exists */
39b264641   Christoph Lameter   slub: Store max n...
3873
  	bitmap_zero(map, page->objects);
53e15af03   Christoph Lameter   slub: validation ...
3874

5f80b13ae   Christoph Lameter   slub: get_map() f...
3875
3876
3877
3878
3879
  	get_map(s, page, map);
  	for_each_object(p, s, addr, page->objects) {
  		if (test_bit(slab_index(p, s, addr), map))
  			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
  				return 0;
53e15af03   Christoph Lameter   slub: validation ...
3880
  	}
224a88be4   Christoph Lameter   slub: for_each_ob...
3881
  	for_each_object(p, s, addr, page->objects)
7656c72b5   Christoph Lameter   SLUB: add macros ...
3882
  		if (!test_bit(slab_index(p, s, addr), map))
37d57443d   Tero Roponen   slub: Fix a crash...
3883
  			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af03   Christoph Lameter   slub: validation ...
3884
3885
3886
  				return 0;
  	return 1;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3887
3888
  static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3889
  {
881db7fb0   Christoph Lameter   slub: Invert lock...
3890
3891
3892
  	slab_lock(page);
  	validate_slab(s, page, map);
  	slab_unlock(page);
53e15af03   Christoph Lameter   slub: validation ...
3893
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3894
3895
  static int validate_slab_node(struct kmem_cache *s,
  		struct kmem_cache_node *n, unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3896
3897
3898
3899
3900
3901
3902
3903
  {
  	unsigned long count = 0;
  	struct page *page;
  	unsigned long flags;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  
  	list_for_each_entry(page, &n->partial, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3904
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
  		count++;
  	}
  	if (count != n->nr_partial)
  		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  			"counter=%ld
  ", s->name, count, n->nr_partial);
  
  	if (!(s->flags & SLAB_STORE_USER))
  		goto out;
  
  	list_for_each_entry(page, &n->full, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3916
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
  		count++;
  	}
  	if (count != atomic_long_read(&n->nr_slabs))
  		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  			"counter=%ld
  ", s->name, count,
  			atomic_long_read(&n->nr_slabs));
  
  out:
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return count;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3929
  static long validate_slab_cache(struct kmem_cache *s)
53e15af03   Christoph Lameter   slub: validation ...
3930
3931
3932
  {
  	int node;
  	unsigned long count = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
3933
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245dd   Christoph Lameter   SLUB: Do not allo...
3934
3935
3936
3937
  				sizeof(unsigned long), GFP_KERNEL);
  
  	if (!map)
  		return -ENOMEM;
53e15af03   Christoph Lameter   slub: validation ...
3938
3939
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3940
  	for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af03   Christoph Lameter   slub: validation ...
3941
  		struct kmem_cache_node *n = get_node(s, node);
434e245dd   Christoph Lameter   SLUB: Do not allo...
3942
  		count += validate_slab_node(s, n, map);
53e15af03   Christoph Lameter   slub: validation ...
3943
  	}
434e245dd   Christoph Lameter   SLUB: Do not allo...
3944
  	kfree(map);
53e15af03   Christoph Lameter   slub: validation ...
3945
3946
  	return count;
  }
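  /*
   * validate_slab_cache() walks every partial and full slab of a cache and
   * re-runs the debug consistency checks on each object. In this file it is
   * called from resiliency_test() below; it is also exercised through the
   * per-cache sysfs interface when SLUB debugging is enabled.
   */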
88a420e4e   Christoph Lameter   slub: add ability...
3947
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3948
   * Generate lists of code addresses where slabcache objects are allocated
88a420e4e   Christoph Lameter   slub: add ability...
3949
3950
3951
3952
3953
   * and freed.
   */
  
  struct location {
  	unsigned long count;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3954
  	unsigned long addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3955
3956
3957
3958
3959
  	long long sum_time;
  	long min_time;
  	long max_time;
  	long min_pid;
  	long max_pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3960
  	DECLARE_BITMAP(cpus, NR_CPUS);
45edfa580   Christoph Lameter   SLUB: include lif...
3961
  	nodemask_t nodes;
88a420e4e   Christoph Lameter   slub: add ability...
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
  };
  
  struct loc_track {
  	unsigned long max;
  	unsigned long count;
  	struct location *loc;
  };
  
  static void free_loc_track(struct loc_track *t)
  {
  	if (t->max)
  		free_pages((unsigned long)t->loc,
  			get_order(sizeof(struct location) * t->max));
  }
68dff6a9a   Christoph Lameter   SLUB slab validat...
3976
  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4e   Christoph Lameter   slub: add ability...
3977
3978
3979
  {
  	struct location *l;
  	int order;
88a420e4e   Christoph Lameter   slub: add ability...
3980
  	order = get_order(sizeof(struct location) * max);
68dff6a9a   Christoph Lameter   SLUB slab validat...
3981
  	l = (void *)__get_free_pages(flags, order);
88a420e4e   Christoph Lameter   slub: add ability...
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
  	if (!l)
  		return 0;
  
  	if (t->count) {
  		memcpy(l, t->loc, sizeof(struct location) * t->count);
  		free_loc_track(t);
  	}
  	t->max = max;
  	t->loc = l;
  	return 1;
  }
  
  static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa580   Christoph Lameter   SLUB: include lif...
3995
  				const struct track *track)
88a420e4e   Christoph Lameter   slub: add ability...
3996
3997
3998
  {
  	long start, end, pos;
  	struct location *l;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3999
  	unsigned long caddr;
45edfa580   Christoph Lameter   SLUB: include lif...
4000
  	unsigned long age = jiffies - track->when;
88a420e4e   Christoph Lameter   slub: add ability...
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
  
  	start = -1;
  	end = t->count;
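  	/*
  	 * t->loc is kept sorted by caller address; binary search for
  	 * track->addr, or for the slot where a new entry must be inserted.
  	 */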
  
  	for ( ; ; ) {
  		pos = start + (end - start + 1) / 2;
  
  		/*
  		 * There is nothing at "end". If we end up there
  		 * we need to add something to before end.
  		 */
  		if (pos == end)
  			break;
  
  		caddr = t->loc[pos].addr;
45edfa580   Christoph Lameter   SLUB: include lif...
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
  		if (track->addr == caddr) {
  
  			l = &t->loc[pos];
  			l->count++;
  			if (track->when) {
  				l->sum_time += age;
  				if (age < l->min_time)
  					l->min_time = age;
  				if (age > l->max_time)
  					l->max_time = age;
  
  				if (track->pid < l->min_pid)
  					l->min_pid = track->pid;
  				if (track->pid > l->max_pid)
  					l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
4031
4032
  				cpumask_set_cpu(track->cpu,
  						to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4033
4034
  			}
  			node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
4035
4036
  			return 1;
  		}
45edfa580   Christoph Lameter   SLUB: include lif...
4037
  		if (track->addr < caddr)
88a420e4e   Christoph Lameter   slub: add ability...
4038
4039
4040
4041
4042
4043
  			end = pos;
  		else
  			start = pos;
  	}
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
4044
  	 * Not found. Insert new tracking element.
88a420e4e   Christoph Lameter   slub: add ability...
4045
  	 */
68dff6a9a   Christoph Lameter   SLUB slab validat...
4046
  	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4e   Christoph Lameter   slub: add ability...
4047
4048
4049
4050
4051
4052
4053
4054
  		return 0;
  
  	l = t->loc + pos;
  	if (pos < t->count)
  		memmove(l + 1, l,
  			(t->count - pos) * sizeof(struct location));
  	t->count++;
  	l->count = 1;
45edfa580   Christoph Lameter   SLUB: include lif...
4055
4056
4057
4058
4059
4060
  	l->addr = track->addr;
  	l->sum_time = age;
  	l->min_time = age;
  	l->max_time = age;
  	l->min_pid = track->pid;
  	l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
4061
4062
  	cpumask_clear(to_cpumask(l->cpus));
  	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4063
4064
  	nodes_clear(l->nodes);
  	node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
4065
4066
4067
4068
  	return 1;
  }
  
  static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57bf   Eric Dumazet   slub: Potential s...
4069
  		struct page *page, enum track_item alloc,
a5dd5c117   Namhyung Kim   slub: Fix signedn...
4070
  		unsigned long *map)
88a420e4e   Christoph Lameter   slub: add ability...
4071
  {
a973e9dd1   Christoph Lameter   Revert "unique en...
4072
  	void *addr = page_address(page);
88a420e4e   Christoph Lameter   slub: add ability...
4073
  	void *p;
39b264641   Christoph Lameter   slub: Store max n...
4074
  	bitmap_zero(map, page->objects);
5f80b13ae   Christoph Lameter   slub: get_map() f...
4075
  	get_map(s, page, map);
88a420e4e   Christoph Lameter   slub: add ability...
4076

224a88be4   Christoph Lameter   slub: for_each_ob...
4077
  	for_each_object(p, s, addr, page->objects)
45edfa580   Christoph Lameter   SLUB: include lif...
4078
4079
  		if (!test_bit(slab_index(p, s, addr), map))
  			add_location(t, s, get_track(s, p, alloc));
88a420e4e   Christoph Lameter   slub: add ability...
4080
4081
4082
4083
4084
  }
  
  static int list_locations(struct kmem_cache *s, char *buf,
  					enum track_item alloc)
  {
e374d4835   Harvey Harrison   slub: fix shadowe...
4085
  	int len = 0;
88a420e4e   Christoph Lameter   slub: add ability...
4086
  	unsigned long i;
68dff6a9a   Christoph Lameter   SLUB slab validat...
4087
  	struct loc_track t = { 0, 0, NULL };
88a420e4e   Christoph Lameter   slub: add ability...
4088
  	int node;
bbd7d57bf   Eric Dumazet   slub: Potential s...
4089
4090
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
  				     sizeof(unsigned long), GFP_KERNEL);
88a420e4e   Christoph Lameter   slub: add ability...
4091

bbd7d57bf   Eric Dumazet   slub: Potential s...
4092
4093
4094
  	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
  				     GFP_TEMPORARY)) {
  		kfree(map);
68dff6a9a   Christoph Lameter   SLUB slab validat...
4095
4096
  		return sprintf(buf, "Out of memory
  ");
bbd7d57bf   Eric Dumazet   slub: Potential s...
4097
  	}
88a420e4e   Christoph Lameter   slub: add ability...
4098
4099
  	/* Push back cpu slabs */
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
4100
  	for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4e   Christoph Lameter   slub: add ability...
4101
4102
4103
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long flags;
  		struct page *page;
9e86943b6   Christoph Lameter   SLUB: use atomic_...
4104
  		if (!atomic_long_read(&n->nr_slabs))
88a420e4e   Christoph Lameter   slub: add ability...
4105
4106
4107
4108
  			continue;
  
  		spin_lock_irqsave(&n->list_lock, flags);
  		list_for_each_entry(page, &n->partial, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4109
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4110
  		list_for_each_entry(page, &n->full, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4111
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4112
4113
4114
4115
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	for (i = 0; i < t.count; i++) {
45edfa580   Christoph Lameter   SLUB: include lif...
4116
  		struct location *l = &t.loc[i];
88a420e4e   Christoph Lameter   slub: add ability...
4117

9c2462472   Hugh Dickins   KSYM_SYMBOL_LEN f...
4118
  		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4e   Christoph Lameter   slub: add ability...
4119
  			break;
e374d4835   Harvey Harrison   slub: fix shadowe...
4120
  		len += sprintf(buf + len, "%7ld ", l->count);
45edfa580   Christoph Lameter   SLUB: include lif...
4121
4122
  
  		if (l->addr)
62c70bce8   Joe Perches   mm: convert sprin...
4123
  			len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4e   Christoph Lameter   slub: add ability...
4124
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4125
  			len += sprintf(buf + len, "<not-available>");
45edfa580   Christoph Lameter   SLUB: include lif...
4126
4127
  
  		if (l->sum_time != l->min_time) {
e374d4835   Harvey Harrison   slub: fix shadowe...
4128
  			len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258e   Roman Zippel   remove div_long_l...
4129
4130
4131
  				l->min_time,
  				(long)div_u64(l->sum_time, l->count),
  				l->max_time);
45edfa580   Christoph Lameter   SLUB: include lif...
4132
  		} else
e374d4835   Harvey Harrison   slub: fix shadowe...
4133
  			len += sprintf(buf + len, " age=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4134
4135
4136
  				l->min_time);
  
  		if (l->min_pid != l->max_pid)
e374d4835   Harvey Harrison   slub: fix shadowe...
4137
  			len += sprintf(buf + len, " pid=%ld-%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4138
4139
  				l->min_pid, l->max_pid);
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4140
  			len += sprintf(buf + len, " pid=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4141
  				l->min_pid);
174596a0b   Rusty Russell   cpumask: convert mm/
4142
4143
  		if (num_online_cpus() > 1 &&
  				!cpumask_empty(to_cpumask(l->cpus)) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4144
4145
4146
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " cpus=");
  			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
174596a0b   Rusty Russell   cpumask: convert mm/
4147
  						 to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4148
  		}
62bc62a87   Christoph Lameter   page allocator: u...
4149
  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4150
4151
4152
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " nodes=");
  			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa580   Christoph Lameter   SLUB: include lif...
4153
4154
  					l->nodes);
  		}
e374d4835   Harvey Harrison   slub: fix shadowe...
4155
4156
  		len += sprintf(buf + len, "
  ");
88a420e4e   Christoph Lameter   slub: add ability...
4157
4158
4159
  	}
  
  	free_loc_track(&t);
bbd7d57bf   Eric Dumazet   slub: Potential s...
4160
  	kfree(map);
88a420e4e   Christoph Lameter   slub: add ability...
4161
  	if (!t.count)
e374d4835   Harvey Harrison   slub: fix shadowe...
4162
4163
4164
  		len += sprintf(buf, "No data
  ");
  	return len;
88a420e4e   Christoph Lameter   slub: add ability...
4165
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4166
  #endif
88a420e4e   Christoph Lameter   slub: add ability...
4167

a5a84755c   Christoph Lameter   slub: Move functi...
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
  #ifdef SLUB_RESILIENCY_TEST
  static void resiliency_test(void)
  {
  	u8 *p;
  
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
  
  	printk(KERN_ERR "SLUB resiliency testing\n");
  	printk(KERN_ERR "-----------------------\n");
  	printk(KERN_ERR "A. Corruption after allocation\n");
  
  	p = kzalloc(16, GFP_KERNEL);
  	p[16] = 0x12;
  	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
  			" 0x12->0x%p\n\n", p + 16);
  
  	validate_slab_cache(kmalloc_caches[4]);
  
  	/* Hmmm... The next two are dangerous */
  	p = kzalloc(32, GFP_KERNEL);
  	p[32 + sizeof(void *)] = 0x34;
  	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
  			" 0x34 -> -0x%p\n", p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  
  	validate_slab_cache(kmalloc_caches[5]);
  	p = kzalloc(64, GFP_KERNEL);
  	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  	*p = 0x56;
  	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
  									p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  	validate_slab_cache(kmalloc_caches[6]);
  
  	printk(KERN_ERR "\nB. Corruption after free\n");
  	p = kzalloc(128, GFP_KERNEL);
  	kfree(p);
  	*p = 0x78;
  	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[7]);
  
  	p = kzalloc(256, GFP_KERNEL);
  	kfree(p);
  	p[50] = 0x9a;
  	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
  			p);
  	validate_slab_cache(kmalloc_caches[8]);
  
  	p = kzalloc(512, GFP_KERNEL);
  	kfree(p);
  	p[512] = 0xab;
  	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[9]);
  }
  #else
  #ifdef CONFIG_SYSFS
  static void resiliency_test(void) {};
  #endif
  #endif
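  /*
   * Note: when SLUB_RESILIENCY_TEST is defined, resiliency_test() above
   * deliberately corrupts freshly allocated and freed kmalloc objects and
   * then runs validate_slab_cache() so that the debug checks can be seen to
   * fire.
   */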
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4253
  #ifdef CONFIG_SYSFS
81819f0fc   Christoph Lameter   SLUB core
4254
  enum slab_stat_type {
205ab99dd   Christoph Lameter   slub: Update stat...
4255
4256
4257
4258
4259
  	SL_ALL,			/* All slabs */
  	SL_PARTIAL,		/* Only partially allocated slabs */
  	SL_CPU,			/* Only slabs used for cpu caches */
  	SL_OBJECTS,		/* Determine allocated objects not slabs */
  	SL_TOTAL		/* Determine object capacity not slabs */
81819f0fc   Christoph Lameter   SLUB core
4260
  };
205ab99dd   Christoph Lameter   slub: Update stat...
4261
  #define SO_ALL		(1 << SL_ALL)
81819f0fc   Christoph Lameter   SLUB core
4262
4263
4264
  #define SO_PARTIAL	(1 << SL_PARTIAL)
  #define SO_CPU		(1 << SL_CPU)
  #define SO_OBJECTS	(1 << SL_OBJECTS)
205ab99dd   Christoph Lameter   slub: Update stat...
4265
  #define SO_TOTAL	(1 << SL_TOTAL)
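  /*
   * The sysfs handlers below combine these bits; e.g. the "objects"
   * attribute uses SO_ALL|SO_OBJECTS to count allocated objects across all
   * slabs, while "partial" uses SO_PARTIAL to count only slabs on the
   * partial lists.
   */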
81819f0fc   Christoph Lameter   SLUB core
4266

62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4267
4268
  static ssize_t show_slab_objects(struct kmem_cache *s,
  			    char *buf, unsigned long flags)
81819f0fc   Christoph Lameter   SLUB core
4269
4270
  {
  	unsigned long total = 0;
81819f0fc   Christoph Lameter   SLUB core
4271
4272
4273
4274
4275
4276
  	int node;
  	int x;
  	unsigned long *nodes;
  	unsigned long *per_cpu;
  
  	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4277
4278
  	if (!nodes)
  		return -ENOMEM;
81819f0fc   Christoph Lameter   SLUB core
4279
  	per_cpu = nodes + nr_node_ids;
205ab99dd   Christoph Lameter   slub: Update stat...
4280
4281
  	if (flags & SO_CPU) {
  		int cpu;
81819f0fc   Christoph Lameter   SLUB core
4282

205ab99dd   Christoph Lameter   slub: Update stat...
4283
  		for_each_possible_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4284
  			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
49e225858   Christoph Lameter   slub: per cpu cac...
4285
  			struct page *page;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4286

205ab99dd   Christoph Lameter   slub: Update stat...
4287
4288
4289
4290
4291
4292
4293
4294
  			if (!c || c->node < 0)
  				continue;
  
  			if (c->page) {
  					if (flags & SO_TOTAL)
  						x = c->page->objects;
  				else if (flags & SO_OBJECTS)
  					x = c->page->inuse;
81819f0fc   Christoph Lameter   SLUB core
4295
4296
  				else
  					x = 1;
205ab99dd   Christoph Lameter   slub: Update stat...
4297

81819f0fc   Christoph Lameter   SLUB core
4298
  				total += x;
205ab99dd   Christoph Lameter   slub: Update stat...
4299
  				nodes[c->node] += x;
81819f0fc   Christoph Lameter   SLUB core
4300
  			}
49e225858   Christoph Lameter   slub: per cpu cac...
4301
4302
4303
4304
4305
4306
4307
  			page = c->partial;
  
  			if (page) {
  				x = page->pobjects;
  				total += x;
  				nodes[c->node] += x;
  			}
205ab99dd   Christoph Lameter   slub: Update stat...
4308
  			per_cpu[c->node]++;
81819f0fc   Christoph Lameter   SLUB core
4309
4310
  		}
  	}
04d94879c   Christoph Lameter   slub: Avoid use o...
4311
  	lock_memory_hotplug();
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4312
  #ifdef CONFIG_SLUB_DEBUG
205ab99dd   Christoph Lameter   slub: Update stat...
4313
4314
4315
4316
4317
4318
4319
4320
4321
  	if (flags & SO_ALL) {
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
  
  		if (flags & SO_TOTAL)
  			x = atomic_long_read(&n->total_objects);
  		else if (flags & SO_OBJECTS)
  			x = atomic_long_read(&n->total_objects) -
  				count_partial(n, count_free);
81819f0fc   Christoph Lameter   SLUB core
4322

81819f0fc   Christoph Lameter   SLUB core
4323
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4324
  				x = atomic_long_read(&n->nr_slabs);
81819f0fc   Christoph Lameter   SLUB core
4325
4326
4327
  			total += x;
  			nodes[node] += x;
  		}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4328
4329
4330
  	} else
  #endif
  	if (flags & SO_PARTIAL) {
205ab99dd   Christoph Lameter   slub: Update stat...
4331
4332
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
81819f0fc   Christoph Lameter   SLUB core
4333

205ab99dd   Christoph Lameter   slub: Update stat...
4334
4335
4336
4337
  			if (flags & SO_TOTAL)
  				x = count_partial(n, count_total);
  			else if (flags & SO_OBJECTS)
  				x = count_partial(n, count_inuse);
81819f0fc   Christoph Lameter   SLUB core
4338
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4339
  				x = n->nr_partial;
81819f0fc   Christoph Lameter   SLUB core
4340
4341
4342
4343
  			total += x;
  			nodes[node] += x;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
4344
4345
  	x = sprintf(buf, "%lu", total);
  #ifdef CONFIG_NUMA
f64dc58c5   Christoph Lameter   Memoryless nodes:...
4346
  	for_each_node_state(node, N_NORMAL_MEMORY)
81819f0fc   Christoph Lameter   SLUB core
4347
4348
4349
4350
  		if (nodes[node])
  			x += sprintf(buf + x, " N%d=%lu",
  					node, nodes[node]);
  #endif
04d94879c   Christoph Lameter   slub: Avoid use o...
4351
  	unlock_memory_hotplug();
81819f0fc   Christoph Lameter   SLUB core
4352
4353
4354
4355
  	kfree(nodes);
  	return x + sprintf(buf + x, "
  ");
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4356
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
4357
4358
4359
  static int any_slab_objects(struct kmem_cache *s)
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
4360

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4361
  	for_each_online_node(node) {
81819f0fc   Christoph Lameter   SLUB core
4362
  		struct kmem_cache_node *n = get_node(s, node);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4363
4364
  		if (!n)
  			continue;
4ea33e2dc   Benjamin Herrenschmidt   slub: fix atomic ...
4365
  		if (atomic_long_read(&n->total_objects))
81819f0fc   Christoph Lameter   SLUB core
4366
4367
4368
4369
  			return 1;
  	}
  	return 0;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4370
  #endif
81819f0fc   Christoph Lameter   SLUB core
4371
4372
  
  #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf6   Phil Carmody   treewide: fix pot...
4373
  #define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0fc   Christoph Lameter   SLUB core
4374
4375
4376
4377
4378
4379
4380
4381
  
  struct slab_attribute {
  	struct attribute attr;
  	ssize_t (*show)(struct kmem_cache *s, char *buf);
  	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  };
  
  #define SLAB_ATTR_RO(_name) \
ab067e99d   Vasiliy Kulikov   mm: restrict acce...
4382
4383
  	static struct slab_attribute _name##_attr = \
  	__ATTR(_name, 0400, _name##_show, NULL)
81819f0fc   Christoph Lameter   SLUB core
4384
4385
4386
  
  #define SLAB_ATTR(_name) \
  	static struct slab_attribute _name##_attr =  \
ab067e99d   Vasiliy Kulikov   mm: restrict acce...
4387
  	__ATTR(_name, 0600, _name##_show, _name##_store)
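  /*
   * Each SLAB_ATTR()/SLAB_ATTR_RO() use below produces one file in the
   * cache's sysfs directory (normally /sys/kernel/slab/<cache name>/),
   * backed by the _show/_store handlers defined here.
   */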
81819f0fc   Christoph Lameter   SLUB core
4388

81819f0fc   Christoph Lameter   SLUB core
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
  static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->size);
  }
  SLAB_ATTR_RO(slab_size);
  
  static ssize_t align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->align);
  }
  SLAB_ATTR_RO(align);
  
  static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->objsize);
  }
  SLAB_ATTR_RO(object_size);
  
  static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4412
4413
  	return sprintf(buf, "%d
  ", oo_objects(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4414
4415
  }
  SLAB_ATTR_RO(objs_per_slab);
06b285dc3   Christoph Lameter   slub: Make the or...
4416
4417
4418
  static ssize_t order_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4419
4420
4421
4422
4423
4424
  	unsigned long order;
  	int err;
  
  	err = strict_strtoul(buf, 10, &order);
  	if (err)
  		return err;
06b285dc3   Christoph Lameter   slub: Make the or...
4425
4426
4427
4428
4429
4430
4431
  
  	if (order > slub_max_order || order < slub_min_order)
  		return -EINVAL;
  
  	calculate_sizes(s, order);
  	return length;
  }
81819f0fc   Christoph Lameter   SLUB core
4432
4433
  static ssize_t order_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4434
4435
  	return sprintf(buf, "%d
  ", oo_order(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4436
  }
06b285dc3   Christoph Lameter   slub: Make the or...
4437
  SLAB_ATTR(order);
81819f0fc   Christoph Lameter   SLUB core
4438

73d342b16   David Rientjes   slub: add min_par...
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
  static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%lu
  ", s->min_partial);
  }
  
  static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long min;
  	int err;
  
  	err = strict_strtoul(buf, 10, &min);
  	if (err)
  		return err;
c0bdb232b   David Rientjes   slub: rename calc...
4454
  	set_min_partial(s, min);
73d342b16   David Rientjes   slub: add min_par...
4455
4456
4457
  	return length;
  }
  SLAB_ATTR(min_partial);
49e225858   Christoph Lameter   slub: per cpu cac...
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
  static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%u
  ", s->cpu_partial);
  }
  
  static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long objects;
  	int err;
  
  	err = strict_strtoul(buf, 10, &objects);
  	if (err)
  		return err;
  
  	s->cpu_partial = objects;
  	flush_all(s);
  	return length;
  }
  SLAB_ATTR(cpu_partial);
81819f0fc   Christoph Lameter   SLUB core
4479
4480
  static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  {
62c70bce8   Joe Perches   mm: convert sprin...
4481
4482
4483
4484
  	if (!s->ctor)
  		return 0;
  	return sprintf(buf, "%pS
  ", s->ctor);
81819f0fc   Christoph Lameter   SLUB core
4485
4486
  }
  SLAB_ATTR_RO(ctor);
81819f0fc   Christoph Lameter   SLUB core
4487
4488
4489
4490
4491
4492
  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->refcount - 1);
  }
  SLAB_ATTR_RO(aliases);
81819f0fc   Christoph Lameter   SLUB core
4493
4494
  static ssize_t partial_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
4495
  	return show_slab_objects(s, buf, SO_PARTIAL);
81819f0fc   Christoph Lameter   SLUB core
4496
4497
4498
4499
4500
  }
  SLAB_ATTR_RO(partial);
  
  static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
4501
  	return show_slab_objects(s, buf, SO_CPU);
81819f0fc   Christoph Lameter   SLUB core
4502
4503
4504
4505
4506
  }
  SLAB_ATTR_RO(cpu_slabs);
  
  static ssize_t objects_show(struct kmem_cache *s, char *buf)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
4507
  	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0fc   Christoph Lameter   SLUB core
4508
4509
  }
  SLAB_ATTR_RO(objects);
205ab99dd   Christoph Lameter   slub: Update stat...
4510
4511
4512
4513
4514
  static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
  }
  SLAB_ATTR_RO(objects_partial);
49e225858   Christoph Lameter   slub: per cpu cac...
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
  static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
  {
  	int objects = 0;
  	int pages = 0;
  	int cpu;
  	int len;
  
  	for_each_online_cpu(cpu) {
  		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
  
  		if (page) {
  			pages += page->pages;
  			objects += page->pobjects;
  		}
  	}
  
  	len = sprintf(buf, "%d(%d)", objects, pages);
  
  #ifdef CONFIG_SMP
  	for_each_online_cpu(cpu) {
  		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
  
  		if (page && len < PAGE_SIZE - 20)
  			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
  				page->pobjects, page->pages);
  	}
  #endif
  	return len + sprintf(buf + len, "
  ");
  }
  SLAB_ATTR_RO(slabs_cpu_partial);
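  
  /*
   * The slabs_cpu_partial file produced above reads as a total followed, on
   * SMP, by one entry per cpu that holds a per cpu partial list, e.g.
   * (illustrative numbers only):
   *
   *	132(4) C0=66(2) C1=66(2)
   *
   * i.e. "objects(pages)" overall and then "C<cpu>=objects(pages)" per cpu.
   */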
a5a84755c   Christoph Lameter   slub: Move functi...
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  }
  
  static ssize_t reclaim_account_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  	if (buf[0] == '1')
  		s->flags |= SLAB_RECLAIM_ACCOUNT;
  	return length;
  }
  SLAB_ATTR(reclaim_account);
  
  static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_HWCACHE_ALIGN));
  }
  SLAB_ATTR_RO(hwcache_align);
  
  #ifdef CONFIG_ZONE_DMA
  static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_CACHE_DMA));
  }
  SLAB_ATTR_RO(cache_dma);
  #endif
  
  static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DESTROY_BY_RCU));
  }
  SLAB_ATTR_RO(destroy_by_rcu);
ab9a0f196   Lai Jiangshan   slub: automatical...
4584
4585
4586
4587
4588
4589
  static ssize_t reserved_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->reserved);
  }
  SLAB_ATTR_RO(reserved);
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4590
  #ifdef CONFIG_SLUB_DEBUG
a5a84755c   Christoph Lameter   slub: Move functi...
4591
4592
4593
4594
4595
  static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL);
  }
  SLAB_ATTR_RO(slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
4596
4597
4598
4599
4600
  static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
  }
  SLAB_ATTR_RO(total_objects);
81819f0fc   Christoph Lameter   SLUB core
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
  static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DEBUG_FREE));
  }
  
  static ssize_t sanity_checks_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_DEBUG_FREE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4611
4612
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4613
  		s->flags |= SLAB_DEBUG_FREE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4614
  	}
81819f0fc   Christoph Lameter   SLUB core
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
  	return length;
  }
  SLAB_ATTR(sanity_checks);
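  
  /*
   * Note that enabling a debug flag at runtime (here and in the trace,
   * red_zone, poison and store_user handlers below) also clears
   * __CMPXCHG_DOUBLE: the consistency checks rely on slab_lock() protected
   * updates, so the lockless cmpxchg_double fast path has to be abandoned
   * once debugging is switched on for the cache.
   */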
  
  static ssize_t trace_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_TRACE));
  }
  
  static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_TRACE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4629
4630
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4631
  		s->flags |= SLAB_TRACE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4632
  	}
81819f0fc   Christoph Lameter   SLUB core
4633
4634
4635
  	return length;
  }
  SLAB_ATTR(trace);
81819f0fc   Christoph Lameter   SLUB core
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
  static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RED_ZONE));
  }
  
  static ssize_t red_zone_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_RED_ZONE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4649
4650
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4651
  		s->flags |= SLAB_RED_ZONE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4652
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4653
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
  	return length;
  }
  SLAB_ATTR(red_zone);
  
  static ssize_t poison_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_POISON));
  }
  
  static ssize_t poison_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_POISON;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4671
4672
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4673
  		s->flags |= SLAB_POISON;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4674
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4675
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
  	return length;
  }
  SLAB_ATTR(poison);
  
  static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_STORE_USER));
  }
  
  static ssize_t store_user_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_STORE_USER;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4693
4694
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4695
  		s->flags |= SLAB_STORE_USER;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4696
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4697
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4698
4699
4700
  	return length;
  }
  SLAB_ATTR(store_user);
53e15af03   Christoph Lameter   slub: validation ...
4701
4702
4703
4704
4705
4706
4707
4708
  static ssize_t validate_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t validate_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
434e245dd   Christoph Lameter   SLUB: Do not allo...
4709
4710
4711
4712
4713
4714
4715
4716
  	int ret = -EINVAL;
  
  	if (buf[0] == '1') {
  		ret = validate_slab_cache(s);
  		if (ret >= 0)
  			ret = length;
  	}
  	return ret;
53e15af03   Christoph Lameter   slub: validation ...
4717
4718
  }
  SLAB_ATTR(validate);
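  
  /*
   * Reading "validate" returns nothing; writing "1" runs
   * validate_slab_cache(), which walks the slabs on each node's lists and
   * checks object and freelist consistency.  A negative result from the
   * validation is propagated back to the writer.
   */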
a5a84755c   Christoph Lameter   slub: Move functi...
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
  
  static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_ALLOC);
  }
  SLAB_ATTR_RO(alloc_calls);
  
  static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_FREE);
  }
  SLAB_ATTR_RO(free_calls);
  #endif /* CONFIG_SLUB_DEBUG */
  
  #ifdef CONFIG_FAILSLAB
  static ssize_t failslab_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_FAILSLAB));
  }
  
  static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_FAILSLAB;
  	if (buf[0] == '1')
  		s->flags |= SLAB_FAILSLAB;
  	return length;
  }
  SLAB_ATTR(failslab);
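  
  /*
   * failslab marks the cache for the failslab fault injection framework
   * (CONFIG_FAILSLAB): with SLAB_FAILSLAB set, allocations from this cache
   * become candidates for injected failures, typically configured under
   * /sys/kernel/debug/failslab.
   */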
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4753
  #endif
53e15af03   Christoph Lameter   slub: validation ...
4754

2086d26a0   Christoph Lameter   SLUB: Free slabs ...
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
  static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t shrink_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
  	if (buf[0] == '1') {
  		int rc = kmem_cache_shrink(s);
  
  		if (rc)
  			return rc;
  	} else
  		return -EINVAL;
  	return length;
  }
  SLAB_ATTR(shrink);
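  
  /*
   * Writing "1" to shrink invokes kmem_cache_shrink(), which discards empty
   * slabs and sorts the remaining partial slabs by the number of objects in
   * use; any other value is rejected with -EINVAL, and reading the file
   * returns an empty string.
   */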
81819f0fc   Christoph Lameter   SLUB core
4773
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4774
  static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0fc   Christoph Lameter   SLUB core
4775
  {
9824601ea   Christoph Lameter   SLUB: rename defr...
4776
4777
  	return sprintf(buf, "%d
  ", s->remote_node_defrag_ratio / 10);
81819f0fc   Christoph Lameter   SLUB core
4778
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4779
  static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
4780
4781
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4782
4783
4784
4785
4786
4787
  	unsigned long ratio;
  	int err;
  
  	err = strict_strtoul(buf, 10, &ratio);
  	if (err)
  		return err;
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
4788
  	if (ratio <= 100)
0121c619d   Christoph Lameter   slub: Whitespace ...
4789
  		s->remote_node_defrag_ratio = ratio * 10;
81819f0fc   Christoph Lameter   SLUB core
4790

81819f0fc   Christoph Lameter   SLUB core
4791
4792
  	return length;
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4793
  SLAB_ATTR(remote_node_defrag_ratio);
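  
  /*
   * The defrag ratio is presented to user space as a percentage but stored
   * internally scaled by ten: the store handler accepts 0-100 and saves
   * ratio * 10, and the show handler divides by 10 again.  Values above 100
   * are silently ignored.
   */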
81819f0fc   Christoph Lameter   SLUB core
4794
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4795
  #ifdef CONFIG_SLUB_STATS
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
  static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
  {
  	unsigned long sum  = 0;
  	int cpu;
  	int len;
  	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
  
  	if (!data)
  		return -ENOMEM;
  
  	for_each_online_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4807
  		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4808
4809
4810
4811
4812
4813
  
  		data[cpu] = x;
  		sum += x;
  	}
  
  	len = sprintf(buf, "%lu", sum);
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4814
  #ifdef CONFIG_SMP
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4815
4816
  	for_each_online_cpu(cpu) {
  		if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4817
  			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4818
  	}
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4819
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4820
4821
4822
4823
  	kfree(data);
  	return len + sprintf(buf + len, "
  ");
  }
78eb00cc5   David Rientjes   slub: allow stats...
4824
4825
4826
4827
4828
  static void clear_stat(struct kmem_cache *s, enum stat_item si)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4829
  		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc5   David Rientjes   slub: allow stats...
4830
  }
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4831
4832
4833
4834
4835
  #define STAT_ATTR(si, text) 					\
  static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
  {								\
  	return show_stat(s, buf, si);				\
  }								\
78eb00cc5   David Rientjes   slub: allow stats...
4836
4837
4838
4839
4840
4841
4842
4843
4844
  static ssize_t text##_store(struct kmem_cache *s,		\
  				const char *buf, size_t length)	\
  {								\
  	if (buf[0] != '0')					\
  		return -EINVAL;					\
  	clear_stat(s, si);					\
  	return length;						\
  }								\
  SLAB_ATTR(text);						\
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
  
  STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
  STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
  STAT_ATTR(FREE_FASTPATH, free_fastpath);
  STAT_ATTR(FREE_SLOWPATH, free_slowpath);
  STAT_ATTR(FREE_FROZEN, free_frozen);
  STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
  STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
  STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
  STAT_ATTR(ALLOC_SLAB, alloc_slab);
  STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652d   Christoph Lameter   slub: Add statist...
4856
  STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4857
4858
4859
4860
4861
4862
4863
  STAT_ATTR(FREE_SLAB, free_slab);
  STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
  STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
  STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
  STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
  STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
  STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af2   Christoph Lameter   slub: fast releas...
4864
  STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376aa   Christoph Lameter   slub: Fallback to...
4865
  STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4866
4867
  STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
  STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
49e225858   Christoph Lameter   slub: per cpu cac...
4868
4869
  STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
  STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4870
  #endif
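  
  /*
   * For reference, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) above roughly
   * expands to:
   *
   *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
   *	{
   *		return show_stat(s, buf, ALLOC_FASTPATH);
   *	}
   *	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
   *					const char *buf, size_t length)
   *	{
   *		if (buf[0] != '0')
   *			return -EINVAL;
   *		clear_stat(s, ALLOC_FASTPATH);
   *		return length;
   *	}
   *	SLAB_ATTR(alloc_fastpath);
   *
   * so each statistics file prints the sum over all cpus (plus per cpu
   * "C<n>=count" entries on SMP) and is reset by writing "0" to it.
   */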
064287807   Pekka Enberg   SLUB: Fix coding ...
4871
  static struct attribute *slab_attrs[] = {
81819f0fc   Christoph Lameter   SLUB core
4872
4873
4874
4875
  	&slab_size_attr.attr,
  	&object_size_attr.attr,
  	&objs_per_slab_attr.attr,
  	&order_attr.attr,
73d342b16   David Rientjes   slub: add min_par...
4876
  	&min_partial_attr.attr,
49e225858   Christoph Lameter   slub: per cpu cac...
4877
  	&cpu_partial_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4878
  	&objects_attr.attr,
205ab99dd   Christoph Lameter   slub: Update stat...
4879
  	&objects_partial_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4880
4881
4882
  	&partial_attr.attr,
  	&cpu_slabs_attr.attr,
  	&ctor_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4883
4884
  	&aliases_attr.attr,
  	&align_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4885
4886
4887
  	&hwcache_align_attr.attr,
  	&reclaim_account_attr.attr,
  	&destroy_by_rcu_attr.attr,
a5a84755c   Christoph Lameter   slub: Move functi...
4888
  	&shrink_attr.attr,
ab9a0f196   Lai Jiangshan   slub: automatical...
4889
  	&reserved_attr.attr,
49e225858   Christoph Lameter   slub: per cpu cac...
4890
  	&slabs_cpu_partial_attr.attr,
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4891
  #ifdef CONFIG_SLUB_DEBUG
a5a84755c   Christoph Lameter   slub: Move functi...
4892
4893
4894
4895
  	&total_objects_attr.attr,
  	&slabs_attr.attr,
  	&sanity_checks_attr.attr,
  	&trace_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4896
4897
4898
  	&red_zone_attr.attr,
  	&poison_attr.attr,
  	&store_user_attr.attr,
53e15af03   Christoph Lameter   slub: validation ...
4899
  	&validate_attr.attr,
88a420e4e   Christoph Lameter   slub: add ability...
4900
4901
  	&alloc_calls_attr.attr,
  	&free_calls_attr.attr,
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4902
  #endif
81819f0fc   Christoph Lameter   SLUB core
4903
4904
4905
4906
  #ifdef CONFIG_ZONE_DMA
  	&cache_dma_attr.attr,
  #endif
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4907
  	&remote_node_defrag_ratio_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4908
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
  #ifdef CONFIG_SLUB_STATS
  	&alloc_fastpath_attr.attr,
  	&alloc_slowpath_attr.attr,
  	&free_fastpath_attr.attr,
  	&free_slowpath_attr.attr,
  	&free_frozen_attr.attr,
  	&free_add_partial_attr.attr,
  	&free_remove_partial_attr.attr,
  	&alloc_from_partial_attr.attr,
  	&alloc_slab_attr.attr,
  	&alloc_refill_attr.attr,
e36a2652d   Christoph Lameter   slub: Add statist...
4920
  	&alloc_node_mismatch_attr.attr,
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4921
4922
4923
4924
4925
4926
4927
  	&free_slab_attr.attr,
  	&cpuslab_flush_attr.attr,
  	&deactivate_full_attr.attr,
  	&deactivate_empty_attr.attr,
  	&deactivate_to_head_attr.attr,
  	&deactivate_to_tail_attr.attr,
  	&deactivate_remote_frees_attr.attr,
03e404af2   Christoph Lameter   slub: fast releas...
4928
  	&deactivate_bypass_attr.attr,
65c3376aa   Christoph Lameter   slub: Fallback to...
4929
  	&order_fallback_attr.attr,
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4930
4931
  	&cmpxchg_double_fail_attr.attr,
  	&cmpxchg_double_cpu_fail_attr.attr,
49e225858   Christoph Lameter   slub: per cpu cac...
4932
4933
  	&cpu_partial_alloc_attr.attr,
  	&cpu_partial_free_attr.attr,
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4934
  #endif
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
4935
4936
4937
  #ifdef CONFIG_FAILSLAB
  	&failslab_attr.attr,
  #endif
81819f0fc   Christoph Lameter   SLUB core
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
4981
  	NULL
  };
  
  static struct attribute_group slab_attr_group = {
  	.attrs = slab_attrs,
  };
  
  static ssize_t slab_attr_show(struct kobject *kobj,
  				struct attribute *attr,
  				char *buf)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->show)
  		return -EIO;
  
  	err = attribute->show(s, buf);
  
  	return err;
  }
  
  static ssize_t slab_attr_store(struct kobject *kobj,
  				struct attribute *attr,
  				const char *buf, size_t len)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->store)
  		return -EIO;
  
  	err = attribute->store(s, buf, len);
  
  	return err;
  }
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4982
4983
4984
  static void kmem_cache_release(struct kobject *kobj)
  {
  	struct kmem_cache *s = to_slab(kobj);
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
4985
  	kfree(s->name);
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4986
4987
  	kfree(s);
  }
52cf25d0a   Emese Revfy   Driver core: Cons...
4988
  static const struct sysfs_ops slab_sysfs_ops = {
81819f0fc   Christoph Lameter   SLUB core
4989
4990
4991
4992
4993
4994
  	.show = slab_attr_show,
  	.store = slab_attr_store,
  };
  
  static struct kobj_type slab_ktype = {
  	.sysfs_ops = &slab_sysfs_ops,
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4995
  	.release = kmem_cache_release
81819f0fc   Christoph Lameter   SLUB core
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
  };
  
  static int uevent_filter(struct kset *kset, struct kobject *kobj)
  {
  	struct kobj_type *ktype = get_ktype(kobj);
  
  	if (ktype == &slab_ktype)
  		return 1;
  	return 0;
  }
9cd43611c   Emese Revfy   kobject: Constify...
5006
  static const struct kset_uevent_ops slab_uevent_ops = {
81819f0fc   Christoph Lameter   SLUB core
5007
5008
  	.filter = uevent_filter,
  };
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
5009
  static struct kset *slab_kset;
81819f0fc   Christoph Lameter   SLUB core
5010
5011
5012
5013
  
  #define ID_STR_LENGTH 64
  
  /* Create a unique string id for a slab cache:
6446faa2f   Christoph Lameter   slub: Fix up comm...
5014
5015
   *
   * Format	:[flags-]size
81819f0fc   Christoph Lameter   SLUB core
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036
5037
   */
  static char *create_unique_id(struct kmem_cache *s)
  {
  	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  	char *p = name;
  
  	BUG_ON(!name);
  
  	*p++ = ':';
  	/*
  	 * First flags affecting slabcache operations. We will only
  	 * get here for aliasable slabs so we do not need to support
  	 * too many flags. The flags here must cover all flags that
  	 * are matched during merging to guarantee that the id is
  	 * unique.
  	 */
  	if (s->flags & SLAB_CACHE_DMA)
  		*p++ = 'd';
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		*p++ = 'a';
  	if (s->flags & SLAB_DEBUG_FREE)
  		*p++ = 'F';
5a896d9e7   Vegard Nossum   slub: add hooks f...
5038
5039
  	if (!(s->flags & SLAB_NOTRACK))
  		*p++ = 't';
81819f0fc   Christoph Lameter   SLUB core
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
  	if (p != name + 1)
  		*p++ = '-';
  	p += sprintf(p, "%07d", s->size);
  	BUG_ON(p > name + ID_STR_LENGTH - 1);
  	return name;
  }
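  
  /*
   * Example ids (sizes are illustrative): a cache with no distinguishing
   * flags and an s->size of 192 gets ":0000192", while a SLAB_CACHE_DMA
   * cache that is also tracked by kmemcheck (no SLAB_NOTRACK) gets
   * ":dt-0000192".
   */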
  
  static int sysfs_slab_add(struct kmem_cache *s)
  {
  	int err;
  	const char *name;
  	int unmergeable;
  
  	if (slab_state < SYSFS)
  		/* Defer until later */
  		return 0;
  
  	unmergeable = slab_unmergeable(s);
  	if (unmergeable) {
  		/*
  		 * Slabcache can never be merged so we can use the name proper.
  		 * This is typically the case for debug situations. In that
  		 * case we can catch duplicate names easily.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
5064
  		sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0fc   Christoph Lameter   SLUB core
5065
5066
5067
5068
5069
5070
5071
5072
  		name = s->name;
  	} else {
  		/*
  		 * Create a unique name for the slab as a target
  		 * for the symlinks.
  		 */
  		name = create_unique_id(s);
  	}
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
5073
  	s->kobj.kset = slab_kset;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
5074
5075
5076
  	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
  	if (err) {
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
5077
  		return err;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
5078
  	}
81819f0fc   Christoph Lameter   SLUB core
5079
5080
  
  	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5788d8ad6   Xiaotian Feng   slub: release kob...
5081
5082
5083
  	if (err) {
  		kobject_del(&s->kobj);
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
5084
  		return err;
5788d8ad6   Xiaotian Feng   slub: release kob...
5085
  	}
81819f0fc   Christoph Lameter   SLUB core
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
  	kobject_uevent(&s->kobj, KOBJ_ADD);
  	if (!unmergeable) {
  		/* Setup first alias */
  		sysfs_slab_alias(s, s->name);
  		kfree(name);
  	}
  	return 0;
  }
  
  static void sysfs_slab_remove(struct kmem_cache *s)
  {
2bce64858   Christoph Lameter   slub: Allow remov...
5097
5098
5099
5100
5101
5102
  	if (slab_state < SYSFS)
  		/*
  		 * Sysfs has not been setup yet so no need to remove the
  		 * cache from sysfs.
  		 */
  		return;
81819f0fc   Christoph Lameter   SLUB core
5103
5104
  	kobject_uevent(&s->kobj, KOBJ_REMOVE);
  	kobject_del(&s->kobj);
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
5105
  	kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
5106
5107
5108
5109
  }
  
  /*
   * Need to buffer aliases during bootup until sysfs becomes
9f6c708e5   Nick Andrew   slub: Fix incorre...
5110
   * available lest we lose that information.
81819f0fc   Christoph Lameter   SLUB core
5111
5112
5113
5114
5115
5116
   */
  struct saved_alias {
  	struct kmem_cache *s;
  	const char *name;
  	struct saved_alias *next;
  };
5af328a51   Adrian Bunk   mm/slub.c: make c...
5117
  static struct saved_alias *alias_list;
81819f0fc   Christoph Lameter   SLUB core
5118
5119
5120
5121
5122
5123
5124
5125
5126
  
  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  {
  	struct saved_alias *al;
  
  	if (slab_state == SYSFS) {
  		/*
  		 * If we have a leftover link then remove it.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
5127
5128
  		sysfs_remove_link(&slab_kset->kobj, name);
  		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0fc   Christoph Lameter   SLUB core
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
  	}
  
  	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  	if (!al)
  		return -ENOMEM;
  
  	al->s = s;
  	al->name = name;
  	al->next = alias_list;
  	alias_list = al;
  	return 0;
  }
  
  static int __init slab_sysfs_init(void)
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
5144
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
5145
  	int err;
2bce64858   Christoph Lameter   slub: Allow remov...
5146
  	down_write(&slub_lock);
0ff21e466   Greg Kroah-Hartman   kobject: convert ...
5147
  	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
5148
  	if (!slab_kset) {
2bce64858   Christoph Lameter   slub: Allow remov...
5149
  		up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
5150
5151
5152
5153
  		printk(KERN_ERR "Cannot register slab subsystem.
  ");
  		return -ENOSYS;
  	}
26a7bd030   Christoph Lameter   SLUB: get rid of ...
5154
  	slab_state = SYSFS;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
5155
  	list_for_each_entry(s, &slab_caches, list) {
26a7bd030   Christoph Lameter   SLUB: get rid of ...
5156
  		err = sysfs_slab_add(s);
5d540fb71   Christoph Lameter   slub: do not fail...
5157
5158
5159
5160
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
  						" to sysfs
  ", s->name);
26a7bd030   Christoph Lameter   SLUB: get rid of ...
5161
  	}
81819f0fc   Christoph Lameter   SLUB core
5162
5163
5164
5165
5166
5167
  
  	while (alias_list) {
  		struct saved_alias *al = alias_list;
  
  		alias_list = alias_list->next;
  		err = sysfs_slab_alias(al->s, al->name);
5d540fb71   Christoph Lameter   slub: do not fail...
5168
5169
5170
5171
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
  					" %s to sysfs
  ", s->name);
81819f0fc   Christoph Lameter   SLUB core
5172
5173
  		kfree(al);
  	}
2bce64858   Christoph Lameter   slub: Allow remov...
5174
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
5175
5176
5177
5178
5179
  	resiliency_test();
  	return 0;
  }
  
  __initcall(slab_sysfs_init);
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
5180
  #endif /* CONFIG_SYSFS */
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5181
5182
5183
5184
  
  /*
   * The /proc/slabinfo ABI
   */
158a96242   Linus Torvalds   Unify /proc/slabi...
5185
  #ifdef CONFIG_SLABINFO
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
  static void print_slabinfo_header(struct seq_file *m)
  {
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  	seq_putc(m, '\n');
  }
  
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
  
  	down_read(&slub_lock);
  	if (!n)
  		print_slabinfo_header(m);
  
  	return seq_list_start(&slab_caches, *pos);
  }
  
  static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  
  static void s_stop(struct seq_file *m, void *p)
  {
  	up_read(&slub_lock);
  }
  
  static int s_show(struct seq_file *m, void *p)
  {
  	unsigned long nr_partials = 0;
  	unsigned long nr_slabs = 0;
  	unsigned long nr_inuse = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
5224
5225
  	unsigned long nr_objs = 0;
  	unsigned long nr_free = 0;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
  	struct kmem_cache *s;
  	int node;
  
  	s = list_entry(p, struct kmem_cache, list);
  
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  
  		if (!n)
  			continue;
  
  		nr_partials += n->nr_partial;
  		nr_slabs += atomic_long_read(&n->nr_slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
5239
5240
  		nr_objs += atomic_long_read(&n->total_objects);
  		nr_free += count_partial(n, count_free);
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5241
  	}
205ab99dd   Christoph Lameter   slub: Update stat...
5242
  	nr_inuse = nr_objs - nr_free;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5243
5244
  
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
834f3d119   Christoph Lameter   slub: Add kmem_ca...
5245
5246
  		   nr_objs, s->size, oo_objects(s->oo),
  		   (1 << oo_order(s->oo)));
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5247
5248
5249
5250
5251
5252
5253
  	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
  		   0UL);
  	seq_putc(m, '\n');
  	return 0;
  }
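  
  /*
   * A resulting /proc/slabinfo entry looks like (numbers are illustrative):
   *
   *	kmalloc-64          1184   1216     64   64    1 : tunables    0    0    0 : slabdata     19     19      0
   *
   * SLUB has no per cache tunables and does not track active vs. total slabs
   * separately, so the tunables columns are always zero and active_slabs
   * equals num_slabs.
   */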
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5254
  static const struct seq_operations slabinfo_op = {
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5255
5256
5257
5258
5259
  	.start = s_start,
  	.next = s_next,
  	.stop = s_stop,
  	.show = s_show,
  };
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
ab067e99d   Vasiliy Kulikov   mm: restrict acce...
5274
  	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5275
5276
5277
  	return 0;
  }
  module_init(slab_proc_init);
158a96242   Linus Torvalds   Unify /proc/slabi...
5278
  #endif /* CONFIG_SLABINFO */