Blame view

mm/slub.c 109 KB
81819f0fc   Christoph Lameter   SLUB core
1
2
3
4
5
6
7
  /*
   * SLUB: A slab allocator that limits cache line use instead of queuing
   * objects in per cpu and per node lists.
   *
   * The allocator synchronizes using per slab locks and only
   * uses a centralized lock to manage a pool of partial slabs.
   *
cde535359   Christoph Lameter   Christoph has moved
8
   * (C) 2007 SGI, Christoph Lameter
81819f0fc   Christoph Lameter   SLUB core
9
10
11
   */
  
  #include <linux/mm.h>
1eb5ac646   Nick Piggin   mm: SLUB fix recl...
12
  #include <linux/swap.h> /* struct reclaim_state */
81819f0fc   Christoph Lameter   SLUB core
13
14
15
16
17
  #include <linux/module.h>
  #include <linux/bit_spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/bitops.h>
  #include <linux/slab.h>
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
18
  #include <linux/proc_fs.h>
81819f0fc   Christoph Lameter   SLUB core
19
  #include <linux/seq_file.h>
02af61bb5   Zhaolei   tracing, kmemtrac...
20
  #include <linux/kmemtrace.h>
5a896d9e7   Vegard Nossum   slub: add hooks f...
21
  #include <linux/kmemcheck.h>
81819f0fc   Christoph Lameter   SLUB core
22
23
24
25
  #include <linux/cpu.h>
  #include <linux/cpuset.h>
  #include <linux/mempolicy.h>
  #include <linux/ctype.h>
3ac7fe5a4   Thomas Gleixner   infrastructure to...
26
  #include <linux/debugobjects.h>
81819f0fc   Christoph Lameter   SLUB core
27
  #include <linux/kallsyms.h>
b9049e234   Yasunori Goto   memory hotplug: m...
28
  #include <linux/memory.h>
f8bd2258e   Roman Zippel   remove div_long_l...
29
  #include <linux/math64.h>
773ff60e8   Akinobu Mita   SLUB: failslab su...
30
  #include <linux/fault-inject.h>
81819f0fc   Christoph Lameter   SLUB core
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
  
  /*
   * Lock order:
   *   1. slab_lock(page)
   *   2. slab->list_lock
   *
   *   The slab_lock protects operations on the object of a particular
   *   slab and its metadata in the page struct. If the slab lock
   *   has been taken then no allocations nor frees can be performed
   *   on the objects in the slab nor can the slab be added or removed
   *   from the partial or full lists since this would mean modifying
   *   the page_struct of the slab.
   *
   *   The list_lock protects the partial and full list on each node and
   *   the partial slab counter. If taken then no new slabs may be added or
   *   removed from the lists nor make the number of partial slabs be modified.
   *   (Note that the total number of slabs is an atomic value that may be
   *   modified without taking the list lock).
   *
   *   The list_lock is a centralized lock and thus we avoid taking it as
   *   much as possible. As long as SLUB does not have to handle partial
   *   slabs, operations can continue without any centralized lock. F.e.
   *   allocating a long series of objects that fill up slabs does not require
   *   the list lock.
   *
   *   The lock order is sometimes inverted when we are trying to get a slab
   *   off a list. We take the list_lock and then look for a page on the list
   *   to use. While we do that objects in the slabs may be freed. We can
   *   only operate on the slab if we have also taken the slab_lock. So we use
   *   a slab_trylock() on the slab. If trylock was successful then no frees
   *   can occur anymore and we can use the slab for allocations etc. If the
   *   slab_trylock() does not succeed then frees are in progress in the slab and
   *   we must stay away from it for a while since we may cause a bouncing
   *   cacheline if we try to acquire the lock. So go onto the next slab.
   *   If all pages are busy then we may allocate a new slab instead of reusing
   *   a partial slab. A new slab has noone operating on it and thus there is
   *   no danger of cacheline contention.
   *
   *   Interrupts are disabled during allocation and deallocation in order to
   *   make the slab allocator safe to use in the context of an irq. In addition
   *   interrupts are disabled to ensure that the processor does not change
   *   while handling per_cpu slabs, due to kernel preemption.
   *
   * SLUB assigns one slab for allocation to each processor.
   * Allocations only occur from these slabs called cpu slabs.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
77
78
   * Slabs with free elements are kept on a partial list and during regular
   * operations no list for full slabs is used. If an object in a full slab is
81819f0fc   Christoph Lameter   SLUB core
79
   * freed then the slab will show up again on the partial lists.
672bba3a4   Christoph Lameter   SLUB: update comm...
80
81
   * We track full slabs for debugging purposes though because otherwise we
   * cannot scan all objects.
81819f0fc   Christoph Lameter   SLUB core
82
83
84
85
86
87
88
   *
   * Slabs are freed when they become empty. Teardown and setup is
   * minimal so we rely on the page allocators per cpu caches for
   * fast frees and allocs.
   *
   * Overloading of page flags that are otherwise used for LRU management.
   *
4b6f07504   Christoph Lameter   SLUB: Define func...
89
90
91
92
93
94
95
96
97
98
99
100
   * PageActive 		The slab is frozen and exempt from list processing.
   * 			This means that the slab is dedicated to a purpose
   * 			such as satisfying allocations for a specific
   * 			processor. Objects may be freed in the slab while
   * 			it is frozen but slab_free will then skip the usual
   * 			list operations. It is up to the processor holding
   * 			the slab to integrate the slab into the slab lists
   * 			when the slab is no longer needed.
   *
   * 			One use of this flag is to mark slabs that are
   * 			used for allocations. Then such a slab becomes a cpu
   * 			slab. The cpu slab may be equipped with an additional
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
101
   * 			freelist that allows lockless access to
894b8788d   Christoph Lameter   slub: support con...
102
103
   * 			free objects in addition to the regular freelist
   * 			that requires the slab lock.
81819f0fc   Christoph Lameter   SLUB core
104
105
106
   *
   * PageError		Slab requires special handling due to debug
   * 			options set. This moves	slab handling out of
894b8788d   Christoph Lameter   slub: support con...
107
   * 			the fast path and disables lockless freelists.
81819f0fc   Christoph Lameter   SLUB core
108
   */
5577bd8a8   Christoph Lameter   SLUB: Do our own ...
109
  #ifdef CONFIG_SLUB_DEBUG
8a38082d2   Andy Whitcroft   slub: record page...
110
  #define SLABDEBUG 1
5577bd8a8   Christoph Lameter   SLUB: Do our own ...
111
112
113
  #else
  #define SLABDEBUG 0
  #endif
81819f0fc   Christoph Lameter   SLUB core
114
115
116
  /*
   * Issues still to be resolved:
   *
81819f0fc   Christoph Lameter   SLUB core
117
118
   * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
   *
81819f0fc   Christoph Lameter   SLUB core
119
120
121
122
123
   * - Variable sizing of the per node arrays
   */
  
  /* Enable to test recovery from slab corruption on boot */
  #undef SLUB_RESILIENCY_TEST
81819f0fc   Christoph Lameter   SLUB core
124
  /*
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
125
126
127
   * Mininum number of partial slabs. These will be left on the partial
   * lists even if they are empty. kmem_cache_shrink may reclaim them.
   */
76be89500   Christoph Lameter   SLUB: Improve hac...
128
  #define MIN_PARTIAL 5
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
129

2086d26a0   Christoph Lameter   SLUB: Free slabs ...
130
131
132
133
134
135
  /*
   * Maximum number of desirable partial slabs.
   * The existence of more partial slabs makes kmem_cache_shrink
   * sort the partial list by the number of objects in the.
   */
  #define MAX_PARTIAL 10
81819f0fc   Christoph Lameter   SLUB core
136
137
  #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  				SLAB_POISON | SLAB_STORE_USER)
672bba3a4   Christoph Lameter   SLUB: update comm...
138

81819f0fc   Christoph Lameter   SLUB core
139
  /*
3de472138   David Rientjes   slub: use size an...
140
141
142
   * Debugging flags that require metadata to be stored in the slab.  These get
   * disabled when slub_debug=O is used and a cache's min order increases with
   * metadata.
fa5ec8a1f   David Rientjes   slub: add option ...
143
   */
3de472138   David Rientjes   slub: use size an...
144
  #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
fa5ec8a1f   David Rientjes   slub: add option ...
145
146
  
  /*
81819f0fc   Christoph Lameter   SLUB core
147
148
149
   * Set of flags that will prevent slab merging
   */
  #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
150
151
  		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB)
81819f0fc   Christoph Lameter   SLUB core
152
153
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
5a896d9e7   Vegard Nossum   slub: add hooks f...
154
  		SLAB_CACHE_DMA | SLAB_NOTRACK)
81819f0fc   Christoph Lameter   SLUB core
155

210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
156
157
158
  #define OO_SHIFT	16
  #define OO_MASK		((1 << OO_SHIFT) - 1)
  #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
81819f0fc   Christoph Lameter   SLUB core
159
  /* Internal SLUB flags */
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
160
161
  #define __OBJECT_POISON		0x80000000 /* Poison object */
  #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
81819f0fc   Christoph Lameter   SLUB core
162
163
164
165
166
167
168
169
170
171
  
  static int kmem_size = sizeof(struct kmem_cache);
  
  #ifdef CONFIG_SMP
  static struct notifier_block slab_notifier;
  #endif
  
  static enum {
  	DOWN,		/* No slab functionality available */
  	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
672bba3a4   Christoph Lameter   SLUB: update comm...
172
  	UP,		/* Everything works but does not show up in sysfs */
81819f0fc   Christoph Lameter   SLUB core
173
174
175
176
177
  	SYSFS		/* Sysfs up */
  } slab_state = DOWN;
  
  /* A list of all slab caches on the system */
  static DECLARE_RWSEM(slub_lock);
5af328a51   Adrian Bunk   mm/slub.c: make c...
178
  static LIST_HEAD(slab_caches);
81819f0fc   Christoph Lameter   SLUB core
179

02cbc8744   Christoph Lameter   SLUB: move tracki...
180
181
182
183
  /*
   * Tracking user of a slab.
   */
  struct track {
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
184
  	unsigned long addr;	/* Called from address */
02cbc8744   Christoph Lameter   SLUB: move tracki...
185
186
187
188
189
190
  	int cpu;		/* Was running on cpu */
  	int pid;		/* Pid context */
  	unsigned long when;	/* When did the operation occur */
  };
  
  enum track_item { TRACK_ALLOC, TRACK_FREE };
f6acb6350   Christoph Lameter   slub: #ifdef simp...
191
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
192
193
194
  static int sysfs_slab_add(struct kmem_cache *);
  static int sysfs_slab_alias(struct kmem_cache *, const char *);
  static void sysfs_slab_remove(struct kmem_cache *);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
195

81819f0fc   Christoph Lameter   SLUB core
196
  #else
0c7100132   Christoph Lameter   SLUB: add some mo...
197
198
199
  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
  							{ return 0; }
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
200
201
202
203
  static inline void sysfs_slab_remove(struct kmem_cache *s)
  {
  	kfree(s);
  }
8ff12cfc0   Christoph Lameter   SLUB: Support for...
204

81819f0fc   Christoph Lameter   SLUB core
205
  #endif
84e554e68   Christoph Lameter   SLUB: Make slub s...
206
  static inline void stat(struct kmem_cache *s, enum stat_item si)
8ff12cfc0   Christoph Lameter   SLUB: Support for...
207
208
  {
  #ifdef CONFIG_SLUB_STATS
84e554e68   Christoph Lameter   SLUB: Make slub s...
209
  	__this_cpu_inc(s->cpu_slab->stat[si]);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
210
211
  #endif
  }
81819f0fc   Christoph Lameter   SLUB core
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
  /********************************************************************
   * 			Core slab cache functions
   *******************************************************************/
  
  int slab_is_available(void)
  {
  	return slab_state >= UP;
  }
  
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  #ifdef CONFIG_NUMA
  	return s->node[node];
  #else
  	return &s->local_node;
  #endif
  }
6446faa2f   Christoph Lameter   slub: Fix up comm...
229
  /* Verify that a pointer has an address that is valid within a slab page */
02cbc8744   Christoph Lameter   SLUB: move tracki...
230
231
232
233
  static inline int check_valid_pointer(struct kmem_cache *s,
  				struct page *page, const void *object)
  {
  	void *base;
a973e9dd1   Christoph Lameter   Revert "unique en...
234
  	if (!object)
02cbc8744   Christoph Lameter   SLUB: move tracki...
235
  		return 1;
a973e9dd1   Christoph Lameter   Revert "unique en...
236
  	base = page_address(page);
39b264641   Christoph Lameter   slub: Store max n...
237
  	if (object < base || object >= base + page->objects * s->size ||
02cbc8744   Christoph Lameter   SLUB: move tracki...
238
239
240
241
242
243
  		(object - base) % s->size) {
  		return 0;
  	}
  
  	return 1;
  }
7656c72b5   Christoph Lameter   SLUB: add macros ...
244
245
246
247
248
249
250
251
252
253
254
  static inline void *get_freepointer(struct kmem_cache *s, void *object)
  {
  	return *(void **)(object + s->offset);
  }
  
  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  {
  	*(void **)(object + s->offset) = fp;
  }
  
  /* Loop over all objects in a slab */
224a88be4   Christoph Lameter   slub: for_each_ob...
255
256
  #define for_each_object(__p, __s, __addr, __objects) \
  	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
7656c72b5   Christoph Lameter   SLUB: add macros ...
257
258
259
260
  			__p += (__s)->size)
  
  /* Scan freelist */
  #define for_each_free_object(__p, __s, __free) \
a973e9dd1   Christoph Lameter   Revert "unique en...
261
  	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
7656c72b5   Christoph Lameter   SLUB: add macros ...
262
263
264
265
266
267
  
  /* Determine object index from a given position */
  static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  {
  	return (p - addr) / s->size;
  }
834f3d119   Christoph Lameter   slub: Add kmem_ca...
268
269
270
271
  static inline struct kmem_cache_order_objects oo_make(int order,
  						unsigned long size)
  {
  	struct kmem_cache_order_objects x = {
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
272
  		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
834f3d119   Christoph Lameter   slub: Add kmem_ca...
273
274
275
276
277
278
279
  	};
  
  	return x;
  }
  
  static inline int oo_order(struct kmem_cache_order_objects x)
  {
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
280
  	return x.x >> OO_SHIFT;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
281
282
283
284
  }
  
  static inline int oo_objects(struct kmem_cache_order_objects x)
  {
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
285
  	return x.x & OO_MASK;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
286
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
287
288
289
290
  #ifdef CONFIG_SLUB_DEBUG
  /*
   * Debug settings:
   */
f0630fff5   Christoph Lameter   SLUB: support slu...
291
292
293
  #ifdef CONFIG_SLUB_DEBUG_ON
  static int slub_debug = DEBUG_DEFAULT_FLAGS;
  #else
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
294
  static int slub_debug;
f0630fff5   Christoph Lameter   SLUB: support slu...
295
  #endif
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
296
297
  
  static char *slub_debug_slabs;
fa5ec8a1f   David Rientjes   slub: add option ...
298
  static int disable_higher_order_debug;
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
299

7656c72b5   Christoph Lameter   SLUB: add macros ...
300
  /*
81819f0fc   Christoph Lameter   SLUB core
301
302
303
304
305
306
307
308
309
310
311
312
   * Object debugging
   */
  static void print_section(char *text, u8 *addr, unsigned int length)
  {
  	int i, offset;
  	int newline = 1;
  	char ascii[17];
  
  	ascii[16] = 0;
  
  	for (i = 0; i < length; i++) {
  		if (newline) {
249226847   Christoph Lameter   SLUB: change erro...
313
  			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
81819f0fc   Christoph Lameter   SLUB core
314
315
  			newline = 0;
  		}
064287807   Pekka Enberg   SLUB: Fix coding ...
316
  		printk(KERN_CONT " %02x", addr[i]);
81819f0fc   Christoph Lameter   SLUB core
317
318
319
  		offset = i % 16;
  		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
  		if (offset == 15) {
064287807   Pekka Enberg   SLUB: Fix coding ...
320
321
  			printk(KERN_CONT " %s
  ", ascii);
81819f0fc   Christoph Lameter   SLUB core
322
323
324
325
326
327
  			newline = 1;
  		}
  	}
  	if (!newline) {
  		i %= 16;
  		while (i < 16) {
064287807   Pekka Enberg   SLUB: Fix coding ...
328
  			printk(KERN_CONT "   ");
81819f0fc   Christoph Lameter   SLUB core
329
330
331
  			ascii[i] = ' ';
  			i++;
  		}
064287807   Pekka Enberg   SLUB: Fix coding ...
332
333
  		printk(KERN_CONT " %s
  ", ascii);
81819f0fc   Christoph Lameter   SLUB core
334
335
  	}
  }
81819f0fc   Christoph Lameter   SLUB core
336
337
338
339
340
341
342
343
344
345
346
347
348
349
  static struct track *get_track(struct kmem_cache *s, void *object,
  	enum track_item alloc)
  {
  	struct track *p;
  
  	if (s->offset)
  		p = object + s->offset + sizeof(void *);
  	else
  		p = object + s->inuse;
  
  	return p + alloc;
  }
  
  static void set_track(struct kmem_cache *s, void *object,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
350
  			enum track_item alloc, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
351
  {
1a00df4a2   Akinobu Mita   slub: use get_tra...
352
  	struct track *p = get_track(s, object, alloc);
81819f0fc   Christoph Lameter   SLUB core
353

81819f0fc   Christoph Lameter   SLUB core
354
355
356
  	if (addr) {
  		p->addr = addr;
  		p->cpu = smp_processor_id();
88e4ccf29   Alexey Dobriyan   slub: current is ...
357
  		p->pid = current->pid;
81819f0fc   Christoph Lameter   SLUB core
358
359
360
361
  		p->when = jiffies;
  	} else
  		memset(p, 0, sizeof(struct track));
  }
81819f0fc   Christoph Lameter   SLUB core
362
363
  static void init_tracking(struct kmem_cache *s, void *object)
  {
249226847   Christoph Lameter   SLUB: change erro...
364
365
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
366
367
  	set_track(s, object, TRACK_FREE, 0UL);
  	set_track(s, object, TRACK_ALLOC, 0UL);
81819f0fc   Christoph Lameter   SLUB core
368
369
370
371
372
373
  }
  
  static void print_track(const char *s, struct track *t)
  {
  	if (!t->addr)
  		return;
7daf705f3   Linus Torvalds   Start using the n...
374
375
  	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d
  ",
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
376
  		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
249226847   Christoph Lameter   SLUB: change erro...
377
378
379
380
381
382
383
384
385
386
387
388
389
  }
  
  static void print_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  
  	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  	print_track("Freed", get_track(s, object, TRACK_FREE));
  }
  
  static void print_page_info(struct page *page)
  {
39b264641   Christoph Lameter   slub: Store max n...
390
391
392
  	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx
  ",
  		page, page->objects, page->inuse, page->freelist, page->flags);
249226847   Christoph Lameter   SLUB: change erro...
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
  
  }
  
  static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "========================================"
  			"=====================================
  ");
  	printk(KERN_ERR "BUG %s: %s
  ", s->name, buf);
  	printk(KERN_ERR "----------------------------------------"
  			"-------------------------------------
  
  ");
81819f0fc   Christoph Lameter   SLUB core
413
  }
249226847   Christoph Lameter   SLUB: change erro...
414
415
416
417
418
419
420
421
422
423
424
425
426
  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "FIX %s: %s
  ", s->name, buf);
  }
  
  static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
81819f0fc   Christoph Lameter   SLUB core
427
428
  {
  	unsigned int off;	/* Offset of last byte */
a973e9dd1   Christoph Lameter   Revert "unique en...
429
  	u8 *addr = page_address(page);
249226847   Christoph Lameter   SLUB: change erro...
430
431
432
433
434
435
436
437
438
439
440
441
  
  	print_tracking(s, p);
  
  	print_page_info(page);
  
  	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p
  
  ",
  			p, p - addr, get_freepointer(s, p));
  
  	if (p > addr + 16)
  		print_section("Bytes b4", p - 16, 16);
0ebd652b3   Pekka Enberg   slub: dump more d...
442
  	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
81819f0fc   Christoph Lameter   SLUB core
443
444
445
446
  
  	if (s->flags & SLAB_RED_ZONE)
  		print_section("Redzone", p + s->objsize,
  			s->inuse - s->objsize);
81819f0fc   Christoph Lameter   SLUB core
447
448
449
450
  	if (s->offset)
  		off = s->offset + sizeof(void *);
  	else
  		off = s->inuse;
249226847   Christoph Lameter   SLUB: change erro...
451
  	if (s->flags & SLAB_STORE_USER)
81819f0fc   Christoph Lameter   SLUB core
452
  		off += 2 * sizeof(struct track);
81819f0fc   Christoph Lameter   SLUB core
453
454
455
  
  	if (off != s->size)
  		/* Beginning of the filler is the free pointer */
249226847   Christoph Lameter   SLUB: change erro...
456
457
458
  		print_section("Padding", p + off, s->size - off);
  
  	dump_stack();
81819f0fc   Christoph Lameter   SLUB core
459
460
461
462
463
  }
  
  static void object_err(struct kmem_cache *s, struct page *page,
  			u8 *object, char *reason)
  {
3dc506378   Christoph Lameter   slab_err: Pass pa...
464
  	slab_bug(s, "%s", reason);
249226847   Christoph Lameter   SLUB: change erro...
465
  	print_trailer(s, page, object);
81819f0fc   Christoph Lameter   SLUB core
466
  }
249226847   Christoph Lameter   SLUB: change erro...
467
  static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
81819f0fc   Christoph Lameter   SLUB core
468
469
470
  {
  	va_list args;
  	char buf[100];
249226847   Christoph Lameter   SLUB: change erro...
471
472
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
81819f0fc   Christoph Lameter   SLUB core
473
  	va_end(args);
3dc506378   Christoph Lameter   slab_err: Pass pa...
474
  	slab_bug(s, "%s", buf);
249226847   Christoph Lameter   SLUB: change erro...
475
  	print_page_info(page);
81819f0fc   Christoph Lameter   SLUB core
476
477
478
479
480
481
482
483
484
  	dump_stack();
  }
  
  static void init_object(struct kmem_cache *s, void *object, int active)
  {
  	u8 *p = object;
  
  	if (s->flags & __OBJECT_POISON) {
  		memset(p, POISON_FREE, s->objsize - 1);
064287807   Pekka Enberg   SLUB: Fix coding ...
485
  		p[s->objsize - 1] = POISON_END;
81819f0fc   Christoph Lameter   SLUB core
486
487
488
489
490
491
492
  	}
  
  	if (s->flags & SLAB_RED_ZONE)
  		memset(p + s->objsize,
  			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
  			s->inuse - s->objsize);
  }
249226847   Christoph Lameter   SLUB: change erro...
493
  static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
81819f0fc   Christoph Lameter   SLUB core
494
495
496
  {
  	while (bytes) {
  		if (*start != (u8)value)
249226847   Christoph Lameter   SLUB: change erro...
497
  			return start;
81819f0fc   Christoph Lameter   SLUB core
498
499
500
  		start++;
  		bytes--;
  	}
249226847   Christoph Lameter   SLUB: change erro...
501
502
503
504
505
506
507
508
509
510
511
512
513
  	return NULL;
  }
  
  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  						void *from, void *to)
  {
  	slab_fix(s, "Restoring 0x%p-0x%p=0x%x
  ", from, to - 1, data);
  	memset(from, data, to - from);
  }
  
  static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  			u8 *object, char *what,
064287807   Pekka Enberg   SLUB: Fix coding ...
514
  			u8 *start, unsigned int value, unsigned int bytes)
249226847   Christoph Lameter   SLUB: change erro...
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
  {
  	u8 *fault;
  	u8 *end;
  
  	fault = check_bytes(start, value, bytes);
  	if (!fault)
  		return 1;
  
  	end = start + bytes;
  	while (end > fault && end[-1] == value)
  		end--;
  
  	slab_bug(s, "%s overwritten", what);
  	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x
  ",
  					fault, end - 1, fault[0], value);
  	print_trailer(s, page, object);
  
  	restore_bytes(s, what, value, fault, end);
  	return 0;
81819f0fc   Christoph Lameter   SLUB core
535
  }
81819f0fc   Christoph Lameter   SLUB core
536
537
538
539
540
541
542
  /*
   * Object layout:
   *
   * object address
   * 	Bytes of the object to be managed.
   * 	If the freepointer may overlay the object then the free
   * 	pointer is the first word of the object.
672bba3a4   Christoph Lameter   SLUB: update comm...
543
   *
81819f0fc   Christoph Lameter   SLUB core
544
545
546
547
548
   * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
   * 	0xa5 (POISON_END)
   *
   * object + s->objsize
   * 	Padding to reach word boundary. This is also used for Redzoning.
672bba3a4   Christoph Lameter   SLUB: update comm...
549
550
551
   * 	Padding is extended by another word if Redzoning is enabled and
   * 	objsize == inuse.
   *
81819f0fc   Christoph Lameter   SLUB core
552
553
554
555
   * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
   * 	0xcc (RED_ACTIVE) for objects in use.
   *
   * object + s->inuse
672bba3a4   Christoph Lameter   SLUB: update comm...
556
557
   * 	Meta data starts here.
   *
81819f0fc   Christoph Lameter   SLUB core
558
559
   * 	A. Free pointer (if we cannot overwrite object on free)
   * 	B. Tracking data for SLAB_STORE_USER
672bba3a4   Christoph Lameter   SLUB: update comm...
560
   * 	C. Padding to reach required alignment boundary or at mininum
6446faa2f   Christoph Lameter   slub: Fix up comm...
561
   * 		one word if debugging is on to be able to detect writes
672bba3a4   Christoph Lameter   SLUB: update comm...
562
563
564
   * 		before the word boundary.
   *
   *	Padding is done using 0x5a (POISON_INUSE)
81819f0fc   Christoph Lameter   SLUB core
565
566
   *
   * object + s->size
672bba3a4   Christoph Lameter   SLUB: update comm...
567
   * 	Nothing is used beyond s->size.
81819f0fc   Christoph Lameter   SLUB core
568
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
569
570
   * If slabcaches are merged then the objsize and inuse boundaries are mostly
   * ignored. And therefore no slab options that rely on these boundaries
81819f0fc   Christoph Lameter   SLUB core
571
572
   * may be used with merged slabcaches.
   */
81819f0fc   Christoph Lameter   SLUB core
573
574
575
576
577
578
579
580
581
582
583
584
585
586
  static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned long off = s->inuse;	/* The end of info */
  
  	if (s->offset)
  		/* Freepointer is placed after the object. */
  		off += sizeof(void *);
  
  	if (s->flags & SLAB_STORE_USER)
  		/* We also have user information there */
  		off += 2 * sizeof(struct track);
  
  	if (s->size == off)
  		return 1;
249226847   Christoph Lameter   SLUB: change erro...
587
588
  	return check_bytes_and_report(s, page, p, "Object padding",
  				p + off, POISON_INUSE, s->size - off);
81819f0fc   Christoph Lameter   SLUB core
589
  }
39b264641   Christoph Lameter   slub: Store max n...
590
  /* Check the pad bytes at the end of a slab page */
81819f0fc   Christoph Lameter   SLUB core
591
592
  static int slab_pad_check(struct kmem_cache *s, struct page *page)
  {
249226847   Christoph Lameter   SLUB: change erro...
593
594
595
596
597
  	u8 *start;
  	u8 *fault;
  	u8 *end;
  	int length;
  	int remainder;
81819f0fc   Christoph Lameter   SLUB core
598
599
600
  
  	if (!(s->flags & SLAB_POISON))
  		return 1;
a973e9dd1   Christoph Lameter   Revert "unique en...
601
  	start = page_address(page);
834f3d119   Christoph Lameter   slub: Add kmem_ca...
602
  	length = (PAGE_SIZE << compound_order(page));
39b264641   Christoph Lameter   slub: Store max n...
603
604
  	end = start + length;
  	remainder = length % s->size;
81819f0fc   Christoph Lameter   SLUB core
605
606
  	if (!remainder)
  		return 1;
39b264641   Christoph Lameter   slub: Store max n...
607
  	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
249226847   Christoph Lameter   SLUB: change erro...
608
609
610
611
612
613
  	if (!fault)
  		return 1;
  	while (end > fault && end[-1] == POISON_INUSE)
  		end--;
  
  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
39b264641   Christoph Lameter   slub: Store max n...
614
  	print_section("Padding", end - remainder, remainder);
249226847   Christoph Lameter   SLUB: change erro...
615

8a3d271de   Eric Dumazet   slub: fix slab_pa...
616
  	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
249226847   Christoph Lameter   SLUB: change erro...
617
  	return 0;
81819f0fc   Christoph Lameter   SLUB core
618
619
620
621
622
623
624
625
626
627
628
  }
  
  static int check_object(struct kmem_cache *s, struct page *page,
  					void *object, int active)
  {
  	u8 *p = object;
  	u8 *endobject = object + s->objsize;
  
  	if (s->flags & SLAB_RED_ZONE) {
  		unsigned int red =
  			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
249226847   Christoph Lameter   SLUB: change erro...
629
630
  		if (!check_bytes_and_report(s, page, object, "Redzone",
  			endobject, red, s->inuse - s->objsize))
81819f0fc   Christoph Lameter   SLUB core
631
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
632
  	} else {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
633
634
635
636
  		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
  			check_bytes_and_report(s, page, p, "Alignment padding",
  				endobject, POISON_INUSE, s->inuse - s->objsize);
  		}
81819f0fc   Christoph Lameter   SLUB core
637
638
639
640
  	}
  
  	if (s->flags & SLAB_POISON) {
  		if (!active && (s->flags & __OBJECT_POISON) &&
249226847   Christoph Lameter   SLUB: change erro...
641
642
643
  			(!check_bytes_and_report(s, page, p, "Poison", p,
  					POISON_FREE, s->objsize - 1) ||
  			 !check_bytes_and_report(s, page, p, "Poison",
064287807   Pekka Enberg   SLUB: Fix coding ...
644
  				p + s->objsize - 1, POISON_END, 1)))
81819f0fc   Christoph Lameter   SLUB core
645
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
  		/*
  		 * check_pad_bytes cleans up on its own.
  		 */
  		check_pad_bytes(s, page, p);
  	}
  
  	if (!s->offset && active)
  		/*
  		 * Object and freepointer overlap. Cannot check
  		 * freepointer while object is allocated.
  		 */
  		return 1;
  
  	/* Check free pointer validity */
  	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  		object_err(s, page, p, "Freepointer corrupt");
  		/*
9f6c708e5   Nick Andrew   slub: Fix incorre...
663
  		 * No choice but to zap it and thus lose the remainder
81819f0fc   Christoph Lameter   SLUB core
664
  		 * of the free objects in this slab. May cause
672bba3a4   Christoph Lameter   SLUB: update comm...
665
  		 * another error because the object count is now wrong.
81819f0fc   Christoph Lameter   SLUB core
666
  		 */
a973e9dd1   Christoph Lameter   Revert "unique en...
667
  		set_freepointer(s, p, NULL);
81819f0fc   Christoph Lameter   SLUB core
668
669
670
671
672
673
674
  		return 0;
  	}
  	return 1;
  }
  
  static int check_slab(struct kmem_cache *s, struct page *page)
  {
39b264641   Christoph Lameter   slub: Store max n...
675
  	int maxobj;
81819f0fc   Christoph Lameter   SLUB core
676
677
678
  	VM_BUG_ON(!irqs_disabled());
  
  	if (!PageSlab(page)) {
249226847   Christoph Lameter   SLUB: change erro...
679
  		slab_err(s, page, "Not a valid slab page");
81819f0fc   Christoph Lameter   SLUB core
680
681
  		return 0;
  	}
39b264641   Christoph Lameter   slub: Store max n...
682
683
684
685
686
687
688
689
  
  	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
  	if (page->objects > maxobj) {
  		slab_err(s, page, "objects %u > max %u",
  			s->name, page->objects, maxobj);
  		return 0;
  	}
  	if (page->inuse > page->objects) {
249226847   Christoph Lameter   SLUB: change erro...
690
  		slab_err(s, page, "inuse %u > max %u",
39b264641   Christoph Lameter   slub: Store max n...
691
  			s->name, page->inuse, page->objects);
81819f0fc   Christoph Lameter   SLUB core
692
693
694
695
696
697
698
699
  		return 0;
  	}
  	/* Slab_pad_check fixes things up after itself */
  	slab_pad_check(s, page);
  	return 1;
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
700
701
   * Determine if a certain object on a page is on the freelist. Must hold the
   * slab lock to guarantee that the chains are in a consistent state.
81819f0fc   Christoph Lameter   SLUB core
702
703
704
705
706
707
   */
  static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  {
  	int nr = 0;
  	void *fp = page->freelist;
  	void *object = NULL;
224a88be4   Christoph Lameter   slub: for_each_ob...
708
  	unsigned long max_objects;
81819f0fc   Christoph Lameter   SLUB core
709

39b264641   Christoph Lameter   slub: Store max n...
710
  	while (fp && nr <= page->objects) {
81819f0fc   Christoph Lameter   SLUB core
711
712
713
714
715
716
  		if (fp == search)
  			return 1;
  		if (!check_valid_pointer(s, page, fp)) {
  			if (object) {
  				object_err(s, page, object,
  					"Freechain corrupt");
a973e9dd1   Christoph Lameter   Revert "unique en...
717
  				set_freepointer(s, object, NULL);
81819f0fc   Christoph Lameter   SLUB core
718
719
  				break;
  			} else {
249226847   Christoph Lameter   SLUB: change erro...
720
  				slab_err(s, page, "Freepointer corrupt");
a973e9dd1   Christoph Lameter   Revert "unique en...
721
  				page->freelist = NULL;
39b264641   Christoph Lameter   slub: Store max n...
722
  				page->inuse = page->objects;
249226847   Christoph Lameter   SLUB: change erro...
723
  				slab_fix(s, "Freelist cleared");
81819f0fc   Christoph Lameter   SLUB core
724
725
726
727
728
729
730
731
  				return 0;
  			}
  			break;
  		}
  		object = fp;
  		fp = get_freepointer(s, object);
  		nr++;
  	}
224a88be4   Christoph Lameter   slub: for_each_ob...
732
  	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
733
734
  	if (max_objects > MAX_OBJS_PER_PAGE)
  		max_objects = MAX_OBJS_PER_PAGE;
224a88be4   Christoph Lameter   slub: for_each_ob...
735
736
737
738
739
740
741
  
  	if (page->objects != max_objects) {
  		slab_err(s, page, "Wrong number of objects. Found %d but "
  			"should be %d", page->objects, max_objects);
  		page->objects = max_objects;
  		slab_fix(s, "Number of objects adjusted.");
  	}
39b264641   Christoph Lameter   slub: Store max n...
742
  	if (page->inuse != page->objects - nr) {
70d71228a   Christoph Lameter   slub: remove obje...
743
  		slab_err(s, page, "Wrong object count. Counter is %d but "
39b264641   Christoph Lameter   slub: Store max n...
744
745
  			"counted were %d", page->inuse, page->objects - nr);
  		page->inuse = page->objects - nr;
249226847   Christoph Lameter   SLUB: change erro...
746
  		slab_fix(s, "Object count adjusted.");
81819f0fc   Christoph Lameter   SLUB core
747
748
749
  	}
  	return search == NULL;
  }
0121c619d   Christoph Lameter   slub: Whitespace ...
750
751
  static void trace(struct kmem_cache *s, struct page *page, void *object,
  								int alloc)
3ec097421   Christoph Lameter   SLUB: Simplify de...
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
  {
  	if (s->flags & SLAB_TRACE) {
  		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p
  ",
  			s->name,
  			alloc ? "alloc" : "free",
  			object, page->inuse,
  			page->freelist);
  
  		if (!alloc)
  			print_section("Object", (void *)object, s->objsize);
  
  		dump_stack();
  	}
  }
643b11384   Christoph Lameter   slub: enable trac...
767
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
768
   * Tracking of fully allocated slabs for debugging purposes.
643b11384   Christoph Lameter   slub: enable trac...
769
   */
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
770
  static void add_full(struct kmem_cache_node *n, struct page *page)
643b11384   Christoph Lameter   slub: enable trac...
771
  {
643b11384   Christoph Lameter   slub: enable trac...
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
  	spin_lock(&n->list_lock);
  	list_add(&page->lru, &n->full);
  	spin_unlock(&n->list_lock);
  }
  
  static void remove_full(struct kmem_cache *s, struct page *page)
  {
  	struct kmem_cache_node *n;
  
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  
  	n = get_node(s, page_to_nid(page));
  
  	spin_lock(&n->list_lock);
  	list_del(&page->lru);
  	spin_unlock(&n->list_lock);
  }
0f389ec63   Christoph Lameter   slub: No need for...
790
791
792
793
794
795
796
  /* Tracking of the number of slabs for debugging purposes */
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	return atomic_long_read(&n->nr_slabs);
  }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
797
798
799
800
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  {
  	return atomic_long_read(&n->nr_slabs);
  }
205ab99dd   Christoph Lameter   slub: Update stat...
801
  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec63   Christoph Lameter   slub: No need for...
802
803
804
805
806
807
808
809
810
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	/*
  	 * May be called early in order to allocate a slab for the
  	 * kmem_cache_node structure. Solve the chicken-egg
  	 * dilemma by deferring the increment of the count during
  	 * bootstrap (see early_kmem_cache_node_alloc).
  	 */
205ab99dd   Christoph Lameter   slub: Update stat...
811
  	if (!NUMA_BUILD || n) {
0f389ec63   Christoph Lameter   slub: No need for...
812
  		atomic_long_inc(&n->nr_slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
813
814
  		atomic_long_add(objects, &n->total_objects);
  	}
0f389ec63   Christoph Lameter   slub: No need for...
815
  }
205ab99dd   Christoph Lameter   slub: Update stat...
816
  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
0f389ec63   Christoph Lameter   slub: No need for...
817
818
819
820
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	atomic_long_dec(&n->nr_slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
821
  	atomic_long_sub(objects, &n->total_objects);
0f389ec63   Christoph Lameter   slub: No need for...
822
823
824
  }
  
  /* Object debug checks for alloc/free paths */
3ec097421   Christoph Lameter   SLUB: Simplify de...
825
826
827
828
829
830
831
832
833
834
835
  static void setup_object_debug(struct kmem_cache *s, struct page *page,
  								void *object)
  {
  	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  		return;
  
  	init_object(s, object, 0);
  	init_tracking(s, object);
  }
  
  static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
836
  					void *object, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
837
838
839
  {
  	if (!check_slab(s, page))
  		goto bad;
d692ef6dc   Christoph Lameter   slub: Remove usel...
840
  	if (!on_freelist(s, page, object)) {
249226847   Christoph Lameter   SLUB: change erro...
841
  		object_err(s, page, object, "Object already allocated");
70d71228a   Christoph Lameter   slub: remove obje...
842
  		goto bad;
81819f0fc   Christoph Lameter   SLUB core
843
844
845
846
  	}
  
  	if (!check_valid_pointer(s, page, object)) {
  		object_err(s, page, object, "Freelist Pointer check fails");
70d71228a   Christoph Lameter   slub: remove obje...
847
  		goto bad;
81819f0fc   Christoph Lameter   SLUB core
848
  	}
d692ef6dc   Christoph Lameter   slub: Remove usel...
849
  	if (!check_object(s, page, object, 0))
81819f0fc   Christoph Lameter   SLUB core
850
  		goto bad;
81819f0fc   Christoph Lameter   SLUB core
851

3ec097421   Christoph Lameter   SLUB: Simplify de...
852
853
854
855
856
  	/* Success perform special debug activities for allocs */
  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_ALLOC, addr);
  	trace(s, page, object, 1);
  	init_object(s, object, 1);
81819f0fc   Christoph Lameter   SLUB core
857
  	return 1;
3ec097421   Christoph Lameter   SLUB: Simplify de...
858

81819f0fc   Christoph Lameter   SLUB core
859
860
861
862
863
  bad:
  	if (PageSlab(page)) {
  		/*
  		 * If this is a slab page then lets do the best we can
  		 * to avoid issues in the future. Marking all objects
672bba3a4   Christoph Lameter   SLUB: update comm...
864
  		 * as used avoids touching the remaining objects.
81819f0fc   Christoph Lameter   SLUB core
865
  		 */
249226847   Christoph Lameter   SLUB: change erro...
866
  		slab_fix(s, "Marking all objects used");
39b264641   Christoph Lameter   slub: Store max n...
867
  		page->inuse = page->objects;
a973e9dd1   Christoph Lameter   Revert "unique en...
868
  		page->freelist = NULL;
81819f0fc   Christoph Lameter   SLUB core
869
870
871
  	}
  	return 0;
  }
3ec097421   Christoph Lameter   SLUB: Simplify de...
872
  static int free_debug_processing(struct kmem_cache *s, struct page *page,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
873
  					void *object, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
874
875
876
877
878
  {
  	if (!check_slab(s, page))
  		goto fail;
  
  	if (!check_valid_pointer(s, page, object)) {
70d71228a   Christoph Lameter   slub: remove obje...
879
  		slab_err(s, page, "Invalid object pointer 0x%p", object);
81819f0fc   Christoph Lameter   SLUB core
880
881
882
883
  		goto fail;
  	}
  
  	if (on_freelist(s, page, object)) {
249226847   Christoph Lameter   SLUB: change erro...
884
  		object_err(s, page, object, "Object already free");
81819f0fc   Christoph Lameter   SLUB core
885
886
887
888
889
890
891
  		goto fail;
  	}
  
  	if (!check_object(s, page, object, 1))
  		return 0;
  
  	if (unlikely(s != page->slab)) {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
892
  		if (!PageSlab(page)) {
70d71228a   Christoph Lameter   slub: remove obje...
893
894
  			slab_err(s, page, "Attempt to free object(0x%p) "
  				"outside of slab", object);
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
895
  		} else if (!page->slab) {
81819f0fc   Christoph Lameter   SLUB core
896
  			printk(KERN_ERR
70d71228a   Christoph Lameter   slub: remove obje...
897
898
  				"SLUB <none>: no slab for object 0x%p.
  ",
81819f0fc   Christoph Lameter   SLUB core
899
  						object);
70d71228a   Christoph Lameter   slub: remove obje...
900
  			dump_stack();
064287807   Pekka Enberg   SLUB: Fix coding ...
901
  		} else
249226847   Christoph Lameter   SLUB: change erro...
902
903
  			object_err(s, page, object,
  					"page slab pointer corrupt.");
81819f0fc   Christoph Lameter   SLUB core
904
905
  		goto fail;
  	}
3ec097421   Christoph Lameter   SLUB: Simplify de...
906
907
  
  	/* Special debug activities for freeing objects */
8a38082d2   Andy Whitcroft   slub: record page...
908
  	if (!PageSlubFrozen(page) && !page->freelist)
3ec097421   Christoph Lameter   SLUB: Simplify de...
909
910
911
912
913
  		remove_full(s, page);
  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_FREE, addr);
  	trace(s, page, object, 0);
  	init_object(s, object, 0);
81819f0fc   Christoph Lameter   SLUB core
914
  	return 1;
3ec097421   Christoph Lameter   SLUB: Simplify de...
915

81819f0fc   Christoph Lameter   SLUB core
916
  fail:
249226847   Christoph Lameter   SLUB: change erro...
917
  	slab_fix(s, "Object at 0x%p not freed", object);
81819f0fc   Christoph Lameter   SLUB core
918
919
  	return 0;
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
920
921
  static int __init setup_slub_debug(char *str)
  {
f0630fff5   Christoph Lameter   SLUB: support slu...
922
923
924
925
926
927
928
929
930
931
932
933
934
  	slub_debug = DEBUG_DEFAULT_FLAGS;
  	if (*str++ != '=' || !*str)
  		/*
  		 * No options specified. Switch on full debugging.
  		 */
  		goto out;
  
  	if (*str == ',')
  		/*
  		 * No options but restriction on slabs. This means full
  		 * debugging for slabs matching a pattern.
  		 */
  		goto check_slabs;
fa5ec8a1f   David Rientjes   slub: add option ...
935
936
937
938
939
940
941
942
  	if (tolower(*str) == 'o') {
  		/*
  		 * Avoid enabling debugging on caches if its minimum order
  		 * would increase as a result.
  		 */
  		disable_higher_order_debug = 1;
  		goto out;
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
943
944
945
946
947
948
949
950
951
952
  	slub_debug = 0;
  	if (*str == '-')
  		/*
  		 * Switch off all debugging measures.
  		 */
  		goto out;
  
  	/*
  	 * Determine which debug features should be switched on
  	 */
064287807   Pekka Enberg   SLUB: Fix coding ...
953
  	for (; *str && *str != ','; str++) {
f0630fff5   Christoph Lameter   SLUB: support slu...
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
  		switch (tolower(*str)) {
  		case 'f':
  			slub_debug |= SLAB_DEBUG_FREE;
  			break;
  		case 'z':
  			slub_debug |= SLAB_RED_ZONE;
  			break;
  		case 'p':
  			slub_debug |= SLAB_POISON;
  			break;
  		case 'u':
  			slub_debug |= SLAB_STORE_USER;
  			break;
  		case 't':
  			slub_debug |= SLAB_TRACE;
  			break;
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
970
971
972
  		case 'a':
  			slub_debug |= SLAB_FAILSLAB;
  			break;
f0630fff5   Christoph Lameter   SLUB: support slu...
973
974
  		default:
  			printk(KERN_ERR "slub_debug option '%c' "
064287807   Pekka Enberg   SLUB: Fix coding ...
975
976
  				"unknown. skipped
  ", *str);
f0630fff5   Christoph Lameter   SLUB: support slu...
977
  		}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
978
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
979
  check_slabs:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
980
981
  	if (*str == ',')
  		slub_debug_slabs = str + 1;
f0630fff5   Christoph Lameter   SLUB: support slu...
982
  out:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
983
984
985
986
  	return 1;
  }
  
  __setup("slub_debug", setup_slub_debug);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
987
988
  static unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
989
  	void (*ctor)(void *))
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
990
991
  {
  	/*
e153362a5   Christoph Lameter   slub: Remove objs...
992
  	 * Enable debugging if selected on the kernel commandline.
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
993
  	 */
e153362a5   Christoph Lameter   slub: Remove objs...
994
  	if (slub_debug && (!slub_debug_slabs ||
3de472138   David Rientjes   slub: use size an...
995
996
  		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
  		flags |= slub_debug;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
997
998
  
  	return flags;
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
999
1000
  }
  #else
3ec097421   Christoph Lameter   SLUB: Simplify de...
1001
1002
  static inline void setup_object_debug(struct kmem_cache *s,
  			struct page *page, void *object) {}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1003

3ec097421   Christoph Lameter   SLUB: Simplify de...
1004
  static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1005
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1006

3ec097421   Christoph Lameter   SLUB: Simplify de...
1007
  static inline int free_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1008
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1009

41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1010
1011
1012
1013
  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  			{ return 1; }
  static inline int check_object(struct kmem_cache *s, struct page *page,
  			void *object, int active) { return 1; }
3ec097421   Christoph Lameter   SLUB: Simplify de...
1014
  static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1015
1016
  static inline unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1017
  	void (*ctor)(void *))
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1018
1019
1020
  {
  	return flags;
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1021
  #define slub_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1022

fdaa45e95   Ingo Molnar   slub: Fix build e...
1023
  #define disable_higher_order_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1024
1025
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  							{ return 0; }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1026
1027
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  							{ return 0; }
205ab99dd   Christoph Lameter   slub: Update stat...
1028
1029
1030
1031
  static inline void inc_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
  static inline void dec_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1032
  #endif
205ab99dd   Christoph Lameter   slub: Update stat...
1033

81819f0fc   Christoph Lameter   SLUB core
1034
1035
1036
  /*
   * Slab allocation and freeing
   */
65c3376aa   Christoph Lameter   slub: Fallback to...
1037
1038
1039
1040
  static inline struct page *alloc_slab_page(gfp_t flags, int node,
  					struct kmem_cache_order_objects oo)
  {
  	int order = oo_order(oo);
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1041
  	flags |= __GFP_NOTRACK;
65c3376aa   Christoph Lameter   slub: Fallback to...
1042
1043
1044
  	if (node == -1)
  		return alloc_pages(flags, order);
  	else
6b65aaf30   Minchan Kim   slub: Use alloc_p...
1045
  		return alloc_pages_exact_node(node, flags, order);
65c3376aa   Christoph Lameter   slub: Fallback to...
1046
  }
81819f0fc   Christoph Lameter   SLUB core
1047
1048
  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
1049
  	struct page *page;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1050
  	struct kmem_cache_order_objects oo = s->oo;
ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1051
  	gfp_t alloc_gfp;
81819f0fc   Christoph Lameter   SLUB core
1052

b7a49f0d4   Christoph Lameter   slub: Determine g...
1053
  	flags |= s->allocflags;
e12ba74d8   Mel Gorman   Group short-lived...
1054

ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1055
1056
1057
1058
1059
1060
1061
  	/*
  	 * Let the initial higher-order allocation fail under memory pressure
  	 * so we fall-back to the minimum order allocation.
  	 */
  	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
  
  	page = alloc_slab_page(alloc_gfp, node, oo);
65c3376aa   Christoph Lameter   slub: Fallback to...
1062
1063
1064
1065
1066
1067
1068
1069
1070
  	if (unlikely(!page)) {
  		oo = s->min;
  		/*
  		 * Allocation may have failed due to fragmentation.
  		 * Try a lower order alloc if possible
  		 */
  		page = alloc_slab_page(flags, node, oo);
  		if (!page)
  			return NULL;
81819f0fc   Christoph Lameter   SLUB core
1071

84e554e68   Christoph Lameter   SLUB: Make slub s...
1072
  		stat(s, ORDER_FALLBACK);
65c3376aa   Christoph Lameter   slub: Fallback to...
1073
  	}
5a896d9e7   Vegard Nossum   slub: add hooks f...
1074
1075
  
  	if (kmemcheck_enabled
5086c389c   Amerigo Wang   SLUB: Fix some co...
1076
  		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
  		int pages = 1 << oo_order(oo);
  
  		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
  
  		/*
  		 * Objects from caches that have a constructor don't get
  		 * cleared when they're allocated, so we need to do it here.
  		 */
  		if (s->ctor)
  			kmemcheck_mark_uninitialized_pages(page, pages);
  		else
  			kmemcheck_mark_unallocated_pages(page, pages);
5a896d9e7   Vegard Nossum   slub: add hooks f...
1089
  	}
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1090
  	page->objects = oo_objects(oo);
81819f0fc   Christoph Lameter   SLUB core
1091
1092
1093
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
65c3376aa   Christoph Lameter   slub: Fallback to...
1094
  		1 << oo_order(oo));
81819f0fc   Christoph Lameter   SLUB core
1095
1096
1097
1098
1099
1100
1101
  
  	return page;
  }
  
  static void setup_object(struct kmem_cache *s, struct page *page,
  				void *object)
  {
3ec097421   Christoph Lameter   SLUB: Simplify de...
1102
  	setup_object_debug(s, page, object);
4f1049345   Christoph Lameter   slab allocators: ...
1103
  	if (unlikely(s->ctor))
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1104
  		s->ctor(object);
81819f0fc   Christoph Lameter   SLUB core
1105
1106
1107
1108
1109
  }
  
  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
1110
  	void *start;
81819f0fc   Christoph Lameter   SLUB core
1111
1112
  	void *last;
  	void *p;
6cb062296   Christoph Lameter   Categorize GFP flags
1113
  	BUG_ON(flags & GFP_SLAB_BUG_MASK);
81819f0fc   Christoph Lameter   SLUB core
1114

6cb062296   Christoph Lameter   Categorize GFP flags
1115
1116
  	page = allocate_slab(s,
  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
81819f0fc   Christoph Lameter   SLUB core
1117
1118
  	if (!page)
  		goto out;
205ab99dd   Christoph Lameter   slub: Update stat...
1119
  	inc_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1120
1121
1122
1123
  	page->slab = s;
  	page->flags |= 1 << PG_slab;
  	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
  			SLAB_STORE_USER | SLAB_TRACE))
8a38082d2   Andy Whitcroft   slub: record page...
1124
  		__SetPageSlubDebug(page);
81819f0fc   Christoph Lameter   SLUB core
1125
1126
  
  	start = page_address(page);
81819f0fc   Christoph Lameter   SLUB core
1127
1128
  
  	if (unlikely(s->flags & SLAB_POISON))
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1129
  		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
81819f0fc   Christoph Lameter   SLUB core
1130
1131
  
  	last = start;
224a88be4   Christoph Lameter   slub: for_each_ob...
1132
  	for_each_object(p, s, start, page->objects) {
81819f0fc   Christoph Lameter   SLUB core
1133
1134
1135
1136
1137
  		setup_object(s, page, last);
  		set_freepointer(s, last, p);
  		last = p;
  	}
  	setup_object(s, page, last);
a973e9dd1   Christoph Lameter   Revert "unique en...
1138
  	set_freepointer(s, last, NULL);
81819f0fc   Christoph Lameter   SLUB core
1139
1140
1141
1142
  
  	page->freelist = start;
  	page->inuse = 0;
  out:
81819f0fc   Christoph Lameter   SLUB core
1143
1144
1145
1146
1147
  	return page;
  }
  
  static void __free_slab(struct kmem_cache *s, struct page *page)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1148
1149
  	int order = compound_order(page);
  	int pages = 1 << order;
81819f0fc   Christoph Lameter   SLUB core
1150

8a38082d2   Andy Whitcroft   slub: record page...
1151
  	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
81819f0fc   Christoph Lameter   SLUB core
1152
1153
1154
  		void *p;
  
  		slab_pad_check(s, page);
224a88be4   Christoph Lameter   slub: for_each_ob...
1155
1156
  		for_each_object(p, s, page_address(page),
  						page->objects)
81819f0fc   Christoph Lameter   SLUB core
1157
  			check_object(s, page, p, 0);
8a38082d2   Andy Whitcroft   slub: record page...
1158
  		__ClearPageSlubDebug(page);
81819f0fc   Christoph Lameter   SLUB core
1159
  	}
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1160
  	kmemcheck_free_shadow(page, compound_order(page));
5a896d9e7   Vegard Nossum   slub: add hooks f...
1161

81819f0fc   Christoph Lameter   SLUB core
1162
1163
1164
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
064287807   Pekka Enberg   SLUB: Fix coding ...
1165
  		-pages);
81819f0fc   Christoph Lameter   SLUB core
1166

49bd5221c   Christoph Lameter   slub: Move map/fl...
1167
1168
  	__ClearPageSlab(page);
  	reset_page_mapcount(page);
1eb5ac646   Nick Piggin   mm: SLUB fix recl...
1169
1170
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += pages;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1171
  	__free_pages(page, order);
81819f0fc   Christoph Lameter   SLUB core
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
  }
  
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct page *page;
  
  	page = container_of((struct list_head *)h, struct page, lru);
  	__free_slab(page->slab, page);
  }
  
  static void free_slab(struct kmem_cache *s, struct page *page)
  {
  	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
  		/*
  		 * RCU free overloads the RCU head over the LRU
  		 */
  		struct rcu_head *head = (void *)&page->lru;
  
  		call_rcu(head, rcu_free_slab);
  	} else
  		__free_slab(s, page);
  }
  
  static void discard_slab(struct kmem_cache *s, struct page *page)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
1197
  	dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
  	free_slab(s, page);
  }
  
  /*
   * Per slab locking using the pagelock
   */
  static __always_inline void slab_lock(struct page *page)
  {
  	bit_spin_lock(PG_locked, &page->flags);
  }
  
  static __always_inline void slab_unlock(struct page *page)
  {
a76d35462   Nick Piggin   Use non atomic un...
1211
  	__bit_spin_unlock(PG_locked, &page->flags);
81819f0fc   Christoph Lameter   SLUB core
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
  }
  
  static __always_inline int slab_trylock(struct page *page)
  {
  	int rc = 1;
  
  	rc = bit_spin_trylock(PG_locked, &page->flags);
  	return rc;
  }
  
  /*
   * Management of partially allocated slabs
   */
7c2e132c5   Christoph Lameter   Add parameter to ...
1225
1226
  static void add_partial(struct kmem_cache_node *n,
  				struct page *page, int tail)
81819f0fc   Christoph Lameter   SLUB core
1227
  {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1228
1229
  	spin_lock(&n->list_lock);
  	n->nr_partial++;
7c2e132c5   Christoph Lameter   Add parameter to ...
1230
1231
1232
1233
  	if (tail)
  		list_add_tail(&page->lru, &n->partial);
  	else
  		list_add(&page->lru, &n->partial);
81819f0fc   Christoph Lameter   SLUB core
1234
1235
  	spin_unlock(&n->list_lock);
  }
0121c619d   Christoph Lameter   slub: Whitespace ...
1236
  static void remove_partial(struct kmem_cache *s, struct page *page)
81819f0fc   Christoph Lameter   SLUB core
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
  {
  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  
  	spin_lock(&n->list_lock);
  	list_del(&page->lru);
  	n->nr_partial--;
  	spin_unlock(&n->list_lock);
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1247
   * Lock slab and remove from the partial list.
81819f0fc   Christoph Lameter   SLUB core
1248
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1249
   * Must hold list_lock.
81819f0fc   Christoph Lameter   SLUB core
1250
   */
0121c619d   Christoph Lameter   slub: Whitespace ...
1251
1252
  static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
  							struct page *page)
81819f0fc   Christoph Lameter   SLUB core
1253
1254
1255
1256
  {
  	if (slab_trylock(page)) {
  		list_del(&page->lru);
  		n->nr_partial--;
8a38082d2   Andy Whitcroft   slub: record page...
1257
  		__SetPageSlubFrozen(page);
81819f0fc   Christoph Lameter   SLUB core
1258
1259
1260
1261
1262
1263
  		return 1;
  	}
  	return 0;
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1264
   * Try to allocate a partial slab from a specific node.
81819f0fc   Christoph Lameter   SLUB core
1265
1266
1267
1268
1269
1270
1271
1272
   */
  static struct page *get_partial_node(struct kmem_cache_node *n)
  {
  	struct page *page;
  
  	/*
  	 * Racy check. If we mistakenly see no partial slabs then we
  	 * just allocate an empty slab. If we mistakenly try to get a
672bba3a4   Christoph Lameter   SLUB: update comm...
1273
1274
  	 * partial slab and there is none available then get_partials()
  	 * will return NULL.
81819f0fc   Christoph Lameter   SLUB core
1275
1276
1277
1278
1279
1280
  	 */
  	if (!n || !n->nr_partial)
  		return NULL;
  
  	spin_lock(&n->list_lock);
  	list_for_each_entry(page, &n->partial, lru)
4b6f07504   Christoph Lameter   SLUB: Define func...
1281
  		if (lock_and_freeze_slab(n, page))
81819f0fc   Christoph Lameter   SLUB core
1282
1283
1284
1285
1286
1287
1288
1289
  			goto out;
  	page = NULL;
  out:
  	spin_unlock(&n->list_lock);
  	return page;
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1290
   * Get a page from somewhere. Search in increasing NUMA distances.
81819f0fc   Christoph Lameter   SLUB core
1291
1292
1293
1294
1295
   */
  static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
  {
  #ifdef CONFIG_NUMA
  	struct zonelist *zonelist;
dd1a239f6   Mel Gorman   mm: have zonelist...
1296
  	struct zoneref *z;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1297
1298
  	struct zone *zone;
  	enum zone_type high_zoneidx = gfp_zone(flags);
81819f0fc   Christoph Lameter   SLUB core
1299
1300
1301
  	struct page *page;
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1302
1303
1304
1305
  	 * The defrag ratio allows a configuration of the tradeoffs between
  	 * inter node defragmentation and node local allocations. A lower
  	 * defrag_ratio increases the tendency to do local allocations
  	 * instead of attempting to obtain partial slabs from other nodes.
81819f0fc   Christoph Lameter   SLUB core
1306
  	 *
672bba3a4   Christoph Lameter   SLUB: update comm...
1307
1308
1309
1310
  	 * If the defrag_ratio is set to 0 then kmalloc() always
  	 * returns node local objects. If the ratio is higher then kmalloc()
  	 * may return off node objects because partial slabs are obtained
  	 * from other nodes and filled up.
81819f0fc   Christoph Lameter   SLUB core
1311
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
1312
  	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
672bba3a4   Christoph Lameter   SLUB: update comm...
1313
1314
1315
1316
1317
  	 * defrag_ratio = 1000) then every (well almost) allocation will
  	 * first attempt to defrag slab caches on other nodes. This means
  	 * scanning over all nodes to look for partial slabs which may be
  	 * expensive if we do it every time we are trying to find a slab
  	 * with available objects.
81819f0fc   Christoph Lameter   SLUB core
1318
  	 */
9824601ea   Christoph Lameter   SLUB: rename defr...
1319
1320
  	if (!s->remote_node_defrag_ratio ||
  			get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0fc   Christoph Lameter   SLUB core
1321
  		return NULL;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1322
  	get_mems_allowed();
0e88460da   Mel Gorman   mm: introduce nod...
1323
  	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
54a6eb5c4   Mel Gorman   mm: use two zonel...
1324
  	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
81819f0fc   Christoph Lameter   SLUB core
1325
  		struct kmem_cache_node *n;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1326
  		n = get_node(s, zone_to_nid(zone));
81819f0fc   Christoph Lameter   SLUB core
1327

54a6eb5c4   Mel Gorman   mm: use two zonel...
1328
  		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
3b89d7d88   David Rientjes   slub: move min_pa...
1329
  				n->nr_partial > s->min_partial) {
81819f0fc   Christoph Lameter   SLUB core
1330
  			page = get_partial_node(n);
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1331
1332
  			if (page) {
  				put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1333
  				return page;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1334
  			}
81819f0fc   Christoph Lameter   SLUB core
1335
1336
  		}
  	}
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1337
  	put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
  #endif
  	return NULL;
  }
  
  /*
   * Get a partial page, lock it and return it.
   */
  static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
  	int searchnode = (node == -1) ? numa_node_id() : node;
  
  	page = get_partial_node(get_node(s, searchnode));
  	if (page || (flags & __GFP_THISNODE))
  		return page;
  
  	return get_any_partial(s, flags);
  }
  
  /*
   * Move a page back to the lists.
   *
   * Must be called with the slab lock held.
   *
   * On exit the slab lock will have been dropped.
   */
7c2e132c5   Christoph Lameter   Add parameter to ...
1364
  static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
81819f0fc   Christoph Lameter   SLUB core
1365
  {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1366
  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
8a38082d2   Andy Whitcroft   slub: record page...
1367
  	__ClearPageSlubFrozen(page);
81819f0fc   Christoph Lameter   SLUB core
1368
  	if (page->inuse) {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1369

a973e9dd1   Christoph Lameter   Revert "unique en...
1370
  		if (page->freelist) {
7c2e132c5   Christoph Lameter   Add parameter to ...
1371
  			add_partial(n, page, tail);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1372
  			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1373
  		} else {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1374
  			stat(s, DEACTIVATE_FULL);
8a38082d2   Andy Whitcroft   slub: record page...
1375
1376
  			if (SLABDEBUG && PageSlubDebug(page) &&
  						(s->flags & SLAB_STORE_USER))
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1377
1378
  				add_full(n, page);
  		}
81819f0fc   Christoph Lameter   SLUB core
1379
1380
  		slab_unlock(page);
  	} else {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1381
  		stat(s, DEACTIVATE_EMPTY);
3b89d7d88   David Rientjes   slub: move min_pa...
1382
  		if (n->nr_partial < s->min_partial) {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1383
  			/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1384
1385
1386
  			 * Adding an empty slab to the partial slabs in order
  			 * to avoid page allocator overhead. This slab needs
  			 * to come after the other slabs with objects in
6446faa2f   Christoph Lameter   slub: Fix up comm...
1387
1388
1389
  			 * so that the others get filled first. That way the
  			 * size of the partial list stays small.
  			 *
0121c619d   Christoph Lameter   slub: Whitespace ...
1390
1391
  			 * kmem_cache_shrink can reclaim any empty slabs from
  			 * the partial list.
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1392
  			 */
7c2e132c5   Christoph Lameter   Add parameter to ...
1393
  			add_partial(n, page, 1);
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1394
1395
1396
  			slab_unlock(page);
  		} else {
  			slab_unlock(page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1397
  			stat(s, FREE_SLAB);
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1398
1399
  			discard_slab(s, page);
  		}
81819f0fc   Christoph Lameter   SLUB core
1400
1401
1402
1403
1404
1405
  	}
  }
  
  /*
   * Remove the cpu slab
   */
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1406
  static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1407
  {
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1408
  	struct page *page = c->page;
7c2e132c5   Christoph Lameter   Add parameter to ...
1409
  	int tail = 1;
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1410

b773ad736   Christoph Lameter   slub statistics: ...
1411
  	if (page->freelist)
84e554e68   Christoph Lameter   SLUB: Make slub s...
1412
  		stat(s, DEACTIVATE_REMOTE_FREES);
894b8788d   Christoph Lameter   slub: support con...
1413
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
1414
  	 * Merge cpu freelist into slab freelist. Typically we get here
894b8788d   Christoph Lameter   slub: support con...
1415
1416
1417
  	 * because both freelists are empty. So this is unlikely
  	 * to occur.
  	 */
a973e9dd1   Christoph Lameter   Revert "unique en...
1418
  	while (unlikely(c->freelist)) {
894b8788d   Christoph Lameter   slub: support con...
1419
  		void **object;
7c2e132c5   Christoph Lameter   Add parameter to ...
1420
  		tail = 0;	/* Hot objects. Put the slab first */
894b8788d   Christoph Lameter   slub: support con...
1421
  		/* Retrieve object from cpu_freelist */
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1422
  		object = c->freelist;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1423
  		c->freelist = get_freepointer(s, c->freelist);
894b8788d   Christoph Lameter   slub: support con...
1424
1425
  
  		/* And put onto the regular freelist */
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1426
  		set_freepointer(s, object, page->freelist);
894b8788d   Christoph Lameter   slub: support con...
1427
1428
1429
  		page->freelist = object;
  		page->inuse--;
  	}
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1430
  	c->page = NULL;
7c2e132c5   Christoph Lameter   Add parameter to ...
1431
  	unfreeze_slab(s, page, tail);
81819f0fc   Christoph Lameter   SLUB core
1432
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1433
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1434
  {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1435
  	stat(s, CPUSLAB_FLUSH);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1436
1437
  	slab_lock(c->page);
  	deactivate_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1438
1439
1440
1441
  }
  
  /*
   * Flush cpu slab.
6446faa2f   Christoph Lameter   slub: Fix up comm...
1442
   *
81819f0fc   Christoph Lameter   SLUB core
1443
1444
   * Called from IPI handler with interrupts disabled.
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
1445
  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0fc   Christoph Lameter   SLUB core
1446
  {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1447
  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0fc   Christoph Lameter   SLUB core
1448

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1449
1450
  	if (likely(c && c->page))
  		flush_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1451
1452
1453
1454
1455
  }
  
  static void flush_cpu_slab(void *d)
  {
  	struct kmem_cache *s = d;
81819f0fc   Christoph Lameter   SLUB core
1456

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1457
  	__flush_cpu_slab(s, smp_processor_id());
81819f0fc   Christoph Lameter   SLUB core
1458
1459
1460
1461
  }
  
  static void flush_all(struct kmem_cache *s)
  {
15c8b6c1a   Jens Axboe   on_each_cpu(): ki...
1462
  	on_each_cpu(flush_cpu_slab, s, 1);
81819f0fc   Christoph Lameter   SLUB core
1463
1464
1465
  }
  
  /*
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
   * Check if the objects in a per cpu structure fit numa
   * locality expectations.
   */
  static inline int node_match(struct kmem_cache_cpu *c, int node)
  {
  #ifdef CONFIG_NUMA
  	if (node != -1 && c->node != node)
  		return 0;
  #endif
  	return 1;
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
  static int count_free(struct page *page)
  {
  	return page->objects - page->inuse;
  }
  
  static unsigned long count_partial(struct kmem_cache_node *n,
  					int (*get_count)(struct page *))
  {
  	unsigned long flags;
  	unsigned long x = 0;
  	struct page *page;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  	list_for_each_entry(page, &n->partial, lru)
  		x += get_count(page);
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return x;
  }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1495
1496
1497
1498
1499
1500
1501
1502
  static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return atomic_long_read(&n->total_objects);
  #else
  	return 0;
  #endif
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
  static noinline void
  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
  {
  	int node;
  
  	printk(KERN_WARNING
  		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)
  ",
  		nid, gfpflags);
  	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
  		"default order: %d, min order: %d
  ", s->name, s->objsize,
  		s->size, oo_order(s->oo), oo_order(s->min));
fa5ec8a1f   David Rientjes   slub: add option ...
1516
1517
1518
1519
  	if (oo_order(s->min) > get_order(s->objsize))
  		printk(KERN_WARNING "  %s debugging increased min order, use "
  		       "slub_debug=O to disable.
  ", s->name);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1520
1521
1522
1523
1524
1525
1526
1527
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long nr_slabs;
  		unsigned long nr_objs;
  		unsigned long nr_free;
  
  		if (!n)
  			continue;
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1528
1529
1530
  		nr_free  = count_partial(n, count_free);
  		nr_slabs = node_nr_slabs(n);
  		nr_objs  = node_nr_objs(n);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1531
1532
1533
1534
1535
1536
1537
  
  		printk(KERN_WARNING
  			"  node %d: slabs: %ld, objs: %ld, free: %ld
  ",
  			node, nr_slabs, nr_objs, nr_free);
  	}
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1538
  /*
894b8788d   Christoph Lameter   slub: support con...
1539
1540
1541
1542
   * Slow path. The lockless freelist is empty or we need to perform
   * debugging duties.
   *
   * Interrupts are disabled.
81819f0fc   Christoph Lameter   SLUB core
1543
   *
894b8788d   Christoph Lameter   slub: support con...
1544
1545
1546
   * Processing is still very fast if new objects have been freed to the
   * regular freelist. In that case we simply take over the regular freelist
   * as the lockless freelist and zap the regular freelist.
81819f0fc   Christoph Lameter   SLUB core
1547
   *
894b8788d   Christoph Lameter   slub: support con...
1548
1549
1550
   * If that is not working then we fall back to the partial lists. We take the
   * first element of the freelist as the object to allocate now and move the
   * rest of the freelist to the lockless freelist.
81819f0fc   Christoph Lameter   SLUB core
1551
   *
894b8788d   Christoph Lameter   slub: support con...
1552
   * And if we were unable to get a new slab from the partial slab lists then
6446faa2f   Christoph Lameter   slub: Fix up comm...
1553
1554
   * we need to allocate a new slab. This is the slowest path since it involves
   * a call to the page allocator and the setup of a new slab.
81819f0fc   Christoph Lameter   SLUB core
1555
   */
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1556
1557
  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  			  unsigned long addr, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1558
  {
81819f0fc   Christoph Lameter   SLUB core
1559
  	void **object;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1560
  	struct page *new;
81819f0fc   Christoph Lameter   SLUB core
1561

e72e9c23e   Linus Torvalds   Revert "SLUB: rem...
1562
1563
  	/* We handle __GFP_ZERO in the caller */
  	gfpflags &= ~__GFP_ZERO;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1564
  	if (!c->page)
81819f0fc   Christoph Lameter   SLUB core
1565
  		goto new_slab;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1566
1567
  	slab_lock(c->page);
  	if (unlikely(!node_match(c, node)))
81819f0fc   Christoph Lameter   SLUB core
1568
  		goto another_slab;
6446faa2f   Christoph Lameter   slub: Fix up comm...
1569

84e554e68   Christoph Lameter   SLUB: Make slub s...
1570
  	stat(s, ALLOC_REFILL);
6446faa2f   Christoph Lameter   slub: Fix up comm...
1571

894b8788d   Christoph Lameter   slub: support con...
1572
  load_freelist:
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1573
  	object = c->page->freelist;
a973e9dd1   Christoph Lameter   Revert "unique en...
1574
  	if (unlikely(!object))
81819f0fc   Christoph Lameter   SLUB core
1575
  		goto another_slab;
8a38082d2   Andy Whitcroft   slub: record page...
1576
  	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
81819f0fc   Christoph Lameter   SLUB core
1577
  		goto debug;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1578
  	c->freelist = get_freepointer(s, object);
39b264641   Christoph Lameter   slub: Store max n...
1579
  	c->page->inuse = c->page->objects;
a973e9dd1   Christoph Lameter   Revert "unique en...
1580
  	c->page->freelist = NULL;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1581
  	c->node = page_to_nid(c->page);
1f84260c8   Christoph Lameter   SLUB: Alternate f...
1582
  unlock_out:
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1583
  	slab_unlock(c->page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1584
  	stat(s, ALLOC_SLOWPATH);
81819f0fc   Christoph Lameter   SLUB core
1585
1586
1587
  	return object;
  
  another_slab:
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1588
  	deactivate_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1589
1590
  
  new_slab:
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1591
1592
1593
  	new = get_partial(s, gfpflags, node);
  	if (new) {
  		c->page = new;
84e554e68   Christoph Lameter   SLUB: Make slub s...
1594
  		stat(s, ALLOC_FROM_PARTIAL);
894b8788d   Christoph Lameter   slub: support con...
1595
  		goto load_freelist;
81819f0fc   Christoph Lameter   SLUB core
1596
  	}
b811c202a   Christoph Lameter   SLUB: simplify IR...
1597
1598
  	if (gfpflags & __GFP_WAIT)
  		local_irq_enable();
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1599
  	new = new_slab(s, gfpflags, node);
b811c202a   Christoph Lameter   SLUB: simplify IR...
1600
1601
1602
  
  	if (gfpflags & __GFP_WAIT)
  		local_irq_disable();
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1603
  	if (new) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1604
  		c = __this_cpu_ptr(s->cpu_slab);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1605
  		stat(s, ALLOC_SLAB);
05aa34503   Christoph Lameter   SLUB: Fix memory ...
1606
  		if (c->page)
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1607
  			flush_slab(s, c);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1608
  		slab_lock(new);
8a38082d2   Andy Whitcroft   slub: record page...
1609
  		__SetPageSlubFrozen(new);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1610
  		c->page = new;
4b6f07504   Christoph Lameter   SLUB: Define func...
1611
  		goto load_freelist;
81819f0fc   Christoph Lameter   SLUB core
1612
  	}
95f859893   Pekka Enberg   SLUB: Don't print...
1613
1614
  	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
  		slab_out_of_memory(s, gfpflags, node);
71c7a06ff   Christoph Lameter   slub: Fallback to...
1615
  	return NULL;
81819f0fc   Christoph Lameter   SLUB core
1616
  debug:
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1617
  	if (!alloc_debug_processing(s, c->page, object, addr))
81819f0fc   Christoph Lameter   SLUB core
1618
  		goto another_slab;
894b8788d   Christoph Lameter   slub: support con...
1619

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1620
  	c->page->inuse++;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1621
  	c->page->freelist = get_freepointer(s, object);
ee3c72a14   Christoph Lameter   SLUB: Avoid touch...
1622
  	c->node = -1;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
1623
  	goto unlock_out;
894b8788d   Christoph Lameter   slub: support con...
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
  }
  
  /*
   * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
   * have the fastpath folded into their functions. So no function call
   * overhead for requests that can be satisfied on the fastpath.
   *
   * The fastpath works by first checking if the lockless freelist can be used.
   * If not then __slab_alloc is called for slow processing.
   *
   * Otherwise we can simply pick the next object from the lockless free list.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
1636
  static __always_inline void *slab_alloc(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1637
  		gfp_t gfpflags, int node, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
1638
  {
894b8788d   Christoph Lameter   slub: support con...
1639
  	void **object;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1640
  	struct kmem_cache_cpu *c;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
1641
  	unsigned long flags;
dcce284a2   Benjamin Herrenschmidt   mm: Extend gfp ma...
1642
  	gfpflags &= gfp_allowed_mask;
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
1643

cf40bd16f   Nick Piggin   lockdep: annotate...
1644
  	lockdep_trace_alloc(gfpflags);
89124d706   OGAWA Hirofumi   slub: Add might_s...
1645
  	might_sleep_if(gfpflags & __GFP_WAIT);
3c506efd7   Pekka Enberg   Merge branch 'top...
1646

4c13dd3b4   Dmitry Monakhov   failslab: add abi...
1647
  	if (should_failslab(s->objsize, gfpflags, s->flags))
773ff60e8   Akinobu Mita   SLUB: failslab su...
1648
  		return NULL;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
1649

894b8788d   Christoph Lameter   slub: support con...
1650
  	local_irq_save(flags);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1651
1652
  	c = __this_cpu_ptr(s->cpu_slab);
  	object = c->freelist;
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1653
  	if (unlikely(!object || !node_match(c, node)))
894b8788d   Christoph Lameter   slub: support con...
1654

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1655
  		object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788d   Christoph Lameter   slub: support con...
1656
1657
  
  	else {
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1658
  		c->freelist = get_freepointer(s, object);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1659
  		stat(s, ALLOC_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
1660
1661
  	}
  	local_irq_restore(flags);
d07dbea46   Christoph Lameter   Slab allocators: ...
1662

74e2134ff   Pekka Enberg   SLUB: Fix __GFP_Z...
1663
  	if (unlikely(gfpflags & __GFP_ZERO) && object)
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1664
  		memset(object, 0, s->objsize);
d07dbea46   Christoph Lameter   Slab allocators: ...
1665

ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1666
1667
  	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
  	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
5a896d9e7   Vegard Nossum   slub: add hooks f...
1668

894b8788d   Christoph Lameter   slub: support con...
1669
  	return object;
81819f0fc   Christoph Lameter   SLUB core
1670
1671
1672
1673
  }
  
  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1674
  	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
1675
  	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1676
1677
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
1678
1679
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
0f24f1287   Li Zefan   tracing, slab: De...
1680
  #ifdef CONFIG_TRACING
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1681
1682
1683
1684
1685
1686
  void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
  {
  	return slab_alloc(s, gfpflags, -1, _RET_IP_);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_notrace);
  #endif
81819f0fc   Christoph Lameter   SLUB core
1687
1688
1689
  #ifdef CONFIG_NUMA
  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1690
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
1691
1692
  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
  				    s->objsize, s->size, gfpflags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1693
1694
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
1695
1696
1697
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
  #endif
0f24f1287   Li Zefan   tracing, slab: De...
1698
  #ifdef CONFIG_TRACING
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1699
1700
1701
1702
1703
1704
1705
1706
  void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
  				    gfp_t gfpflags,
  				    int node)
  {
  	return slab_alloc(s, gfpflags, node, _RET_IP_);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  #endif
81819f0fc   Christoph Lameter   SLUB core
1707
  /*
894b8788d   Christoph Lameter   slub: support con...
1708
1709
   * Slow patch handling. This may still be called frequently since objects
   * have a longer lifetime than the cpu slabs in most processing loads.
81819f0fc   Christoph Lameter   SLUB core
1710
   *
894b8788d   Christoph Lameter   slub: support con...
1711
1712
1713
   * So we still attempt to reduce cache line usage. Just take the slab
   * lock and free the item. If there is no additional partial page
   * handling required then we can return immediately.
81819f0fc   Christoph Lameter   SLUB core
1714
   */
894b8788d   Christoph Lameter   slub: support con...
1715
  static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1716
  			void *x, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
1717
1718
1719
  {
  	void *prior;
  	void **object = (void *)x;
81819f0fc   Christoph Lameter   SLUB core
1720

84e554e68   Christoph Lameter   SLUB: Make slub s...
1721
  	stat(s, FREE_SLOWPATH);
81819f0fc   Christoph Lameter   SLUB core
1722
  	slab_lock(page);
8a38082d2   Andy Whitcroft   slub: record page...
1723
  	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
81819f0fc   Christoph Lameter   SLUB core
1724
  		goto debug;
6446faa2f   Christoph Lameter   slub: Fix up comm...
1725

81819f0fc   Christoph Lameter   SLUB core
1726
  checks_ok:
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1727
1728
  	prior = page->freelist;
  	set_freepointer(s, object, prior);
81819f0fc   Christoph Lameter   SLUB core
1729
1730
  	page->freelist = object;
  	page->inuse--;
8a38082d2   Andy Whitcroft   slub: record page...
1731
  	if (unlikely(PageSlubFrozen(page))) {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1732
  		stat(s, FREE_FROZEN);
81819f0fc   Christoph Lameter   SLUB core
1733
  		goto out_unlock;
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1734
  	}
81819f0fc   Christoph Lameter   SLUB core
1735
1736
1737
1738
1739
  
  	if (unlikely(!page->inuse))
  		goto slab_empty;
  
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
1740
  	 * Objects left in the slab. If it was not on the partial list before
81819f0fc   Christoph Lameter   SLUB core
1741
1742
  	 * then add it.
  	 */
a973e9dd1   Christoph Lameter   Revert "unique en...
1743
  	if (unlikely(!prior)) {
7c2e132c5   Christoph Lameter   Add parameter to ...
1744
  		add_partial(get_node(s, page_to_nid(page)), page, 1);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1745
  		stat(s, FREE_ADD_PARTIAL);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1746
  	}
81819f0fc   Christoph Lameter   SLUB core
1747
1748
1749
  
  out_unlock:
  	slab_unlock(page);
81819f0fc   Christoph Lameter   SLUB core
1750
1751
1752
  	return;
  
  slab_empty:
a973e9dd1   Christoph Lameter   Revert "unique en...
1753
  	if (prior) {
81819f0fc   Christoph Lameter   SLUB core
1754
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1755
  		 * Slab still on the partial list.
81819f0fc   Christoph Lameter   SLUB core
1756
1757
  		 */
  		remove_partial(s, page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1758
  		stat(s, FREE_REMOVE_PARTIAL);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
1759
  	}
81819f0fc   Christoph Lameter   SLUB core
1760
  	slab_unlock(page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
1761
  	stat(s, FREE_SLAB);
81819f0fc   Christoph Lameter   SLUB core
1762
  	discard_slab(s, page);
81819f0fc   Christoph Lameter   SLUB core
1763
1764
1765
  	return;
  
  debug:
3ec097421   Christoph Lameter   SLUB: Simplify de...
1766
  	if (!free_debug_processing(s, page, x, addr))
77c5e2d01   Christoph Lameter   slub: fix object ...
1767
  		goto out_unlock;
77c5e2d01   Christoph Lameter   slub: fix object ...
1768
  	goto checks_ok;
81819f0fc   Christoph Lameter   SLUB core
1769
  }
894b8788d   Christoph Lameter   slub: support con...
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
  /*
   * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
   * can perform fastpath freeing without additional function calls.
   *
   * The fastpath is only possible if we are freeing to the current cpu slab
   * of this processor. This typically the case if we have just allocated
   * the item before.
   *
   * If fastpath is not possible then fall back to __slab_free where we deal
   * with all sorts of special processing.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
1781
  static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1782
  			struct page *page, void *x, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
1783
1784
  {
  	void **object = (void *)x;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1785
  	struct kmem_cache_cpu *c;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
1786
  	unsigned long flags;
06f22f13f   Catalin Marinas   kmemleak: Add the...
1787
  	kmemleak_free_recursive(x, s->flags);
894b8788d   Christoph Lameter   slub: support con...
1788
  	local_irq_save(flags);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1789
  	c = __this_cpu_ptr(s->cpu_slab);
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1790
1791
  	kmemcheck_slab_free(s, object, s->objsize);
  	debug_check_no_locks_freed(object, s->objsize);
3ac7fe5a4   Thomas Gleixner   infrastructure to...
1792
  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1793
  		debug_check_no_obj_freed(object, s->objsize);
ee3c72a14   Christoph Lameter   SLUB: Avoid touch...
1794
  	if (likely(page == c->page && c->node >= 0)) {
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1795
  		set_freepointer(s, object, c->freelist);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1796
  		c->freelist = object;
84e554e68   Christoph Lameter   SLUB: Make slub s...
1797
  		stat(s, FREE_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
1798
  	} else
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
1799
  		__slab_free(s, page, x, addr);
894b8788d   Christoph Lameter   slub: support con...
1800
1801
1802
  
  	local_irq_restore(flags);
  }
81819f0fc   Christoph Lameter   SLUB core
1803
1804
  void kmem_cache_free(struct kmem_cache *s, void *x)
  {
77c5e2d01   Christoph Lameter   slub: fix object ...
1805
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
1806

b49af68ff   Christoph Lameter   Add virt_to_head_...
1807
  	page = virt_to_head_page(x);
81819f0fc   Christoph Lameter   SLUB core
1808

ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1809
  	slab_free(s, page, x, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
1810

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
1811
  	trace_kmem_cache_free(_RET_IP_, x);
81819f0fc   Christoph Lameter   SLUB core
1812
1813
  }
  EXPORT_SYMBOL(kmem_cache_free);
e9beef181   Cyrill Gorcunov   slub - fix get_ob...
1814
  /* Figure out on which slab page the object resides */
81819f0fc   Christoph Lameter   SLUB core
1815
1816
  static struct page *get_object_page(const void *x)
  {
b49af68ff   Christoph Lameter   Add virt_to_head_...
1817
  	struct page *page = virt_to_head_page(x);
81819f0fc   Christoph Lameter   SLUB core
1818
1819
1820
1821
1822
1823
1824
1825
  
  	if (!PageSlab(page))
  		return NULL;
  
  	return page;
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1826
1827
1828
1829
   * Object placement in a slab is made very easy because we always start at
   * offset 0. If we tune the size of the object to the alignment then we can
   * get the required alignment by putting one properly sized object after
   * another.
81819f0fc   Christoph Lameter   SLUB core
1830
1831
1832
1833
   *
   * Notice that the allocation order determines the sizes of the per cpu
   * caches. Each processor has always one slab available for allocations.
   * Increasing the allocation order reduces the number of times that slabs
672bba3a4   Christoph Lameter   SLUB: update comm...
1834
   * must be moved on and off the partial lists and is therefore a factor in
81819f0fc   Christoph Lameter   SLUB core
1835
   * locking overhead.
81819f0fc   Christoph Lameter   SLUB core
1836
1837
1838
1839
1840
1841
1842
1843
1844
   */
  
  /*
   * Mininum / Maximum order of slab pages. This influences locking overhead
   * and slab fragmentation. A higher order reduces the number of partial slabs
   * and increases the number of allocations possible without having to
   * take the list_lock.
   */
  static int slub_min_order;
114e9e89e   Christoph Lameter   slub: Drop DEFAUL...
1845
  static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506e   Christoph Lameter   slub: Calculate m...
1846
  static int slub_min_objects;
81819f0fc   Christoph Lameter   SLUB core
1847
1848
1849
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
672bba3a4   Christoph Lameter   SLUB: update comm...
1850
   * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0fc   Christoph Lameter   SLUB core
1851
1852
1853
1854
   */
  static int slub_nomerge;
  
  /*
81819f0fc   Christoph Lameter   SLUB core
1855
1856
   * Calculate the order of allocation given an slab object size.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1857
1858
1859
1860
   * The order of allocation has significant impact on performance and other
   * system components. Generally order 0 allocations should be preferred since
   * order 0 does not cause fragmentation in the page allocator. Larger objects
   * be problematic to put into order 0 slabs because there may be too much
c124f5b54   Christoph Lameter   slub: pack object...
1861
   * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a4   Christoph Lameter   SLUB: update comm...
1862
1863
1864
1865
1866
1867
   * would be wasted.
   *
   * In order to reach satisfactory performance we must ensure that a minimum
   * number of objects is in one slab. Otherwise we may generate too much
   * activity on the partial lists which requires taking the list_lock. This is
   * less a concern for large slabs though which are rarely used.
81819f0fc   Christoph Lameter   SLUB core
1868
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1869
1870
1871
1872
   * slub_max_order specifies the order where we begin to stop considering the
   * number of objects in a slab as critical. If we reach slub_max_order then
   * we try to keep the page order as low as possible. So we accept more waste
   * of space in favor of a small page order.
81819f0fc   Christoph Lameter   SLUB core
1873
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1874
1875
1876
1877
   * Higher order allocations also allow the placement of more objects in a
   * slab and thereby reduce object handling overhead. If the user has
   * requested a higher mininum order then we start with that one instead of
   * the smallest order which will fit the object.
81819f0fc   Christoph Lameter   SLUB core
1878
   */
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1879
1880
  static inline int slab_order(int size, int min_objects,
  				int max_order, int fract_leftover)
81819f0fc   Christoph Lameter   SLUB core
1881
1882
1883
  {
  	int order;
  	int rem;
6300ea750   Christoph Lameter   SLUB: ensure that...
1884
  	int min_order = slub_min_order;
81819f0fc   Christoph Lameter   SLUB core
1885

210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
1886
1887
  	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
  		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b264641   Christoph Lameter   slub: Store max n...
1888

6300ea750   Christoph Lameter   SLUB: ensure that...
1889
  	for (order = max(min_order,
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1890
1891
  				fls(min_objects * size - 1) - PAGE_SHIFT);
  			order <= max_order; order++) {
81819f0fc   Christoph Lameter   SLUB core
1892

5e6d444ea   Christoph Lameter   SLUB: rework slab...
1893
  		unsigned long slab_size = PAGE_SIZE << order;
81819f0fc   Christoph Lameter   SLUB core
1894

5e6d444ea   Christoph Lameter   SLUB: rework slab...
1895
  		if (slab_size < min_objects * size)
81819f0fc   Christoph Lameter   SLUB core
1896
1897
1898
  			continue;
  
  		rem = slab_size % size;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1899
  		if (rem <= slab_size / fract_leftover)
81819f0fc   Christoph Lameter   SLUB core
1900
1901
1902
  			break;
  
  	}
672bba3a4   Christoph Lameter   SLUB: update comm...
1903

81819f0fc   Christoph Lameter   SLUB core
1904
1905
  	return order;
  }
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1906
1907
1908
1909
1910
  static inline int calculate_order(int size)
  {
  	int order;
  	int min_objects;
  	int fraction;
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
1911
  	int max_objects;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
  
  	/*
  	 * Attempt to find best configuration for a slab. This
  	 * works by first attempting to generate a layout with
  	 * the best configuration and backing off gradually.
  	 *
  	 * First we reduce the acceptable waste in a slab. Then
  	 * we reduce the minimum objects required in a slab.
  	 */
  	min_objects = slub_min_objects;
9b2cd506e   Christoph Lameter   slub: Calculate m...
1922
1923
  	if (!min_objects)
  		min_objects = 4 * (fls(nr_cpu_ids) + 1);
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
1924
1925
  	max_objects = (PAGE_SIZE << slub_max_order)/size;
  	min_objects = min(min_objects, max_objects);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1926
  	while (min_objects > 1) {
c124f5b54   Christoph Lameter   slub: pack object...
1927
  		fraction = 16;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1928
1929
1930
1931
1932
1933
1934
  		while (fraction >= 4) {
  			order = slab_order(size, min_objects,
  						slub_max_order, fraction);
  			if (order <= slub_max_order)
  				return order;
  			fraction /= 2;
  		}
5086c389c   Amerigo Wang   SLUB: Fix some co...
1935
  		min_objects--;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
  	}
  
  	/*
  	 * We were unable to place multiple objects in a slab. Now
  	 * lets see if we can place a single object there.
  	 */
  	order = slab_order(size, 1, slub_max_order, 1);
  	if (order <= slub_max_order)
  		return order;
  
  	/*
  	 * Doh this slab cannot be placed using slub_max_order.
  	 */
  	order = slab_order(size, 1, MAX_ORDER, 1);
818cf5909   David Rientjes   slub: enforce MAX...
1950
  	if (order < MAX_ORDER)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
1951
1952
1953
  		return order;
  	return -ENOSYS;
  }
81819f0fc   Christoph Lameter   SLUB core
1954
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1955
   * Figure out what the alignment of the objects will be.
81819f0fc   Christoph Lameter   SLUB core
1956
1957
1958
1959
1960
   */
  static unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
1961
1962
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
81819f0fc   Christoph Lameter   SLUB core
1963
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
1964
1965
  	 * The hardware cache alignment cannot override the specified
  	 * alignment though. If that is greater then use it.
81819f0fc   Christoph Lameter   SLUB core
1966
  	 */
b62103867   Nick Piggin   slub: Do not cros...
1967
1968
1969
1970
1971
1972
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
81819f0fc   Christoph Lameter   SLUB core
1973
1974
  
  	if (align < ARCH_SLAB_MINALIGN)
b62103867   Nick Piggin   slub: Do not cros...
1975
  		align = ARCH_SLAB_MINALIGN;
81819f0fc   Christoph Lameter   SLUB core
1976
1977
1978
  
  	return ALIGN(align, sizeof(void *));
  }
5595cffc8   Pekka Enberg   SLUB: dynamic per...
1979
1980
  static void
  init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
1981
1982
  {
  	n->nr_partial = 0;
81819f0fc   Christoph Lameter   SLUB core
1983
1984
  	spin_lock_init(&n->list_lock);
  	INIT_LIST_HEAD(&n->partial);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
1985
  #ifdef CONFIG_SLUB_DEBUG
0f389ec63   Christoph Lameter   slub: No need for...
1986
  	atomic_long_set(&n->nr_slabs, 0);
02b71b701   Salman Qazi   slub: fixed unini...
1987
  	atomic_long_set(&n->total_objects, 0);
643b11384   Christoph Lameter   slub: enable trac...
1988
  	INIT_LIST_HEAD(&n->full);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
1989
  #endif
81819f0fc   Christoph Lameter   SLUB core
1990
  }
91efd773c   Christoph Lameter   dma kmalloc handl...
1991
  static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
1992

9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1993
  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
1994
  {
756dee758   Christoph Lameter   SLUB: Get rid of ...
1995
  	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1996
1997
1998
1999
  		/*
  		 * Boot time creation of the kmalloc array. Use static per cpu data
  		 * since the per cpu allocator is not available yet.
  		 */
1154fab73   Stephen Rothwell   SLUB: Fix per-cpu...
2000
  		s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2001
2002
  	else
  		s->cpu_slab =  alloc_percpu(struct kmem_cache_cpu);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2003

9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2004
2005
  	if (!s->cpu_slab)
  		return 0;
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2006

4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2007
2008
  	return 1;
  }
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2009

81819f0fc   Christoph Lameter   SLUB core
2010
2011
2012
2013
2014
2015
2016
  #ifdef CONFIG_NUMA
  /*
   * No kmalloc_node yet so do it by hand. We know that this is the first
   * slab on the node for this slabcache. There are no concurrent accesses
   * possible.
   *
   * Note that this function only works on the kmalloc_node_cache
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2017
2018
   * when allocating for the kmalloc_node_cache. This is used for bootstrapping
   * memory on a fresh node that has no slab structures yet.
81819f0fc   Christoph Lameter   SLUB core
2019
   */
0094de92a   David Rientjes   slub: make early_...
2020
  static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
81819f0fc   Christoph Lameter   SLUB core
2021
2022
2023
  {
  	struct page *page;
  	struct kmem_cache_node *n;
ba84c73c7   root   SLUB: Do not upse...
2024
  	unsigned long flags;
81819f0fc   Christoph Lameter   SLUB core
2025
2026
  
  	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
a2f92ee7e   Christoph Lameter   SLUB: do not fail...
2027
  	page = new_slab(kmalloc_caches, gfpflags, node);
81819f0fc   Christoph Lameter   SLUB core
2028
2029
  
  	BUG_ON(!page);
a2f92ee7e   Christoph Lameter   SLUB: do not fail...
2030
2031
2032
2033
2034
2035
2036
2037
  	if (page_to_nid(page) != node) {
  		printk(KERN_ERR "SLUB: Unable to allocate memory from "
  				"node %d
  ", node);
  		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
  				"in order to be able to continue
  ");
  	}
81819f0fc   Christoph Lameter   SLUB core
2038
2039
2040
2041
2042
  	n = page->freelist;
  	BUG_ON(!n);
  	page->freelist = get_freepointer(kmalloc_caches, n);
  	page->inuse++;
  	kmalloc_caches->node[node] = n;
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2043
  #ifdef CONFIG_SLUB_DEBUG
d45f39cb0   Christoph Lameter   SLUB Debug: fix i...
2044
2045
  	init_object(kmalloc_caches, n, 1);
  	init_tracking(kmalloc_caches, n);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2046
  #endif
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2047
  	init_kmem_cache_node(n, kmalloc_caches);
205ab99dd   Christoph Lameter   slub: Update stat...
2048
  	inc_slabs_node(kmalloc_caches, node, page->objects);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2049

ba84c73c7   root   SLUB: Do not upse...
2050
2051
2052
2053
2054
2055
  	/*
  	 * lockdep requires consistent irq usage for each lock
  	 * so even though there cannot be a race this early in
  	 * the boot sequence, we still disable irqs.
  	 */
  	local_irq_save(flags);
7c2e132c5   Christoph Lameter   Add parameter to ...
2056
  	add_partial(n, page, 0);
ba84c73c7   root   SLUB: Do not upse...
2057
  	local_irq_restore(flags);
81819f0fc   Christoph Lameter   SLUB core
2058
2059
2060
2061
2062
  }
  
  static void free_kmem_cache_nodes(struct kmem_cache *s)
  {
  	int node;
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2063
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2064
  		struct kmem_cache_node *n = s->node[node];
73367bd8e   Alexander Duyck   slub: move kmem_c...
2065
  		if (n)
81819f0fc   Christoph Lameter   SLUB core
2066
2067
2068
2069
2070
2071
2072
2073
  			kmem_cache_free(kmalloc_caches, n);
  		s->node[node] = NULL;
  	}
  }
  
  static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
2074

f64dc58c5   Christoph Lameter   Memoryless nodes:...
2075
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2076
  		struct kmem_cache_node *n;
73367bd8e   Alexander Duyck   slub: move kmem_c...
2077
2078
2079
2080
2081
2082
  		if (slab_state == DOWN) {
  			early_kmem_cache_node_alloc(gfpflags, node);
  			continue;
  		}
  		n = kmem_cache_alloc_node(kmalloc_caches,
  						gfpflags, node);
81819f0fc   Christoph Lameter   SLUB core
2083

73367bd8e   Alexander Duyck   slub: move kmem_c...
2084
2085
2086
  		if (!n) {
  			free_kmem_cache_nodes(s);
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
2087
  		}
73367bd8e   Alexander Duyck   slub: move kmem_c...
2088

81819f0fc   Christoph Lameter   SLUB core
2089
  		s->node[node] = n;
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2090
  		init_kmem_cache_node(n, s);
81819f0fc   Christoph Lameter   SLUB core
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
  	}
  	return 1;
  }
  #else
  static void free_kmem_cache_nodes(struct kmem_cache *s)
  {
  }
  
  static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  {
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2101
  	init_kmem_cache_node(&s->local_node, s);
81819f0fc   Christoph Lameter   SLUB core
2102
2103
2104
  	return 1;
  }
  #endif
c0bdb232b   David Rientjes   slub: rename calc...
2105
  static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d88   David Rientjes   slub: move min_pa...
2106
2107
2108
2109
2110
2111
2112
  {
  	if (min < MIN_PARTIAL)
  		min = MIN_PARTIAL;
  	else if (min > MAX_PARTIAL)
  		min = MAX_PARTIAL;
  	s->min_partial = min;
  }
81819f0fc   Christoph Lameter   SLUB core
2113
2114
2115
2116
  /*
   * calculate_sizes() determines the order and the distribution of data within
   * a slab object.
   */
06b285dc3   Christoph Lameter   slub: Make the or...
2117
  static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0fc   Christoph Lameter   SLUB core
2118
2119
2120
2121
  {
  	unsigned long flags = s->flags;
  	unsigned long size = s->objsize;
  	unsigned long align = s->align;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2122
  	int order;
81819f0fc   Christoph Lameter   SLUB core
2123
2124
  
  	/*
d8b42bf54   Christoph Lameter   slub: Rearrange #...
2125
2126
2127
2128
2129
2130
2131
2132
  	 * Round up object size to the next word boundary. We can only
  	 * place the free pointer at word boundaries and this determines
  	 * the possible location of the free pointer.
  	 */
  	size = ALIGN(size, sizeof(void *));
  
  #ifdef CONFIG_SLUB_DEBUG
  	/*
81819f0fc   Christoph Lameter   SLUB core
2133
2134
2135
2136
2137
  	 * Determine if we can poison the object itself. If the user of
  	 * the slab may touch the object after free or before allocation
  	 * then we should never poison the object itself.
  	 */
  	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f2   Christoph Lameter   Slab allocators: ...
2138
  			!s->ctor)
81819f0fc   Christoph Lameter   SLUB core
2139
2140
2141
  		s->flags |= __OBJECT_POISON;
  	else
  		s->flags &= ~__OBJECT_POISON;
81819f0fc   Christoph Lameter   SLUB core
2142
2143
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2144
  	 * If we are Redzoning then check if there is some space between the
81819f0fc   Christoph Lameter   SLUB core
2145
  	 * end of the object and the free pointer. If not then add an
672bba3a4   Christoph Lameter   SLUB: update comm...
2146
  	 * additional word to have some bytes to store Redzone information.
81819f0fc   Christoph Lameter   SLUB core
2147
2148
2149
  	 */
  	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2150
  #endif
81819f0fc   Christoph Lameter   SLUB core
2151
2152
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2153
2154
  	 * With that we have determined the number of bytes in actual use
  	 * by the object. This is the potential offset to the free pointer.
81819f0fc   Christoph Lameter   SLUB core
2155
2156
2157
2158
  	 */
  	s->inuse = size;
  
  	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f2   Christoph Lameter   Slab allocators: ...
2159
  		s->ctor)) {
81819f0fc   Christoph Lameter   SLUB core
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
  		/*
  		 * Relocate free pointer after the object if it is not
  		 * permitted to overwrite the first word of the object on
  		 * kmem_cache_free.
  		 *
  		 * This is the case if we do RCU, have a constructor or
  		 * destructor or are poisoning the objects.
  		 */
  		s->offset = size;
  		size += sizeof(void *);
  	}
c12b3c625   Christoph Lameter   SLUB Debug: Fix o...
2171
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
2172
2173
2174
2175
2176
2177
  	if (flags & SLAB_STORE_USER)
  		/*
  		 * Need to store information about allocs and frees after
  		 * the object.
  		 */
  		size += 2 * sizeof(struct track);
be7b3fbce   Christoph Lameter   SLUB: after objec...
2178
  	if (flags & SLAB_RED_ZONE)
81819f0fc   Christoph Lameter   SLUB core
2179
2180
2181
2182
  		/*
  		 * Add some empty padding so that we can catch
  		 * overwrites from earlier objects rather than let
  		 * tracking information or the free pointer be
0211a9c85   Frederik Schwarzer   trivial: fix an -...
2183
  		 * corrupted if a user writes before the start
81819f0fc   Christoph Lameter   SLUB core
2184
2185
2186
  		 * of the object.
  		 */
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2187
  #endif
672bba3a4   Christoph Lameter   SLUB: update comm...
2188

81819f0fc   Christoph Lameter   SLUB core
2189
2190
  	/*
  	 * Determine the alignment based on various parameters that the
65c02d4cf   Christoph Lameter   SLUB: add support...
2191
2192
  	 * user specified and the dynamic determination of cache line size
  	 * on bootup.
81819f0fc   Christoph Lameter   SLUB core
2193
2194
  	 */
  	align = calculate_alignment(flags, align, s->objsize);
dcb0ce1bd   Zhang, Yanmin   slub: change kmem...
2195
  	s->align = align;
81819f0fc   Christoph Lameter   SLUB core
2196
2197
2198
2199
2200
2201
2202
2203
  
  	/*
  	 * SLUB stores one object immediately after another beginning from
  	 * offset 0. In order to align the objects we have to simply size
  	 * each object to conform to the alignment.
  	 */
  	size = ALIGN(size, align);
  	s->size = size;
06b285dc3   Christoph Lameter   slub: Make the or...
2204
2205
2206
2207
  	if (forced_order >= 0)
  		order = forced_order;
  	else
  		order = calculate_order(size);
81819f0fc   Christoph Lameter   SLUB core
2208

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2209
  	if (order < 0)
81819f0fc   Christoph Lameter   SLUB core
2210
  		return 0;
b7a49f0d4   Christoph Lameter   slub: Determine g...
2211
  	s->allocflags = 0;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2212
  	if (order)
b7a49f0d4   Christoph Lameter   slub: Determine g...
2213
2214
2215
2216
2217
2218
2219
  		s->allocflags |= __GFP_COMP;
  
  	if (s->flags & SLAB_CACHE_DMA)
  		s->allocflags |= SLUB_DMA;
  
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		s->allocflags |= __GFP_RECLAIMABLE;
81819f0fc   Christoph Lameter   SLUB core
2220
2221
2222
  	/*
  	 * Determine the number of objects per slab
  	 */
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2223
  	s->oo = oo_make(order, size);
65c3376aa   Christoph Lameter   slub: Fallback to...
2224
  	s->min = oo_make(get_order(size), size);
205ab99dd   Christoph Lameter   slub: Update stat...
2225
2226
  	if (oo_objects(s->oo) > oo_objects(s->max))
  		s->max = s->oo;
81819f0fc   Christoph Lameter   SLUB core
2227

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2228
  	return !!oo_objects(s->oo);
81819f0fc   Christoph Lameter   SLUB core
2229
2230
  
  }
81819f0fc   Christoph Lameter   SLUB core
2231
2232
2233
  static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
  		const char *name, size_t size,
  		size_t align, unsigned long flags,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
2234
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
2235
2236
2237
2238
  {
  	memset(s, 0, kmem_size);
  	s->name = name;
  	s->ctor = ctor;
81819f0fc   Christoph Lameter   SLUB core
2239
  	s->objsize = size;
81819f0fc   Christoph Lameter   SLUB core
2240
  	s->align = align;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
2241
  	s->flags = kmem_cache_flags(size, flags, name, ctor);
81819f0fc   Christoph Lameter   SLUB core
2242

06b285dc3   Christoph Lameter   slub: Make the or...
2243
  	if (!calculate_sizes(s, -1))
81819f0fc   Christoph Lameter   SLUB core
2244
  		goto error;
3de472138   David Rientjes   slub: use size an...
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
  	if (disable_higher_order_debug) {
  		/*
  		 * Disable debugging flags that store metadata if the min slab
  		 * order increased.
  		 */
  		if (get_order(s->size) > get_order(s->objsize)) {
  			s->flags &= ~DEBUG_METADATA_FLAGS;
  			s->offset = 0;
  			if (!calculate_sizes(s, -1))
  				goto error;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
2257

3b89d7d88   David Rientjes   slub: move min_pa...
2258
2259
2260
2261
  	/*
  	 * The larger the object size is, the more pages we want on the partial
  	 * list to avoid pounding the page allocator excessively.
  	 */
c0bdb232b   David Rientjes   slub: rename calc...
2262
  	set_min_partial(s, ilog2(s->size));
81819f0fc   Christoph Lameter   SLUB core
2263
2264
  	s->refcount = 1;
  #ifdef CONFIG_NUMA
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
2265
  	s->remote_node_defrag_ratio = 1000;
81819f0fc   Christoph Lameter   SLUB core
2266
  #endif
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2267
2268
  	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
  		goto error;
81819f0fc   Christoph Lameter   SLUB core
2269

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2270
  	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
81819f0fc   Christoph Lameter   SLUB core
2271
  		return 1;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2272

4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2273
  	free_kmem_cache_nodes(s);
81819f0fc   Christoph Lameter   SLUB core
2274
2275
2276
2277
2278
  error:
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slab %s size=%lu realsize=%u "
  			"order=%u offset=%u flags=%lx
  ",
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2279
  			s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0fc   Christoph Lameter   SLUB core
2280
2281
2282
  			s->offset, flags);
  	return 0;
  }
81819f0fc   Christoph Lameter   SLUB core
2283
2284
2285
2286
2287
2288
  
  /*
   * Check if a given pointer is valid
   */
  int kmem_ptr_validate(struct kmem_cache *s, const void *object)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2289
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
2290

d3e06e2b1   Pekka Enberg   slub: Fix kmem_pt...
2291
2292
  	if (!kern_ptr_validate(object, s->size))
  		return 0;
81819f0fc   Christoph Lameter   SLUB core
2293
2294
2295
2296
2297
  	page = get_object_page(object);
  
  	if (!page || s != page->slab)
  		/* No slab or wrong slab */
  		return 0;
abcd08a6f   Christoph Lameter   SLUB: use check_v...
2298
  	if (!check_valid_pointer(s, page, object))
81819f0fc   Christoph Lameter   SLUB core
2299
2300
2301
2302
2303
  		return 0;
  
  	/*
  	 * We could also check if the object is on the slabs freelist.
  	 * But this would be too expensive and it seems that the main
6446faa2f   Christoph Lameter   slub: Fix up comm...
2304
  	 * purpose of kmem_ptr_valid() is to check if the object belongs
81819f0fc   Christoph Lameter   SLUB core
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
  	 * to a certain slab.
  	 */
  	return 1;
  }
  EXPORT_SYMBOL(kmem_ptr_validate);
  
  /*
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->objsize;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  
  const char *kmem_cache_name(struct kmem_cache *s)
  {
  	return s->name;
  }
  EXPORT_SYMBOL(kmem_cache_name);
33b12c381   Christoph Lameter   slub: Dump list o...
2325
2326
2327
2328
2329
2330
  static void list_slab_objects(struct kmem_cache *s, struct page *page,
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	void *addr = page_address(page);
  	void *p;
bbd7d57bf   Eric Dumazet   slub: Potential s...
2331
2332
  	long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
  			    GFP_ATOMIC);
33b12c381   Christoph Lameter   slub: Dump list o...
2333

bbd7d57bf   Eric Dumazet   slub: Potential s...
2334
2335
  	if (!map)
  		return;
33b12c381   Christoph Lameter   slub: Dump list o...
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
  	slab_err(s, page, "%s", text);
  	slab_lock(page);
  	for_each_free_object(p, s, page->freelist)
  		set_bit(slab_index(p, s, addr), map);
  
  	for_each_object(p, s, addr, page->objects) {
  
  		if (!test_bit(slab_index(p, s, addr), map)) {
  			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu
  ",
  							p, p - addr);
  			print_tracking(s, p);
  		}
  	}
  	slab_unlock(page);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2351
  	kfree(map);
33b12c381   Christoph Lameter   slub: Dump list o...
2352
2353
  #endif
  }
81819f0fc   Christoph Lameter   SLUB core
2354
  /*
599870b17   Christoph Lameter   slub: free_list()...
2355
   * Attempt to free all partial slabs on a node.
81819f0fc   Christoph Lameter   SLUB core
2356
   */
599870b17   Christoph Lameter   slub: free_list()...
2357
  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0fc   Christoph Lameter   SLUB core
2358
  {
81819f0fc   Christoph Lameter   SLUB core
2359
2360
2361
2362
  	unsigned long flags;
  	struct page *page, *h;
  
  	spin_lock_irqsave(&n->list_lock, flags);
33b12c381   Christoph Lameter   slub: Dump list o...
2363
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0fc   Christoph Lameter   SLUB core
2364
2365
2366
  		if (!page->inuse) {
  			list_del(&page->lru);
  			discard_slab(s, page);
599870b17   Christoph Lameter   slub: free_list()...
2367
  			n->nr_partial--;
33b12c381   Christoph Lameter   slub: Dump list o...
2368
2369
2370
  		} else {
  			list_slab_objects(s, page,
  				"Objects remaining on kmem_cache_close()");
599870b17   Christoph Lameter   slub: free_list()...
2371
  		}
33b12c381   Christoph Lameter   slub: Dump list o...
2372
  	}
81819f0fc   Christoph Lameter   SLUB core
2373
  	spin_unlock_irqrestore(&n->list_lock, flags);
81819f0fc   Christoph Lameter   SLUB core
2374
2375
2376
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2377
   * Release all resources used by a slab cache.
81819f0fc   Christoph Lameter   SLUB core
2378
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
2379
  static inline int kmem_cache_close(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2380
2381
2382
2383
  {
  	int node;
  
  	flush_all(s);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2384
  	free_percpu(s->cpu_slab);
81819f0fc   Christoph Lameter   SLUB core
2385
  	/* Attempt to free all objects */
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2386
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2387
  		struct kmem_cache_node *n = get_node(s, node);
599870b17   Christoph Lameter   slub: free_list()...
2388
2389
  		free_partial(s, n);
  		if (n->nr_partial || slabs_node(s, node))
81819f0fc   Christoph Lameter   SLUB core
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
  			return 1;
  	}
  	free_kmem_cache_nodes(s);
  	return 0;
  }
  
  /*
   * Close a cache and release the kmem_cache structure
   * (must be used for caches created using kmem_cache_create)
   */
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	down_write(&slub_lock);
  	s->refcount--;
  	if (!s->refcount) {
  		list_del(&s->list);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
2406
  		up_write(&slub_lock);
d629d8195   Pekka Enberg   slub: improve kme...
2407
2408
2409
2410
2411
2412
  		if (kmem_cache_close(s)) {
  			printk(KERN_ERR "SLUB %s: %s called for cache that "
  				"still has objects.
  ", s->name, __func__);
  			dump_stack();
  		}
d76b1590e   Eric Dumazet   slub: Fix kmem_ca...
2413
2414
  		if (s->flags & SLAB_DESTROY_BY_RCU)
  			rcu_barrier();
81819f0fc   Christoph Lameter   SLUB core
2415
  		sysfs_slab_remove(s);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
2416
2417
  	} else
  		up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
2418
2419
2420
2421
2422
2423
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  
  /********************************************************************
   *		Kmalloc subsystem
   *******************************************************************/
756dee758   Christoph Lameter   SLUB: Get rid of ...
2424
  struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
81819f0fc   Christoph Lameter   SLUB core
2425
  EXPORT_SYMBOL(kmalloc_caches);
81819f0fc   Christoph Lameter   SLUB core
2426
2427
  static int __init setup_slub_min_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2428
  	get_option(&str, &slub_min_order);
81819f0fc   Christoph Lameter   SLUB core
2429
2430
2431
2432
2433
2434
2435
2436
  
  	return 1;
  }
  
  __setup("slub_min_order=", setup_slub_min_order);
  
  static int __init setup_slub_max_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2437
  	get_option(&str, &slub_max_order);
818cf5909   David Rientjes   slub: enforce MAX...
2438
  	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0fc   Christoph Lameter   SLUB core
2439
2440
2441
2442
2443
2444
2445
2446
  
  	return 1;
  }
  
  __setup("slub_max_order=", setup_slub_max_order);
  
  static int __init setup_slub_min_objects(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2447
  	get_option(&str, &slub_min_objects);
81819f0fc   Christoph Lameter   SLUB core
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
  
  	return 1;
  }
  
  __setup("slub_min_objects=", setup_slub_min_objects);
  
  static int __init setup_slub_nomerge(char *str)
  {
  	slub_nomerge = 1;
  	return 1;
  }
  
  __setup("slub_nomerge", setup_slub_nomerge);
81819f0fc   Christoph Lameter   SLUB core
2461
2462
2463
2464
2465
2466
2467
  static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
  		const char *name, int size, gfp_t gfp_flags)
  {
  	unsigned int flags = 0;
  
  	if (gfp_flags & SLUB_DMA)
  		flags = SLAB_CACHE_DMA;
83b519e8b   Pekka Enberg   slab: setup alloc...
2468
2469
2470
2471
  	/*
  	 * This function is called with IRQs disabled during early-boot on
  	 * single CPU so there's no need to take slub_lock here.
  	 */
81819f0fc   Christoph Lameter   SLUB core
2472
  	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
319d1e240   Christoph Lameter   slub: Drop fallba...
2473
  								flags, NULL))
81819f0fc   Christoph Lameter   SLUB core
2474
2475
2476
  		goto panic;
  
  	list_add(&s->list, &slab_caches);
83b519e8b   Pekka Enberg   slab: setup alloc...
2477

81819f0fc   Christoph Lameter   SLUB core
2478
2479
2480
2481
2482
2483
2484
2485
  	if (sysfs_slab_add(s))
  		goto panic;
  	return s;
  
  panic:
  	panic("Creation of kmalloc slab %s size=%d failed.
  ", name, size);
  }
2e443fd00   Christoph Lameter   SLUB: extract dma...
2486
  #ifdef CONFIG_ZONE_DMA
ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
2487
  static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
  
  static void sysfs_add_func(struct work_struct *w)
  {
  	struct kmem_cache *s;
  
  	down_write(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		if (s->flags & __SYSFS_ADD_DEFERRED) {
  			s->flags &= ~__SYSFS_ADD_DEFERRED;
  			sysfs_slab_add(s);
  		}
  	}
  	up_write(&slub_lock);
  }
  
  static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2e443fd00   Christoph Lameter   SLUB: extract dma...
2504
2505
2506
  static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
  {
  	struct kmem_cache *s;
2e443fd00   Christoph Lameter   SLUB: extract dma...
2507
2508
  	char *text;
  	size_t realsize;
964cf35c8   Nick Piggin   SLUB: Fix early b...
2509
  	unsigned long slabflags;
756dee758   Christoph Lameter   SLUB: Get rid of ...
2510
  	int i;
2e443fd00   Christoph Lameter   SLUB: extract dma...
2511
2512
2513
2514
2515
2516
  
  	s = kmalloc_caches_dma[index];
  	if (s)
  		return s;
  
  	/* Dynamically create dma cache */
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2517
2518
2519
2520
2521
2522
2523
2524
2525
  	if (flags & __GFP_WAIT)
  		down_write(&slub_lock);
  	else {
  		if (!down_write_trylock(&slub_lock))
  			goto out;
  	}
  
  	if (kmalloc_caches_dma[index])
  		goto unlock_out;
2e443fd00   Christoph Lameter   SLUB: extract dma...
2526

7b55f620e   Christoph Lameter   SLUB: Simplify dm...
2527
  	realsize = kmalloc_caches[index].objsize;
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
2528
2529
  	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
  			 (unsigned int)realsize);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2530

756dee758   Christoph Lameter   SLUB: Get rid of ...
2531
2532
2533
2534
  	s = NULL;
  	for (i = 0; i < KMALLOC_CACHES; i++)
  		if (!kmalloc_caches[i].size)
  			break;
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2535

756dee758   Christoph Lameter   SLUB: Get rid of ...
2536
2537
  	BUG_ON(i >= KMALLOC_CACHES);
  	s = kmalloc_caches + i;
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2538

964cf35c8   Nick Piggin   SLUB: Fix early b...
2539
2540
2541
2542
2543
2544
  	/*
  	 * Must defer sysfs creation to a workqueue because we don't know
  	 * what context we are called from. Before sysfs comes up, we don't
  	 * need to do anything because our sysfs initcall will start by
  	 * adding all existing slabs to sysfs.
  	 */
5caf5c7dc   Pekka Enberg   Merge branch 'slu...
2545
  	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
964cf35c8   Nick Piggin   SLUB: Fix early b...
2546
2547
  	if (slab_state >= SYSFS)
  		slabflags |= __SYSFS_ADD_DEFERRED;
7738dd9e8   David Rientjes   slub: remove impo...
2548
  	if (!text || !kmem_cache_open(s, flags, text,
964cf35c8   Nick Piggin   SLUB: Fix early b...
2549
  			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
756dee758   Christoph Lameter   SLUB: Get rid of ...
2550
  		s->size = 0;
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2551
2552
  		kfree(text);
  		goto unlock_out;
dfce8648d   Christoph Lameter   SLUB: do proper l...
2553
  	}
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2554
2555
2556
  
  	list_add(&s->list, &slab_caches);
  	kmalloc_caches_dma[index] = s;
964cf35c8   Nick Piggin   SLUB: Fix early b...
2557
2558
  	if (slab_state >= SYSFS)
  		schedule_work(&sysfs_add_work);
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2559
2560
  
  unlock_out:
dfce8648d   Christoph Lameter   SLUB: do proper l...
2561
  	up_write(&slub_lock);
1ceef4024   Christoph Lameter   SLUB: Fix dynamic...
2562
  out:
dfce8648d   Christoph Lameter   SLUB: do proper l...
2563
  	return kmalloc_caches_dma[index];
2e443fd00   Christoph Lameter   SLUB: extract dma...
2564
2565
  }
  #endif
f1b263393   Christoph Lameter   SLUB: faster more...
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
  /*
   * Conversion table for small slabs sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2598
2599
2600
2601
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
81819f0fc   Christoph Lameter   SLUB core
2602
2603
  static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  {
f1b263393   Christoph Lameter   SLUB: faster more...
2604
  	int index;
81819f0fc   Christoph Lameter   SLUB core
2605

f1b263393   Christoph Lameter   SLUB: faster more...
2606
2607
2608
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
81819f0fc   Christoph Lameter   SLUB core
2609

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2610
  		index = size_index[size_index_elem(size)];
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2611
  	} else
f1b263393   Christoph Lameter   SLUB: faster more...
2612
  		index = fls(size - 1);
81819f0fc   Christoph Lameter   SLUB core
2613
2614
  
  #ifdef CONFIG_ZONE_DMA
f1b263393   Christoph Lameter   SLUB: faster more...
2615
  	if (unlikely((flags & SLUB_DMA)))
2e443fd00   Christoph Lameter   SLUB: extract dma...
2616
  		return dma_kmalloc_cache(index, flags);
f1b263393   Christoph Lameter   SLUB: faster more...
2617

81819f0fc   Christoph Lameter   SLUB core
2618
2619
2620
2621
2622
2623
  #endif
  	return &kmalloc_caches[index];
  }
  
  void *__kmalloc(size_t size, gfp_t flags)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2624
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2625
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
2626

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
2627
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
2628
  		return kmalloc_large(size, flags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2629
2630
2631
2632
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
2633
  		return s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2634
  	ret = slab_alloc(s, flags, -1, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2635
  	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2636
2637
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2638
2639
  }
  EXPORT_SYMBOL(__kmalloc);
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
2640
2641
  static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
  {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
2642
  	struct page *page;
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
2643
  	void *ptr = NULL;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
2644

b1eeab676   Vegard Nossum   kmemcheck: add ho...
2645
2646
  	flags |= __GFP_COMP | __GFP_NOTRACK;
  	page = alloc_pages_node(node, flags, get_order(size));
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
2647
  	if (page)
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
2648
2649
2650
2651
  		ptr = page_address(page);
  
  	kmemleak_alloc(ptr, size, 1, flags);
  	return ptr;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
2652
  }
81819f0fc   Christoph Lameter   SLUB core
2653
2654
2655
  #ifdef CONFIG_NUMA
  void *__kmalloc_node(size_t size, gfp_t flags, int node)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2656
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2657
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
2658

057685cf5   Ingo Molnar   Merge branch 'for...
2659
  	if (unlikely(size > SLUB_MAX_SIZE)) {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2660
  		ret = kmalloc_large_node(size, flags, node);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2661
2662
2663
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2664
2665
2666
  
  		return ret;
  	}
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2667
2668
2669
2670
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
2671
  		return s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2672
  	ret = slab_alloc(s, flags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2673
  	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2674
2675
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2676
2677
2678
2679
2680
2681
  }
  EXPORT_SYMBOL(__kmalloc_node);
  #endif
  
  size_t ksize(const void *object)
  {
272c1d21d   Christoph Lameter   SLUB: return ZERO...
2682
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
2683
  	struct kmem_cache *s;
ef8b4520b   Christoph Lameter   Slab allocators: ...
2684
  	if (unlikely(object == ZERO_SIZE_PTR))
272c1d21d   Christoph Lameter   SLUB: return ZERO...
2685
  		return 0;
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
2686
  	page = virt_to_head_page(object);
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
2687

76994412f   Pekka Enberg   slub: ksize() abu...
2688
2689
  	if (unlikely(!PageSlab(page))) {
  		WARN_ON(!PageCompound(page));
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
2690
  		return PAGE_SIZE << compound_order(page);
76994412f   Pekka Enberg   slub: ksize() abu...
2691
  	}
81819f0fc   Christoph Lameter   SLUB core
2692
  	s = page->slab;
81819f0fc   Christoph Lameter   SLUB core
2693

ae20bfda6   Christoph Lameter   slub: Remove BUG_...
2694
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
2695
2696
2697
2698
2699
2700
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->objsize;
ae20bfda6   Christoph Lameter   slub: Remove BUG_...
2701
  #endif
81819f0fc   Christoph Lameter   SLUB core
2702
2703
2704
2705
2706
2707
2708
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
81819f0fc   Christoph Lameter   SLUB core
2709
2710
2711
2712
2713
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  }
b1aabecd5   Kirill A. Shutemov   mm: Export symbol...
2714
  EXPORT_SYMBOL(ksize);
81819f0fc   Christoph Lameter   SLUB core
2715
2716
2717
  
  void kfree(const void *x)
  {
81819f0fc   Christoph Lameter   SLUB core
2718
  	struct page *page;
5bb983b0c   Christoph Lameter   SLUB: Deal with a...
2719
  	void *object = (void *)x;
81819f0fc   Christoph Lameter   SLUB core
2720

2121db74b   Pekka Enberg   kmemtrace: trace ...
2721
  	trace_kfree(_RET_IP_, x);
2408c5503   Satyam Sharma   {slub, slob}: use...
2722
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0fc   Christoph Lameter   SLUB core
2723
  		return;
b49af68ff   Christoph Lameter   Add virt_to_head_...
2724
  	page = virt_to_head_page(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2725
  	if (unlikely(!PageSlab(page))) {
0937502af   Christoph Lameter   slub: Add check f...
2726
  		BUG_ON(!PageCompound(page));
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
2727
  		kmemleak_free(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
2728
2729
2730
  		put_page(page);
  		return;
  	}
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2731
  	slab_free(page->slab, page, object, _RET_IP_);
81819f0fc   Christoph Lameter   SLUB core
2732
2733
  }
  EXPORT_SYMBOL(kfree);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2734
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2735
2736
2737
2738
2739
2740
2741
2742
   * kmem_cache_shrink removes empty slabs from the partial lists and sorts
   * the remaining slabs by the number of items in use. The slabs with the
   * most items in use come first. New allocations will then fill those up
   * and thus they can be removed from the partial lists.
   *
   * The slabs with the least items are placed last. This results in them
   * being allocated from last increasing the chance that the last objects
   * are freed in them.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2743
2744
2745
2746
2747
2748
2749
2750
   */
  int kmem_cache_shrink(struct kmem_cache *s)
  {
  	int node;
  	int i;
  	struct kmem_cache_node *n;
  	struct page *page;
  	struct page *t;
205ab99dd   Christoph Lameter   slub: Update stat...
2751
  	int objects = oo_objects(s->max);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2752
  	struct list_head *slabs_by_inuse =
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2753
  		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2754
2755
2756
2757
2758
2759
  	unsigned long flags;
  
  	if (!slabs_by_inuse)
  		return -ENOMEM;
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2760
  	for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2761
2762
2763
2764
  		n = get_node(s, node);
  
  		if (!n->nr_partial)
  			continue;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2765
  		for (i = 0; i < objects; i++)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2766
2767
2768
2769
2770
  			INIT_LIST_HEAD(slabs_by_inuse + i);
  
  		spin_lock_irqsave(&n->list_lock, flags);
  
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2771
  		 * Build lists indexed by the items in use in each slab.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2772
  		 *
672bba3a4   Christoph Lameter   SLUB: update comm...
2773
2774
  		 * Note that concurrent frees may occur while we hold the
  		 * list_lock. page->inuse here is the upper limit.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
  		 */
  		list_for_each_entry_safe(page, t, &n->partial, lru) {
  			if (!page->inuse && slab_trylock(page)) {
  				/*
  				 * Must hold slab lock here because slab_free
  				 * may have freed the last object and be
  				 * waiting to release the slab.
  				 */
  				list_del(&page->lru);
  				n->nr_partial--;
  				slab_unlock(page);
  				discard_slab(s, page);
  			} else {
fcda3d89b   Christoph Lameter   SLUB: Remove chec...
2788
2789
  				list_move(&page->lru,
  				slabs_by_inuse + page->inuse);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2790
2791
  			}
  		}
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2792
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2793
2794
  		 * Rebuild the partial list with the slabs filled up most
  		 * first and the least used slabs at the end.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2795
  		 */
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2796
  		for (i = objects - 1; i >= 0; i--)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2797
  			list_splice(slabs_by_inuse + i, n->partial.prev);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
2798
2799
2800
2801
2802
2803
2804
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	kfree(slabs_by_inuse);
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
b9049e234   Yasunori Goto   memory hotplug: m...
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
  #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
  static int slab_mem_going_offline_callback(void *arg)
  {
  	struct kmem_cache *s;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list)
  		kmem_cache_shrink(s);
  	up_read(&slub_lock);
  
  	return 0;
  }
  
  static void slab_mem_offline_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int offline_node;
  
  	offline_node = marg->status_change_nid;
  
  	/*
  	 * If the node still has available memory. we need kmem_cache_node
  	 * for it yet.
  	 */
  	if (offline_node < 0)
  		return;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		n = get_node(s, offline_node);
  		if (n) {
  			/*
  			 * if n->nr_slabs > 0, slabs still exist on the node
  			 * that is going down. We were unable to free them,
c9404c9c3   Adam Buchbinder   Fix misspelling o...
2841
  			 * and offline_pages() function shouldn't call this
b9049e234   Yasunori Goto   memory hotplug: m...
2842
2843
  			 * callback. So, we must fail.
  			 */
0f389ec63   Christoph Lameter   slub: No need for...
2844
  			BUG_ON(slabs_node(s, offline_node));
b9049e234   Yasunori Goto   memory hotplug: m...
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
  
  			s->node[offline_node] = NULL;
  			kmem_cache_free(kmalloc_caches, n);
  		}
  	}
  	up_read(&slub_lock);
  }
  
  static int slab_mem_going_online_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int nid = marg->status_change_nid;
  	int ret = 0;
  
  	/*
  	 * If the node's memory is already available, then kmem_cache_node is
  	 * already created. Nothing to do.
  	 */
  	if (nid < 0)
  		return 0;
  
  	/*
0121c619d   Christoph Lameter   slub: Whitespace ...
2869
  	 * We are bringing a node online. No memory is available yet. We must
b9049e234   Yasunori Goto   memory hotplug: m...
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
  	 * allocate a kmem_cache_node structure in order to bring the node
  	 * online.
  	 */
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		/*
  		 * XXX: kmem_cache_alloc_node will fallback to other nodes
  		 *      since memory is not yet available from the node that
  		 *      is brought up.
  		 */
  		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
  		if (!n) {
  			ret = -ENOMEM;
  			goto out;
  		}
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2885
  		init_kmem_cache_node(n, s);
b9049e234   Yasunori Goto   memory hotplug: m...
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
  		s->node[nid] = n;
  	}
  out:
  	up_read(&slub_lock);
  	return ret;
  }
  
  static int slab_memory_callback(struct notifier_block *self,
  				unsigned long action, void *arg)
  {
  	int ret = 0;
  
  	switch (action) {
  	case MEM_GOING_ONLINE:
  		ret = slab_mem_going_online_callback(arg);
  		break;
  	case MEM_GOING_OFFLINE:
  		ret = slab_mem_going_offline_callback(arg);
  		break;
  	case MEM_OFFLINE:
  	case MEM_CANCEL_ONLINE:
  		slab_mem_offline_callback(arg);
  		break;
  	case MEM_ONLINE:
  	case MEM_CANCEL_OFFLINE:
  		break;
  	}
dc19f9db3   KAMEZAWA Hiroyuki   memcg: memory hot...
2913
2914
2915
2916
  	if (ret)
  		ret = notifier_from_errno(ret);
  	else
  		ret = NOTIFY_OK;
b9049e234   Yasunori Goto   memory hotplug: m...
2917
2918
2919
2920
  	return ret;
  }
  
  #endif /* CONFIG_MEMORY_HOTPLUG */
81819f0fc   Christoph Lameter   SLUB core
2921
2922
2923
2924
2925
2926
2927
  /********************************************************************
   *			Basic setup of slabs
   *******************************************************************/
  
  void __init kmem_cache_init(void)
  {
  	int i;
4b356be01   Christoph Lameter   SLUB: minimum ali...
2928
  	int caches = 0;
81819f0fc   Christoph Lameter   SLUB core
2929
2930
2931
2932
  
  #ifdef CONFIG_NUMA
  	/*
  	 * Must first have the slab cache available for the allocations of the
672bba3a4   Christoph Lameter   SLUB: update comm...
2933
  	 * struct kmem_cache_node's. There is special bootstrap code in
81819f0fc   Christoph Lameter   SLUB core
2934
2935
2936
  	 * kmem_cache_open for slab_state == DOWN.
  	 */
  	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
83b519e8b   Pekka Enberg   slab: setup alloc...
2937
  		sizeof(struct kmem_cache_node), GFP_NOWAIT);
8ffa68755   Christoph Lameter   SLUB: Fix NUMA / ...
2938
  	kmalloc_caches[0].refcount = -1;
4b356be01   Christoph Lameter   SLUB: minimum ali...
2939
  	caches++;
b9049e234   Yasunori Goto   memory hotplug: m...
2940

0c40ba4fd   Nadia Derbey   ipc: define the s...
2941
  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0fc   Christoph Lameter   SLUB core
2942
2943
2944
2945
2946
2947
  #endif
  
  	/* Able to allocate the per node structures */
  	slab_state = PARTIAL;
  
  	/* Caches that are not of the two-to-the-power-of size */
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2948
  	if (KMALLOC_MIN_SIZE <= 32) {
4b356be01   Christoph Lameter   SLUB: minimum ali...
2949
  		create_kmalloc_cache(&kmalloc_caches[1],
83b519e8b   Pekka Enberg   slab: setup alloc...
2950
  				"kmalloc-96", 96, GFP_NOWAIT);
4b356be01   Christoph Lameter   SLUB: minimum ali...
2951
  		caches++;
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2952
2953
  	}
  	if (KMALLOC_MIN_SIZE <= 64) {
4b356be01   Christoph Lameter   SLUB: minimum ali...
2954
  		create_kmalloc_cache(&kmalloc_caches[2],
83b519e8b   Pekka Enberg   slab: setup alloc...
2955
  				"kmalloc-192", 192, GFP_NOWAIT);
4b356be01   Christoph Lameter   SLUB: minimum ali...
2956
2957
  		caches++;
  	}
81819f0fc   Christoph Lameter   SLUB core
2958

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
2959
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
81819f0fc   Christoph Lameter   SLUB core
2960
  		create_kmalloc_cache(&kmalloc_caches[i],
83b519e8b   Pekka Enberg   slab: setup alloc...
2961
  			"kmalloc", 1 << i, GFP_NOWAIT);
4b356be01   Christoph Lameter   SLUB: minimum ali...
2962
2963
  		caches++;
  	}
81819f0fc   Christoph Lameter   SLUB core
2964

f1b263393   Christoph Lameter   SLUB: faster more...
2965
2966
2967
2968
  
  	/*
  	 * Patch up the size_index table if we have strange large alignment
  	 * requirements for the kmalloc array. This is only the case for
6446faa2f   Christoph Lameter   slub: Fix up comm...
2969
  	 * MIPS it seems. The standard arches will not generate any code here.
f1b263393   Christoph Lameter   SLUB: faster more...
2970
2971
2972
2973
2974
2975
2976
2977
2978
  	 *
  	 * Largest permitted alignment is 256 bytes due to the way we
  	 * handle the index determination for the smaller caches.
  	 *
  	 * Make sure that nothing crazy happens if someone starts tinkering
  	 * around with ARCH_KMALLOC_MINALIGN
  	 */
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2979
2980
2981
2982
2983
2984
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
f1b263393   Christoph Lameter   SLUB: faster more...
2985

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
2986
2987
2988
2989
2990
2991
2992
2993
  	if (KMALLOC_MIN_SIZE == 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 byte.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  	} else if (KMALLOC_MIN_SIZE == 128) {
41d54d3bf   Christoph Lameter   slub: Do not use ...
2994
2995
2996
2997
2998
2999
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3000
  			size_index[size_index_elem(i)] = 8;
41d54d3bf   Christoph Lameter   slub: Do not use ...
3001
  	}
81819f0fc   Christoph Lameter   SLUB core
3002
3003
3004
  	slab_state = UP;
  
  	/* Provide the correct kmalloc names now that the caches are up */
ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3005
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
81819f0fc   Christoph Lameter   SLUB core
3006
  		kmalloc_caches[i]. name =
83b519e8b   Pekka Enberg   slab: setup alloc...
3007
  			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
81819f0fc   Christoph Lameter   SLUB core
3008
3009
3010
  
  #ifdef CONFIG_SMP
  	register_cpu_notifier(&slab_notifier);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3011
3012
3013
3014
  #endif
  #ifdef CONFIG_NUMA
  	kmem_size = offsetof(struct kmem_cache, node) +
  				nr_node_ids * sizeof(struct kmem_cache_node *);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
3015
3016
  #else
  	kmem_size = sizeof(struct kmem_cache);
81819f0fc   Christoph Lameter   SLUB core
3017
  #endif
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3018
3019
  	printk(KERN_INFO
  		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be01   Christoph Lameter   SLUB: minimum ali...
3020
3021
3022
  		" CPUs=%d, Nodes=%d
  ",
  		caches, cache_line_size(),
81819f0fc   Christoph Lameter   SLUB core
3023
3024
3025
  		slub_min_order, slub_max_order, slub_min_objects,
  		nr_cpu_ids, nr_node_ids);
  }
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3026
3027
  void __init kmem_cache_init_late(void)
  {
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3028
  }
81819f0fc   Christoph Lameter   SLUB core
3029
3030
3031
3032
3033
3034
3035
  /*
   * Find a mergeable slab cache
   */
  static int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  		return 1;
c59def9f2   Christoph Lameter   Slab allocators: ...
3036
  	if (s->ctor)
81819f0fc   Christoph Lameter   SLUB core
3037
  		return 1;
8ffa68755   Christoph Lameter   SLUB: Fix NUMA / ...
3038
3039
3040
3041
3042
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
81819f0fc   Christoph Lameter   SLUB core
3043
3044
3045
3046
  	return 0;
  }
  
  static struct kmem_cache *find_mergeable(size_t size,
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3047
  		size_t align, unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3048
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3049
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3050
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
3051
3052
3053
  
  	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  		return NULL;
c59def9f2   Christoph Lameter   Slab allocators: ...
3054
  	if (ctor)
81819f0fc   Christoph Lameter   SLUB core
3055
3056
3057
3058
3059
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3060
  	flags = kmem_cache_flags(size, flags, name, NULL);
81819f0fc   Christoph Lameter   SLUB core
3061

5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3062
  	list_for_each_entry(s, &slab_caches, list) {
81819f0fc   Christoph Lameter   SLUB core
3063
3064
3065
3066
3067
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3068
  		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0fc   Christoph Lameter   SLUB core
3069
3070
3071
3072
3073
  				continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
064287807   Pekka Enberg   SLUB: Fix coding ...
3074
  		if ((s->size & ~(align - 1)) != s->size)
81819f0fc   Christoph Lameter   SLUB core
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  
  		return s;
  	}
  	return NULL;
  }
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3086
  		size_t align, unsigned long flags, void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3087
3088
  {
  	struct kmem_cache *s;
fe1ff49d0   Benjamin Herrenschmidt   mm: kmem_cache_cr...
3089
3090
  	if (WARN_ON(!name))
  		return NULL;
81819f0fc   Christoph Lameter   SLUB core
3091
  	down_write(&slub_lock);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3092
  	s = find_mergeable(size, align, flags, name, ctor);
81819f0fc   Christoph Lameter   SLUB core
3093
3094
3095
3096
3097
3098
3099
3100
  	if (s) {
  		s->refcount++;
  		/*
  		 * Adjust the object sizes so that we clear
  		 * the complete object on kzalloc.
  		 */
  		s->objsize = max(s->objsize, (int)size);
  		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3101
  		up_write(&slub_lock);
6446faa2f   Christoph Lameter   slub: Fix up comm...
3102

7b8f3b66d   David Rientjes   slub: avoid leaki...
3103
3104
3105
3106
  		if (sysfs_slab_alias(s, name)) {
  			down_write(&slub_lock);
  			s->refcount--;
  			up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3107
  			goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3108
  		}
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3109
3110
  		return s;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
3111

a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3112
3113
3114
  	s = kmalloc(kmem_size, GFP_KERNEL);
  	if (s) {
  		if (kmem_cache_open(s, GFP_KERNEL, name,
c59def9f2   Christoph Lameter   Slab allocators: ...
3115
  				size, align, flags, ctor)) {
81819f0fc   Christoph Lameter   SLUB core
3116
  			list_add(&s->list, &slab_caches);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3117
  			up_write(&slub_lock);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3118
3119
3120
3121
3122
  			if (sysfs_slab_add(s)) {
  				down_write(&slub_lock);
  				list_del(&s->list);
  				up_write(&slub_lock);
  				kfree(s);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3123
  				goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3124
  			}
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3125
3126
3127
  			return s;
  		}
  		kfree(s);
81819f0fc   Christoph Lameter   SLUB core
3128
3129
  	}
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3130
3131
  
  err:
81819f0fc   Christoph Lameter   SLUB core
3132
3133
3134
3135
3136
3137
3138
3139
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slabcache %s
  ", name);
  	else
  		s = NULL;
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
81819f0fc   Christoph Lameter   SLUB core
3140
  #ifdef CONFIG_SMP
27390bc33   Christoph Lameter   SLUB: fix locking...
3141
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3142
3143
   * Use the cpu notifier to insure that the cpu slabs are flushed when
   * necessary.
81819f0fc   Christoph Lameter   SLUB core
3144
3145
3146
3147
3148
   */
  static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  		unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3149
3150
  	struct kmem_cache *s;
  	unsigned long flags;
81819f0fc   Christoph Lameter   SLUB core
3151
3152
3153
  
  	switch (action) {
  	case CPU_UP_CANCELED:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3154
  	case CPU_UP_CANCELED_FROZEN:
81819f0fc   Christoph Lameter   SLUB core
3155
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3156
  	case CPU_DEAD_FROZEN:
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3157
3158
3159
3160
3161
3162
3163
  		down_read(&slub_lock);
  		list_for_each_entry(s, &slab_caches, list) {
  			local_irq_save(flags);
  			__flush_cpu_slab(s, cpu);
  			local_irq_restore(flags);
  		}
  		up_read(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3164
3165
3166
3167
3168
3169
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
064287807   Pekka Enberg   SLUB: Fix coding ...
3170
  static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3171
  	.notifier_call = slab_cpuup_callback
064287807   Pekka Enberg   SLUB: Fix coding ...
3172
  };
81819f0fc   Christoph Lameter   SLUB core
3173
3174
  
  #endif
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3175
  void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3176
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3177
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3178
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3179

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3180
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3181
  		return kmalloc_large(size, gfpflags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3182
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3183

2408c5503   Satyam Sharma   {slub, slob}: use...
3184
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3185
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3186

94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3187
3188
3189
  	ret = slab_alloc(s, gfpflags, -1, caller);
  
  	/* Honor the call site pointer we recieved. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3190
  	trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3191
3192
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3193
3194
3195
  }
  
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3196
  					int node, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3197
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3198
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3199
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3200

d3e14aa33   Xiaotian Feng   slub: __kmalloc_n...
3201
3202
3203
3204
3205
3206
3207
3208
3209
  	if (unlikely(size > SLUB_MAX_SIZE)) {
  		ret = kmalloc_large_node(size, gfpflags, node);
  
  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   gfpflags, node);
  
  		return ret;
  	}
eada35efc   Pekka Enberg   slub: kmalloc pag...
3210

aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3211
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3212

2408c5503   Satyam Sharma   {slub, slob}: use...
3213
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3214
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3215

94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3216
3217
3218
  	ret = slab_alloc(s, gfpflags, node, caller);
  
  	/* Honor the call site pointer we recieved. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3219
  	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3220
3221
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3222
  }
f6acb6350   Christoph Lameter   slub: #ifdef simp...
3223
  #ifdef CONFIG_SLUB_DEBUG
205ab99dd   Christoph Lameter   slub: Update stat...
3224
3225
3226
3227
3228
3229
3230
3231
3232
  static int count_inuse(struct page *page)
  {
  	return page->inuse;
  }
  
  static int count_total(struct page *page)
  {
  	return page->objects;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3233
3234
  static int validate_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3235
3236
  {
  	void *p;
a973e9dd1   Christoph Lameter   Revert "unique en...
3237
  	void *addr = page_address(page);
53e15af03   Christoph Lameter   slub: validation ...
3238
3239
3240
3241
3242
3243
  
  	if (!check_slab(s, page) ||
  			!on_freelist(s, page, NULL))
  		return 0;
  
  	/* Now we know that a valid freelist exists */
39b264641   Christoph Lameter   slub: Store max n...
3244
  	bitmap_zero(map, page->objects);
53e15af03   Christoph Lameter   slub: validation ...
3245

7656c72b5   Christoph Lameter   SLUB: add macros ...
3246
3247
  	for_each_free_object(p, s, page->freelist) {
  		set_bit(slab_index(p, s, addr), map);
53e15af03   Christoph Lameter   slub: validation ...
3248
3249
3250
  		if (!check_object(s, page, p, 0))
  			return 0;
  	}
224a88be4   Christoph Lameter   slub: for_each_ob...
3251
  	for_each_object(p, s, addr, page->objects)
7656c72b5   Christoph Lameter   SLUB: add macros ...
3252
  		if (!test_bit(slab_index(p, s, addr), map))
53e15af03   Christoph Lameter   slub: validation ...
3253
3254
3255
3256
  			if (!check_object(s, page, p, 1))
  				return 0;
  	return 1;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3257
3258
  static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3259
3260
  {
  	if (slab_trylock(page)) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3261
  		validate_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3262
3263
3264
3265
3266
3267
3268
  		slab_unlock(page);
  	} else
  		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p
  ",
  			s->name, page);
  
  	if (s->flags & DEBUG_DEFAULT_FLAGS) {
8a38082d2   Andy Whitcroft   slub: record page...
3269
3270
  		if (!PageSlubDebug(page))
  			printk(KERN_ERR "SLUB %s: SlubDebug not set "
53e15af03   Christoph Lameter   slub: validation ...
3271
3272
3273
  				"on slab 0x%p
  ", s->name, page);
  	} else {
8a38082d2   Andy Whitcroft   slub: record page...
3274
3275
  		if (PageSlubDebug(page))
  			printk(KERN_ERR "SLUB %s: SlubDebug set on "
53e15af03   Christoph Lameter   slub: validation ...
3276
3277
3278
3279
  				"slab 0x%p
  ", s->name, page);
  	}
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3280
3281
  static int validate_slab_node(struct kmem_cache *s,
  		struct kmem_cache_node *n, unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3282
3283
3284
3285
3286
3287
3288
3289
  {
  	unsigned long count = 0;
  	struct page *page;
  	unsigned long flags;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  
  	list_for_each_entry(page, &n->partial, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3290
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
  		count++;
  	}
  	if (count != n->nr_partial)
  		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  			"counter=%ld
  ", s->name, count, n->nr_partial);
  
  	if (!(s->flags & SLAB_STORE_USER))
  		goto out;
  
  	list_for_each_entry(page, &n->full, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3302
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
  		count++;
  	}
  	if (count != atomic_long_read(&n->nr_slabs))
  		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  			"counter=%ld
  ", s->name, count,
  			atomic_long_read(&n->nr_slabs));
  
  out:
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return count;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3315
  static long validate_slab_cache(struct kmem_cache *s)
53e15af03   Christoph Lameter   slub: validation ...
3316
3317
3318
  {
  	int node;
  	unsigned long count = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
3319
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245dd   Christoph Lameter   SLUB: Do not allo...
3320
3321
3322
3323
  				sizeof(unsigned long), GFP_KERNEL);
  
  	if (!map)
  		return -ENOMEM;
53e15af03   Christoph Lameter   slub: validation ...
3324
3325
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3326
  	for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af03   Christoph Lameter   slub: validation ...
3327
  		struct kmem_cache_node *n = get_node(s, node);
434e245dd   Christoph Lameter   SLUB: Do not allo...
3328
  		count += validate_slab_node(s, n, map);
53e15af03   Christoph Lameter   slub: validation ...
3329
  	}
434e245dd   Christoph Lameter   SLUB: Do not allo...
3330
  	kfree(map);
53e15af03   Christoph Lameter   slub: validation ...
3331
3332
  	return count;
  }
b34597090   Christoph Lameter   SLUB: move resili...
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
  #ifdef SLUB_RESILIENCY_TEST
  static void resiliency_test(void)
  {
  	u8 *p;
  
  	printk(KERN_ERR "SLUB resiliency testing
  ");
  	printk(KERN_ERR "-----------------------
  ");
  	printk(KERN_ERR "A. Corruption after allocation
  ");
  
  	p = kzalloc(16, GFP_KERNEL);
  	p[16] = 0x12;
  	printk(KERN_ERR "
  1. kmalloc-16: Clobber Redzone/next pointer"
  			" 0x12->0x%p
  
  ", p + 16);
  
  	validate_slab_cache(kmalloc_caches + 4);
  
  	/* Hmmm... The next two are dangerous */
  	p = kzalloc(32, GFP_KERNEL);
  	p[32 + sizeof(void *)] = 0x34;
  	printk(KERN_ERR "
  2. kmalloc-32: Clobber next pointer/next slab"
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3360
3361
3362
3363
3364
3365
  			" 0x34 -> -0x%p
  ", p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable
  
  ");
b34597090   Christoph Lameter   SLUB: move resili...
3366
3367
3368
3369
3370
3371
3372
3373
3374
  
  	validate_slab_cache(kmalloc_caches + 5);
  	p = kzalloc(64, GFP_KERNEL);
  	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  	*p = 0x56;
  	printk(KERN_ERR "
  3. kmalloc-64: corrupting random byte 0x56->0x%p
  ",
  									p);
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3375
3376
3377
3378
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable
  
  ");
b34597090   Christoph Lameter   SLUB: move resili...
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
  	validate_slab_cache(kmalloc_caches + 6);
  
  	printk(KERN_ERR "
  B. Corruption after free
  ");
  	p = kzalloc(128, GFP_KERNEL);
  	kfree(p);
  	*p = 0x78;
  	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p
  
  ", p);
  	validate_slab_cache(kmalloc_caches + 7);
  
  	p = kzalloc(256, GFP_KERNEL);
  	kfree(p);
  	p[50] = 0x9a;
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3395
3396
3397
3398
3399
  	printk(KERN_ERR "
  2. kmalloc-256: Clobber 50th byte 0x9a->0x%p
  
  ",
  			p);
b34597090   Christoph Lameter   SLUB: move resili...
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
  	validate_slab_cache(kmalloc_caches + 8);
  
  	p = kzalloc(512, GFP_KERNEL);
  	kfree(p);
  	p[512] = 0xab;
  	printk(KERN_ERR "
  3. kmalloc-512: Clobber redzone 0xab->0x%p
  
  ", p);
  	validate_slab_cache(kmalloc_caches + 9);
  }
  #else
  static void resiliency_test(void) {};
  #endif
88a420e4e   Christoph Lameter   slub: add ability...
3414
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3415
   * Generate lists of code addresses where slabcache objects are allocated
88a420e4e   Christoph Lameter   slub: add ability...
3416
3417
3418
3419
3420
   * and freed.
   */
  
  struct location {
  	unsigned long count;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3421
  	unsigned long addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3422
3423
3424
3425
3426
  	long long sum_time;
  	long min_time;
  	long max_time;
  	long min_pid;
  	long max_pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3427
  	DECLARE_BITMAP(cpus, NR_CPUS);
45edfa580   Christoph Lameter   SLUB: include lif...
3428
  	nodemask_t nodes;
88a420e4e   Christoph Lameter   slub: add ability...
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
  };
  
  struct loc_track {
  	unsigned long max;
  	unsigned long count;
  	struct location *loc;
  };
  
  static void free_loc_track(struct loc_track *t)
  {
  	if (t->max)
  		free_pages((unsigned long)t->loc,
  			get_order(sizeof(struct location) * t->max));
  }
68dff6a9a   Christoph Lameter   SLUB slab validat...
3443
  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4e   Christoph Lameter   slub: add ability...
3444
3445
3446
  {
  	struct location *l;
  	int order;
88a420e4e   Christoph Lameter   slub: add ability...
3447
  	order = get_order(sizeof(struct location) * max);
68dff6a9a   Christoph Lameter   SLUB slab validat...
3448
  	l = (void *)__get_free_pages(flags, order);
88a420e4e   Christoph Lameter   slub: add ability...
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
  	if (!l)
  		return 0;
  
  	if (t->count) {
  		memcpy(l, t->loc, sizeof(struct location) * t->count);
  		free_loc_track(t);
  	}
  	t->max = max;
  	t->loc = l;
  	return 1;
  }
  
  static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa580   Christoph Lameter   SLUB: include lif...
3462
  				const struct track *track)
88a420e4e   Christoph Lameter   slub: add ability...
3463
3464
3465
  {
  	long start, end, pos;
  	struct location *l;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3466
  	unsigned long caddr;
45edfa580   Christoph Lameter   SLUB: include lif...
3467
  	unsigned long age = jiffies - track->when;
88a420e4e   Christoph Lameter   slub: add ability...
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
  
  	start = -1;
  	end = t->count;
  
  	for ( ; ; ) {
  		pos = start + (end - start + 1) / 2;
  
  		/*
  		 * There is nothing at "end". If we end up there
  		 * we need to add something to before end.
  		 */
  		if (pos == end)
  			break;
  
  		caddr = t->loc[pos].addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
  		if (track->addr == caddr) {
  
  			l = &t->loc[pos];
  			l->count++;
  			if (track->when) {
  				l->sum_time += age;
  				if (age < l->min_time)
  					l->min_time = age;
  				if (age > l->max_time)
  					l->max_time = age;
  
  				if (track->pid < l->min_pid)
  					l->min_pid = track->pid;
  				if (track->pid > l->max_pid)
  					l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3498
3499
  				cpumask_set_cpu(track->cpu,
  						to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
3500
3501
  			}
  			node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
3502
3503
  			return 1;
  		}
45edfa580   Christoph Lameter   SLUB: include lif...
3504
  		if (track->addr < caddr)
88a420e4e   Christoph Lameter   slub: add ability...
3505
3506
3507
3508
3509
3510
  			end = pos;
  		else
  			start = pos;
  	}
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3511
  	 * Not found. Insert new tracking element.
88a420e4e   Christoph Lameter   slub: add ability...
3512
  	 */
68dff6a9a   Christoph Lameter   SLUB slab validat...
3513
  	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4e   Christoph Lameter   slub: add ability...
3514
3515
3516
3517
3518
3519
3520
3521
  		return 0;
  
  	l = t->loc + pos;
  	if (pos < t->count)
  		memmove(l + 1, l,
  			(t->count - pos) * sizeof(struct location));
  	t->count++;
  	l->count = 1;
45edfa580   Christoph Lameter   SLUB: include lif...
3522
3523
3524
3525
3526
3527
  	l->addr = track->addr;
  	l->sum_time = age;
  	l->min_time = age;
  	l->max_time = age;
  	l->min_pid = track->pid;
  	l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3528
3529
  	cpumask_clear(to_cpumask(l->cpus));
  	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
3530
3531
  	nodes_clear(l->nodes);
  	node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
3532
3533
3534
3535
  	return 1;
  }
  
  static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57bf   Eric Dumazet   slub: Potential s...
3536
3537
  		struct page *page, enum track_item alloc,
  		long *map)
88a420e4e   Christoph Lameter   slub: add ability...
3538
  {
a973e9dd1   Christoph Lameter   Revert "unique en...
3539
  	void *addr = page_address(page);
88a420e4e   Christoph Lameter   slub: add ability...
3540
  	void *p;
39b264641   Christoph Lameter   slub: Store max n...
3541
  	bitmap_zero(map, page->objects);
7656c72b5   Christoph Lameter   SLUB: add macros ...
3542
3543
  	for_each_free_object(p, s, page->freelist)
  		set_bit(slab_index(p, s, addr), map);
88a420e4e   Christoph Lameter   slub: add ability...
3544

224a88be4   Christoph Lameter   slub: for_each_ob...
3545
  	for_each_object(p, s, addr, page->objects)
45edfa580   Christoph Lameter   SLUB: include lif...
3546
3547
  		if (!test_bit(slab_index(p, s, addr), map))
  			add_location(t, s, get_track(s, p, alloc));
88a420e4e   Christoph Lameter   slub: add ability...
3548
3549
3550
3551
3552
  }
  
  static int list_locations(struct kmem_cache *s, char *buf,
  					enum track_item alloc)
  {
e374d4835   Harvey Harrison   slub: fix shadowe...
3553
  	int len = 0;
88a420e4e   Christoph Lameter   slub: add ability...
3554
  	unsigned long i;
68dff6a9a   Christoph Lameter   SLUB slab validat...
3555
  	struct loc_track t = { 0, 0, NULL };
88a420e4e   Christoph Lameter   slub: add ability...
3556
  	int node;
bbd7d57bf   Eric Dumazet   slub: Potential s...
3557
3558
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
  				     sizeof(unsigned long), GFP_KERNEL);
88a420e4e   Christoph Lameter   slub: add ability...
3559

bbd7d57bf   Eric Dumazet   slub: Potential s...
3560
3561
3562
  	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
  				     GFP_TEMPORARY)) {
  		kfree(map);
68dff6a9a   Christoph Lameter   SLUB slab validat...
3563
3564
  		return sprintf(buf, "Out of memory
  ");
bbd7d57bf   Eric Dumazet   slub: Potential s...
3565
  	}
88a420e4e   Christoph Lameter   slub: add ability...
3566
3567
  	/* Push back cpu slabs */
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3568
  	for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4e   Christoph Lameter   slub: add ability...
3569
3570
3571
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long flags;
  		struct page *page;
9e86943b6   Christoph Lameter   SLUB: use atomic_...
3572
  		if (!atomic_long_read(&n->nr_slabs))
88a420e4e   Christoph Lameter   slub: add ability...
3573
3574
3575
3576
  			continue;
  
  		spin_lock_irqsave(&n->list_lock, flags);
  		list_for_each_entry(page, &n->partial, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
3577
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
3578
  		list_for_each_entry(page, &n->full, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
3579
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
3580
3581
3582
3583
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	for (i = 0; i < t.count; i++) {
45edfa580   Christoph Lameter   SLUB: include lif...
3584
  		struct location *l = &t.loc[i];
88a420e4e   Christoph Lameter   slub: add ability...
3585

9c2462472   Hugh Dickins   KSYM_SYMBOL_LEN f...
3586
  		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4e   Christoph Lameter   slub: add ability...
3587
  			break;
e374d4835   Harvey Harrison   slub: fix shadowe...
3588
  		len += sprintf(buf + len, "%7ld ", l->count);
45edfa580   Christoph Lameter   SLUB: include lif...
3589
3590
  
  		if (l->addr)
e374d4835   Harvey Harrison   slub: fix shadowe...
3591
  			len += sprint_symbol(buf + len, (unsigned long)l->addr);
88a420e4e   Christoph Lameter   slub: add ability...
3592
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
3593
  			len += sprintf(buf + len, "<not-available>");
45edfa580   Christoph Lameter   SLUB: include lif...
3594
3595
  
  		if (l->sum_time != l->min_time) {
e374d4835   Harvey Harrison   slub: fix shadowe...
3596
  			len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258e   Roman Zippel   remove div_long_l...
3597
3598
3599
  				l->min_time,
  				(long)div_u64(l->sum_time, l->count),
  				l->max_time);
45edfa580   Christoph Lameter   SLUB: include lif...
3600
  		} else
e374d4835   Harvey Harrison   slub: fix shadowe...
3601
  			len += sprintf(buf + len, " age=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
3602
3603
3604
  				l->min_time);
  
  		if (l->min_pid != l->max_pid)
e374d4835   Harvey Harrison   slub: fix shadowe...
3605
  			len += sprintf(buf + len, " pid=%ld-%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
3606
3607
  				l->min_pid, l->max_pid);
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
3608
  			len += sprintf(buf + len, " pid=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
3609
  				l->min_pid);
174596a0b   Rusty Russell   cpumask: convert mm/
3610
3611
  		if (num_online_cpus() > 1 &&
  				!cpumask_empty(to_cpumask(l->cpus)) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
3612
3613
3614
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " cpus=");
  			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
174596a0b   Rusty Russell   cpumask: convert mm/
3615
  						 to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
3616
  		}
62bc62a87   Christoph Lameter   page allocator: u...
3617
  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
3618
3619
3620
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " nodes=");
  			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa580   Christoph Lameter   SLUB: include lif...
3621
3622
  					l->nodes);
  		}
e374d4835   Harvey Harrison   slub: fix shadowe...
3623
3624
  		len += sprintf(buf + len, "
  ");
88a420e4e   Christoph Lameter   slub: add ability...
3625
3626
3627
  	}
  
  	free_loc_track(&t);
bbd7d57bf   Eric Dumazet   slub: Potential s...
3628
  	kfree(map);
88a420e4e   Christoph Lameter   slub: add ability...
3629
  	if (!t.count)
e374d4835   Harvey Harrison   slub: fix shadowe...
3630
3631
3632
  		len += sprintf(buf, "No data
  ");
  	return len;
88a420e4e   Christoph Lameter   slub: add ability...
3633
  }
81819f0fc   Christoph Lameter   SLUB core
3634
  enum slab_stat_type {
205ab99dd   Christoph Lameter   slub: Update stat...
3635
3636
3637
3638
3639
  	SL_ALL,			/* All slabs */
  	SL_PARTIAL,		/* Only partially allocated slabs */
  	SL_CPU,			/* Only slabs used for cpu caches */
  	SL_OBJECTS,		/* Determine allocated objects not slabs */
  	SL_TOTAL		/* Determine object capacity not slabs */
81819f0fc   Christoph Lameter   SLUB core
3640
  };
205ab99dd   Christoph Lameter   slub: Update stat...
3641
  #define SO_ALL		(1 << SL_ALL)
81819f0fc   Christoph Lameter   SLUB core
3642
3643
3644
  #define SO_PARTIAL	(1 << SL_PARTIAL)
  #define SO_CPU		(1 << SL_CPU)
  #define SO_OBJECTS	(1 << SL_OBJECTS)
205ab99dd   Christoph Lameter   slub: Update stat...
3645
  #define SO_TOTAL	(1 << SL_TOTAL)
81819f0fc   Christoph Lameter   SLUB core
3646

62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
3647
3648
  static ssize_t show_slab_objects(struct kmem_cache *s,
  			    char *buf, unsigned long flags)
81819f0fc   Christoph Lameter   SLUB core
3649
3650
  {
  	unsigned long total = 0;
81819f0fc   Christoph Lameter   SLUB core
3651
3652
3653
3654
3655
3656
  	int node;
  	int x;
  	unsigned long *nodes;
  	unsigned long *per_cpu;
  
  	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
3657
3658
  	if (!nodes)
  		return -ENOMEM;
81819f0fc   Christoph Lameter   SLUB core
3659
  	per_cpu = nodes + nr_node_ids;
205ab99dd   Christoph Lameter   slub: Update stat...
3660
3661
  	if (flags & SO_CPU) {
  		int cpu;
81819f0fc   Christoph Lameter   SLUB core
3662

205ab99dd   Christoph Lameter   slub: Update stat...
3663
  		for_each_possible_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3664
  			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
3665

205ab99dd   Christoph Lameter   slub: Update stat...
3666
3667
3668
3669
3670
3671
3672
3673
  			if (!c || c->node < 0)
  				continue;
  
  			if (c->page) {
  					if (flags & SO_TOTAL)
  						x = c->page->objects;
  				else if (flags & SO_OBJECTS)
  					x = c->page->inuse;
81819f0fc   Christoph Lameter   SLUB core
3674
3675
  				else
  					x = 1;
205ab99dd   Christoph Lameter   slub: Update stat...
3676

81819f0fc   Christoph Lameter   SLUB core
3677
  				total += x;
205ab99dd   Christoph Lameter   slub: Update stat...
3678
  				nodes[c->node] += x;
81819f0fc   Christoph Lameter   SLUB core
3679
  			}
205ab99dd   Christoph Lameter   slub: Update stat...
3680
  			per_cpu[c->node]++;
81819f0fc   Christoph Lameter   SLUB core
3681
3682
  		}
  	}
205ab99dd   Christoph Lameter   slub: Update stat...
3683
3684
3685
3686
3687
3688
3689
3690
3691
  	if (flags & SO_ALL) {
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
  
  		if (flags & SO_TOTAL)
  			x = atomic_long_read(&n->total_objects);
  		else if (flags & SO_OBJECTS)
  			x = atomic_long_read(&n->total_objects) -
  				count_partial(n, count_free);
81819f0fc   Christoph Lameter   SLUB core
3692

81819f0fc   Christoph Lameter   SLUB core
3693
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
3694
  				x = atomic_long_read(&n->nr_slabs);
81819f0fc   Christoph Lameter   SLUB core
3695
3696
3697
  			total += x;
  			nodes[node] += x;
  		}
205ab99dd   Christoph Lameter   slub: Update stat...
3698
3699
3700
  	} else if (flags & SO_PARTIAL) {
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
81819f0fc   Christoph Lameter   SLUB core
3701

205ab99dd   Christoph Lameter   slub: Update stat...
3702
3703
3704
3705
  			if (flags & SO_TOTAL)
  				x = count_partial(n, count_total);
  			else if (flags & SO_OBJECTS)
  				x = count_partial(n, count_inuse);
81819f0fc   Christoph Lameter   SLUB core
3706
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
3707
  				x = n->nr_partial;
81819f0fc   Christoph Lameter   SLUB core
3708
3709
3710
3711
  			total += x;
  			nodes[node] += x;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
3712
3713
  	x = sprintf(buf, "%lu", total);
  #ifdef CONFIG_NUMA
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3714
  	for_each_node_state(node, N_NORMAL_MEMORY)
81819f0fc   Christoph Lameter   SLUB core
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
  		if (nodes[node])
  			x += sprintf(buf + x, " N%d=%lu",
  					node, nodes[node]);
  #endif
  	kfree(nodes);
  	return x + sprintf(buf + x, "
  ");
  }
  
  static int any_slab_objects(struct kmem_cache *s)
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
3727

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
3728
  	for_each_online_node(node) {
81819f0fc   Christoph Lameter   SLUB core
3729
  		struct kmem_cache_node *n = get_node(s, node);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
3730
3731
  		if (!n)
  			continue;
4ea33e2dc   Benjamin Herrenschmidt   slub: fix atomic ...
3732
  		if (atomic_long_read(&n->total_objects))
81819f0fc   Christoph Lameter   SLUB core
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
  			return 1;
  	}
  	return 0;
  }
  
  #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
  #define to_slab(n) container_of(n, struct kmem_cache, kobj);
  
  struct slab_attribute {
  	struct attribute attr;
  	ssize_t (*show)(struct kmem_cache *s, char *buf);
  	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  };
  
  #define SLAB_ATTR_RO(_name) \
  	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
  
  #define SLAB_ATTR(_name) \
  	static struct slab_attribute _name##_attr =  \
  	__ATTR(_name, 0644, _name##_show, _name##_store)
81819f0fc   Christoph Lameter   SLUB core
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
  static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->size);
  }
  SLAB_ATTR_RO(slab_size);
  
  static ssize_t align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->align);
  }
  SLAB_ATTR_RO(align);
  
  static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->objsize);
  }
  SLAB_ATTR_RO(object_size);
  
  static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3776
3777
  	return sprintf(buf, "%d
  ", oo_objects(s->oo));
81819f0fc   Christoph Lameter   SLUB core
3778
3779
  }
  SLAB_ATTR_RO(objs_per_slab);
06b285dc3   Christoph Lameter   slub: Make the or...
3780
3781
3782
  static ssize_t order_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
3783
3784
3785
3786
3787
3788
  	unsigned long order;
  	int err;
  
  	err = strict_strtoul(buf, 10, &order);
  	if (err)
  		return err;
06b285dc3   Christoph Lameter   slub: Make the or...
3789
3790
3791
3792
3793
3794
3795
  
  	if (order > slub_max_order || order < slub_min_order)
  		return -EINVAL;
  
  	calculate_sizes(s, order);
  	return length;
  }
81819f0fc   Christoph Lameter   SLUB core
3796
3797
  static ssize_t order_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3798
3799
  	return sprintf(buf, "%d
  ", oo_order(s->oo));
81819f0fc   Christoph Lameter   SLUB core
3800
  }
06b285dc3   Christoph Lameter   slub: Make the or...
3801
  SLAB_ATTR(order);
81819f0fc   Christoph Lameter   SLUB core
3802

73d342b16   David Rientjes   slub: add min_par...
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
  static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%lu
  ", s->min_partial);
  }
  
  static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long min;
  	int err;
  
  	err = strict_strtoul(buf, 10, &min);
  	if (err)
  		return err;
c0bdb232b   David Rientjes   slub: rename calc...
3818
  	set_min_partial(s, min);
73d342b16   David Rientjes   slub: add min_par...
3819
3820
3821
  	return length;
  }
  SLAB_ATTR(min_partial);
81819f0fc   Christoph Lameter   SLUB core
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
  static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  {
  	if (s->ctor) {
  		int n = sprint_symbol(buf, (unsigned long)s->ctor);
  
  		return n + sprintf(buf + n, "
  ");
  	}
  	return 0;
  }
  SLAB_ATTR_RO(ctor);
81819f0fc   Christoph Lameter   SLUB core
3833
3834
3835
3836
3837
3838
3839
3840
3841
  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->refcount - 1);
  }
  SLAB_ATTR_RO(aliases);
  
  static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
3842
  	return show_slab_objects(s, buf, SO_ALL);
81819f0fc   Christoph Lameter   SLUB core
3843
3844
3845
3846
3847
  }
  SLAB_ATTR_RO(slabs);
  
  static ssize_t partial_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
3848
  	return show_slab_objects(s, buf, SO_PARTIAL);
81819f0fc   Christoph Lameter   SLUB core
3849
3850
3851
3852
3853
  }
  SLAB_ATTR_RO(partial);
  
  static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
3854
  	return show_slab_objects(s, buf, SO_CPU);
81819f0fc   Christoph Lameter   SLUB core
3855
3856
3857
3858
3859
  }
  SLAB_ATTR_RO(cpu_slabs);
  
  static ssize_t objects_show(struct kmem_cache *s, char *buf)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
3860
  	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0fc   Christoph Lameter   SLUB core
3861
3862
  }
  SLAB_ATTR_RO(objects);
205ab99dd   Christoph Lameter   slub: Update stat...
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
  static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
  }
  SLAB_ATTR_RO(objects_partial);
  
  static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
  }
  SLAB_ATTR_RO(total_objects);
81819f0fc   Christoph Lameter   SLUB core
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
  static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DEBUG_FREE));
  }
  
  static ssize_t sanity_checks_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_DEBUG_FREE;
  	if (buf[0] == '1')
  		s->flags |= SLAB_DEBUG_FREE;
  	return length;
  }
  SLAB_ATTR(sanity_checks);
  
  static ssize_t trace_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_TRACE));
  }
  
  static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_TRACE;
  	if (buf[0] == '1')
  		s->flags |= SLAB_TRACE;
  	return length;
  }
  SLAB_ATTR(trace);
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
  #ifdef CONFIG_FAILSLAB
  static ssize_t failslab_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_FAILSLAB));
  }
  
  static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_FAILSLAB;
  	if (buf[0] == '1')
  		s->flags |= SLAB_FAILSLAB;
  	return length;
  }
  SLAB_ATTR(failslab);
  #endif
81819f0fc   Christoph Lameter   SLUB core
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  }
  
  static ssize_t reclaim_account_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  	if (buf[0] == '1')
  		s->flags |= SLAB_RECLAIM_ACCOUNT;
  	return length;
  }
  SLAB_ATTR(reclaim_account);
  
  static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  {
5af608399   Christoph Lameter   slab allocators: ...
3940
3941
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_HWCACHE_ALIGN));
81819f0fc   Christoph Lameter   SLUB core
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
  }
  SLAB_ATTR_RO(hwcache_align);
  
  #ifdef CONFIG_ZONE_DMA
  static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_CACHE_DMA));
  }
  SLAB_ATTR_RO(cache_dma);
  #endif
  
  static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DESTROY_BY_RCU));
  }
  SLAB_ATTR_RO(destroy_by_rcu);
  
  static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RED_ZONE));
  }
  
  static ssize_t red_zone_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_RED_ZONE;
  	if (buf[0] == '1')
  		s->flags |= SLAB_RED_ZONE;
06b285dc3   Christoph Lameter   slub: Make the or...
3976
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
  	return length;
  }
  SLAB_ATTR(red_zone);
  
  static ssize_t poison_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_POISON));
  }
  
  static ssize_t poison_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_POISON;
  	if (buf[0] == '1')
  		s->flags |= SLAB_POISON;
06b285dc3   Christoph Lameter   slub: Make the or...
3996
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
  	return length;
  }
  SLAB_ATTR(poison);
  
  static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_STORE_USER));
  }
  
  static ssize_t store_user_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_STORE_USER;
  	if (buf[0] == '1')
  		s->flags |= SLAB_STORE_USER;
06b285dc3   Christoph Lameter   slub: Make the or...
4016
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4017
4018
4019
  	return length;
  }
  SLAB_ATTR(store_user);
53e15af03   Christoph Lameter   slub: validation ...
4020
4021
4022
4023
4024
4025
4026
4027
  static ssize_t validate_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t validate_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
434e245dd   Christoph Lameter   SLUB: Do not allo...
4028
4029
4030
4031
4032
4033
4034
4035
  	int ret = -EINVAL;
  
  	if (buf[0] == '1') {
  		ret = validate_slab_cache(s);
  		if (ret >= 0)
  			ret = length;
  	}
  	return ret;
53e15af03   Christoph Lameter   slub: validation ...
4036
4037
  }
  SLAB_ATTR(validate);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
  static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t shrink_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
  	if (buf[0] == '1') {
  		int rc = kmem_cache_shrink(s);
  
  		if (rc)
  			return rc;
  	} else
  		return -EINVAL;
  	return length;
  }
  SLAB_ATTR(shrink);
88a420e4e   Christoph Lameter   slub: add ability...
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
  static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_ALLOC);
  }
  SLAB_ATTR_RO(alloc_calls);
  
  static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_FREE);
  }
  SLAB_ATTR_RO(free_calls);
81819f0fc   Christoph Lameter   SLUB core
4071
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4072
  static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0fc   Christoph Lameter   SLUB core
4073
  {
9824601ea   Christoph Lameter   SLUB: rename defr...
4074
4075
  	return sprintf(buf, "%d
  ", s->remote_node_defrag_ratio / 10);
81819f0fc   Christoph Lameter   SLUB core
4076
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4077
  static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
4078
4079
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4080
4081
4082
4083
4084
4085
  	unsigned long ratio;
  	int err;
  
  	err = strict_strtoul(buf, 10, &ratio);
  	if (err)
  		return err;
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
4086
  	if (ratio <= 100)
0121c619d   Christoph Lameter   slub: Whitespace ...
4087
  		s->remote_node_defrag_ratio = ratio * 10;
81819f0fc   Christoph Lameter   SLUB core
4088

81819f0fc   Christoph Lameter   SLUB core
4089
4090
  	return length;
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4091
  SLAB_ATTR(remote_node_defrag_ratio);
81819f0fc   Christoph Lameter   SLUB core
4092
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4093
  #ifdef CONFIG_SLUB_STATS
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
  static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
  {
  	unsigned long sum  = 0;
  	int cpu;
  	int len;
  	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
  
  	if (!data)
  		return -ENOMEM;
  
  	for_each_online_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4105
  		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4106
4107
4108
4109
4110
4111
  
  		data[cpu] = x;
  		sum += x;
  	}
  
  	len = sprintf(buf, "%lu", sum);
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4112
  #ifdef CONFIG_SMP
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4113
4114
  	for_each_online_cpu(cpu) {
  		if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4115
  			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4116
  	}
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4117
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4118
4119
4120
4121
  	kfree(data);
  	return len + sprintf(buf + len, "
  ");
  }
78eb00cc5   David Rientjes   slub: allow stats...
4122
4123
4124
4125
4126
  static void clear_stat(struct kmem_cache *s, enum stat_item si)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4127
  		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc5   David Rientjes   slub: allow stats...
4128
  }
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4129
4130
4131
4132
4133
  #define STAT_ATTR(si, text) 					\
  static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
  {								\
  	return show_stat(s, buf, si);				\
  }								\
78eb00cc5   David Rientjes   slub: allow stats...
4134
4135
4136
4137
4138
4139
4140
4141
4142
  static ssize_t text##_store(struct kmem_cache *s,		\
  				const char *buf, size_t length)	\
  {								\
  	if (buf[0] != '0')					\
  		return -EINVAL;					\
  	clear_stat(s, si);					\
  	return length;						\
  }								\
  SLAB_ATTR(text);						\
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
  
  STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
  STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
  STAT_ATTR(FREE_FASTPATH, free_fastpath);
  STAT_ATTR(FREE_SLOWPATH, free_slowpath);
  STAT_ATTR(FREE_FROZEN, free_frozen);
  STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
  STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
  STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
  STAT_ATTR(ALLOC_SLAB, alloc_slab);
  STAT_ATTR(ALLOC_REFILL, alloc_refill);
  STAT_ATTR(FREE_SLAB, free_slab);
  STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
  STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
  STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
  STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
  STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
  STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
65c3376aa   Christoph Lameter   slub: Fallback to...
4161
  STAT_ATTR(ORDER_FALLBACK, order_fallback);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4162
  #endif
064287807   Pekka Enberg   SLUB: Fix coding ...
4163
  static struct attribute *slab_attrs[] = {
81819f0fc   Christoph Lameter   SLUB core
4164
4165
4166
4167
  	&slab_size_attr.attr,
  	&object_size_attr.attr,
  	&objs_per_slab_attr.attr,
  	&order_attr.attr,
73d342b16   David Rientjes   slub: add min_par...
4168
  	&min_partial_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4169
  	&objects_attr.attr,
205ab99dd   Christoph Lameter   slub: Update stat...
4170
4171
  	&objects_partial_attr.attr,
  	&total_objects_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4172
4173
4174
4175
  	&slabs_attr.attr,
  	&partial_attr.attr,
  	&cpu_slabs_attr.attr,
  	&ctor_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
  	&aliases_attr.attr,
  	&align_attr.attr,
  	&sanity_checks_attr.attr,
  	&trace_attr.attr,
  	&hwcache_align_attr.attr,
  	&reclaim_account_attr.attr,
  	&destroy_by_rcu_attr.attr,
  	&red_zone_attr.attr,
  	&poison_attr.attr,
  	&store_user_attr.attr,
53e15af03   Christoph Lameter   slub: validation ...
4186
  	&validate_attr.attr,
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
4187
  	&shrink_attr.attr,
88a420e4e   Christoph Lameter   slub: add ability...
4188
4189
  	&alloc_calls_attr.attr,
  	&free_calls_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4190
4191
4192
4193
  #ifdef CONFIG_ZONE_DMA
  	&cache_dma_attr.attr,
  #endif
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4194
  	&remote_node_defrag_ratio_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4195
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
  #ifdef CONFIG_SLUB_STATS
  	&alloc_fastpath_attr.attr,
  	&alloc_slowpath_attr.attr,
  	&free_fastpath_attr.attr,
  	&free_slowpath_attr.attr,
  	&free_frozen_attr.attr,
  	&free_add_partial_attr.attr,
  	&free_remove_partial_attr.attr,
  	&alloc_from_partial_attr.attr,
  	&alloc_slab_attr.attr,
  	&alloc_refill_attr.attr,
  	&free_slab_attr.attr,
  	&cpuslab_flush_attr.attr,
  	&deactivate_full_attr.attr,
  	&deactivate_empty_attr.attr,
  	&deactivate_to_head_attr.attr,
  	&deactivate_to_tail_attr.attr,
  	&deactivate_remote_frees_attr.attr,
65c3376aa   Christoph Lameter   slub: Fallback to...
4214
  	&order_fallback_attr.attr,
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4215
  #endif
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
4216
4217
4218
  #ifdef CONFIG_FAILSLAB
  	&failslab_attr.attr,
  #endif
81819f0fc   Christoph Lameter   SLUB core
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
  	NULL
  };
  
  static struct attribute_group slab_attr_group = {
  	.attrs = slab_attrs,
  };
  
  static ssize_t slab_attr_show(struct kobject *kobj,
  				struct attribute *attr,
  				char *buf)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->show)
  		return -EIO;
  
  	err = attribute->show(s, buf);
  
  	return err;
  }
  
  static ssize_t slab_attr_store(struct kobject *kobj,
  				struct attribute *attr,
  				const char *buf, size_t len)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->store)
  		return -EIO;
  
  	err = attribute->store(s, buf, len);
  
  	return err;
  }
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4263
4264
4265
4266
4267
4268
  static void kmem_cache_release(struct kobject *kobj)
  {
  	struct kmem_cache *s = to_slab(kobj);
  
  	kfree(s);
  }
52cf25d0a   Emese Revfy   Driver core: Cons...
4269
  static const struct sysfs_ops slab_sysfs_ops = {
81819f0fc   Christoph Lameter   SLUB core
4270
4271
4272
4273
4274
4275
  	.show = slab_attr_show,
  	.store = slab_attr_store,
  };
  
  static struct kobj_type slab_ktype = {
  	.sysfs_ops = &slab_sysfs_ops,
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4276
  	.release = kmem_cache_release
81819f0fc   Christoph Lameter   SLUB core
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
  };
  
  static int uevent_filter(struct kset *kset, struct kobject *kobj)
  {
  	struct kobj_type *ktype = get_ktype(kobj);
  
  	if (ktype == &slab_ktype)
  		return 1;
  	return 0;
  }
9cd43611c   Emese Revfy   kobject: Constify...
4287
  static const struct kset_uevent_ops slab_uevent_ops = {
81819f0fc   Christoph Lameter   SLUB core
4288
4289
  	.filter = uevent_filter,
  };
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4290
  static struct kset *slab_kset;
81819f0fc   Christoph Lameter   SLUB core
4291
4292
4293
4294
  
  #define ID_STR_LENGTH 64
  
  /* Create a unique string id for a slab cache:
6446faa2f   Christoph Lameter   slub: Fix up comm...
4295
4296
   *
   * Format	:[flags-]size
81819f0fc   Christoph Lameter   SLUB core
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
   */
  static char *create_unique_id(struct kmem_cache *s)
  {
  	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  	char *p = name;
  
  	BUG_ON(!name);
  
  	*p++ = ':';
  	/*
  	 * First flags affecting slabcache operations. We will only
  	 * get here for aliasable slabs so we do not need to support
  	 * too many flags. The flags here must cover all flags that
  	 * are matched during merging to guarantee that the id is
  	 * unique.
  	 */
  	if (s->flags & SLAB_CACHE_DMA)
  		*p++ = 'd';
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		*p++ = 'a';
  	if (s->flags & SLAB_DEBUG_FREE)
  		*p++ = 'F';
5a896d9e7   Vegard Nossum   slub: add hooks f...
4319
4320
  	if (!(s->flags & SLAB_NOTRACK))
  		*p++ = 't';
81819f0fc   Christoph Lameter   SLUB core
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
  	if (p != name + 1)
  		*p++ = '-';
  	p += sprintf(p, "%07d", s->size);
  	BUG_ON(p > name + ID_STR_LENGTH - 1);
  	return name;
  }
  
  static int sysfs_slab_add(struct kmem_cache *s)
  {
  	int err;
  	const char *name;
  	int unmergeable;
  
  	if (slab_state < SYSFS)
  		/* Defer until later */
  		return 0;
  
  	unmergeable = slab_unmergeable(s);
  	if (unmergeable) {
  		/*
  		 * Slabcache can never be merged so we can use the name proper.
  		 * This is typically the case for debug situations. In that
  		 * case we can catch duplicate names easily.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4345
  		sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0fc   Christoph Lameter   SLUB core
4346
4347
4348
4349
4350
4351
4352
4353
  		name = s->name;
  	} else {
  		/*
  		 * Create a unique name for the slab as a target
  		 * for the symlinks.
  		 */
  		name = create_unique_id(s);
  	}
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4354
  	s->kobj.kset = slab_kset;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
4355
4356
4357
  	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
  	if (err) {
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4358
  		return err;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
4359
  	}
81819f0fc   Christoph Lameter   SLUB core
4360
4361
  
  	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5788d8ad6   Xiaotian Feng   slub: release kob...
4362
4363
4364
  	if (err) {
  		kobject_del(&s->kobj);
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4365
  		return err;
5788d8ad6   Xiaotian Feng   slub: release kob...
4366
  	}
81819f0fc   Christoph Lameter   SLUB core
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
  	kobject_uevent(&s->kobj, KOBJ_ADD);
  	if (!unmergeable) {
  		/* Setup first alias */
  		sysfs_slab_alias(s, s->name);
  		kfree(name);
  	}
  	return 0;
  }
  
  static void sysfs_slab_remove(struct kmem_cache *s)
  {
  	kobject_uevent(&s->kobj, KOBJ_REMOVE);
  	kobject_del(&s->kobj);
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4380
  	kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4381
4382
4383
4384
  }
  
  /*
   * Need to buffer aliases during bootup until sysfs becomes
9f6c708e5   Nick Andrew   slub: Fix incorre...
4385
   * available lest we lose that information.
81819f0fc   Christoph Lameter   SLUB core
4386
4387
4388
4389
4390
4391
   */
  struct saved_alias {
  	struct kmem_cache *s;
  	const char *name;
  	struct saved_alias *next;
  };
5af328a51   Adrian Bunk   mm/slub.c: make c...
4392
  static struct saved_alias *alias_list;
81819f0fc   Christoph Lameter   SLUB core
4393
4394
4395
4396
4397
4398
4399
4400
4401
  
  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  {
  	struct saved_alias *al;
  
  	if (slab_state == SYSFS) {
  		/*
  		 * If we have a leftover link then remove it.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4402
4403
  		sysfs_remove_link(&slab_kset->kobj, name);
  		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0fc   Christoph Lameter   SLUB core
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
  	}
  
  	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  	if (!al)
  		return -ENOMEM;
  
  	al->s = s;
  	al->name = name;
  	al->next = alias_list;
  	alias_list = al;
  	return 0;
  }
  
  static int __init slab_sysfs_init(void)
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
4419
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
4420
  	int err;
0ff21e466   Greg Kroah-Hartman   kobject: convert ...
4421
  	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4422
  	if (!slab_kset) {
81819f0fc   Christoph Lameter   SLUB core
4423
4424
4425
4426
  		printk(KERN_ERR "Cannot register slab subsystem.
  ");
  		return -ENOSYS;
  	}
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4427
  	slab_state = SYSFS;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
4428
  	list_for_each_entry(s, &slab_caches, list) {
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4429
  		err = sysfs_slab_add(s);
5d540fb71   Christoph Lameter   slub: do not fail...
4430
4431
4432
4433
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
  						" to sysfs
  ", s->name);
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4434
  	}
81819f0fc   Christoph Lameter   SLUB core
4435
4436
4437
4438
4439
4440
  
  	while (alias_list) {
  		struct saved_alias *al = alias_list;
  
  		alias_list = alias_list->next;
  		err = sysfs_slab_alias(al->s, al->name);
5d540fb71   Christoph Lameter   slub: do not fail...
4441
4442
4443
4444
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
  					" %s to sysfs
  ", s->name);
81819f0fc   Christoph Lameter   SLUB core
4445
4446
4447
4448
4449
4450
4451
4452
  		kfree(al);
  	}
  
  	resiliency_test();
  	return 0;
  }
  
  __initcall(slab_sysfs_init);
81819f0fc   Christoph Lameter   SLUB core
4453
  #endif
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4454
4455
4456
4457
  
  /*
   * The /proc/slabinfo ABI
   */
158a96242   Linus Torvalds   Unify /proc/slabi...
4458
  #ifdef CONFIG_SLABINFO
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
  static void print_slabinfo_header(struct seq_file *m)
  {
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  	seq_putc(m, '
  ');
  }
  
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
  
  	down_read(&slub_lock);
  	if (!n)
  		print_slabinfo_header(m);
  
  	return seq_list_start(&slab_caches, *pos);
  }
  
  static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  
  static void s_stop(struct seq_file *m, void *p)
  {
  	up_read(&slub_lock);
  }
  
  static int s_show(struct seq_file *m, void *p)
  {
  	unsigned long nr_partials = 0;
  	unsigned long nr_slabs = 0;
  	unsigned long nr_inuse = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
4497
4498
  	unsigned long nr_objs = 0;
  	unsigned long nr_free = 0;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
  	struct kmem_cache *s;
  	int node;
  
  	s = list_entry(p, struct kmem_cache, list);
  
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  
  		if (!n)
  			continue;
  
  		nr_partials += n->nr_partial;
  		nr_slabs += atomic_long_read(&n->nr_slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
4512
4513
  		nr_objs += atomic_long_read(&n->total_objects);
  		nr_free += count_partial(n, count_free);
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4514
  	}
205ab99dd   Christoph Lameter   slub: Update stat...
4515
  	nr_inuse = nr_objs - nr_free;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4516
4517
  
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4518
4519
  		   nr_objs, s->size, oo_objects(s->oo),
  		   (1 << oo_order(s->oo)));
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4520
4521
4522
4523
4524
4525
4526
  	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
  		   0UL);
  	seq_putc(m, '
  ');
  	return 0;
  }
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
4527
  static const struct seq_operations slabinfo_op = {
57ed3eda9   Pekka J Enberg   slub: provide /pr...
4528
4529
4530
4531
4532
  	.start = s_start,
  	.next = s_next,
  	.stop = s_stop,
  	.show = s_show,
  };
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
cf5d11317   WANG Cong   SLUB: Drop write ...
4547
  	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
4548
4549
4550
  	return 0;
  }
  module_init(slab_proc_init);
158a96242   Linus Torvalds   Unify /proc/slabi...
4551
  #endif /* CONFIG_SLABINFO */