mm/slub.c
  /*
   * SLUB: A slab allocator that limits cache line use instead of queuing
   * objects in per cpu and per node lists.
   *
   * The allocator synchronizes using per slab locks or atomic operations
   * and only uses a centralized lock to manage a pool of partial slabs.
   *
   * (C) 2007 SGI, Christoph Lameter
   * (C) 2011 Linux Foundation, Christoph Lameter
   */
  
  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/module.h>
  #include <linux/bit_spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/bitops.h>
  #include <linux/slab.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/kmemcheck.h>
  #include <linux/cpu.h>
  #include <linux/cpuset.h>
  #include <linux/mempolicy.h>
  #include <linux/ctype.h>
  #include <linux/debugobjects.h>
  #include <linux/kallsyms.h>
  #include <linux/memory.h>
  #include <linux/math64.h>
  #include <linux/fault-inject.h>
  #include <linux/stacktrace.h>

  #include <trace/events/kmem.h>
  /*
   * Lock order:
   *   1. slub_lock (Global Semaphore)
   *   2. node->list_lock
   *   3. slab_lock(page) (Only on some arches and for debugging)
   *
   *   slub_lock
   *
   *   The role of the slub_lock is to protect the list of all the slabs
   *   and to synchronize major metadata changes to slab cache structures.
   *
   *   The slab_lock is only used for debugging and on arches that do not
   *   have the ability to do a cmpxchg_double. It only protects the second
   *   double word in the page struct, meaning:
   *	A. page->freelist	-> List of free objects in a page
   *	B. page->counters	-> Counters of objects
   *	C. page->frozen		-> frozen state
   *
   *   If a slab is frozen then it is exempt from list management. It is not
   *   on any list. The processor that froze the slab is the one who can
   *   perform list operations on the page. Other processors may put objects
   *   onto the freelist but the processor that froze the slab is the only
   *   one that can retrieve the objects from the page's freelist.
   *
   *   The list_lock protects the partial and full list on each node and
   *   the partial slab counter. If taken then no new slabs may be added to or
   *   removed from the lists, nor may the number of partial slabs be modified.
   *   (Note that the total number of slabs is an atomic value that may be
   *   modified without taking the list lock).
   *
   *   The list_lock is a centralized lock and thus we avoid taking it as
   *   much as possible. As long as SLUB does not have to handle partial
   *   slabs, operations can continue without any centralized lock. F.e.
   *   allocating a long series of objects that fill up slabs does not require
   *   the list lock.
   *   Interrupts are disabled during allocation and deallocation in order to
   *   make the slab allocator safe to use in the context of an irq. In addition
   *   interrupts are disabled to ensure that the processor does not change
   *   while handling per_cpu slabs, due to kernel preemption.
   *
   * SLUB assigns one slab for allocation to each processor.
   * Allocations only occur from these slabs called cpu slabs.
   *
   * Slabs with free elements are kept on a partial list and during regular
   * operations no list for full slabs is used. If an object in a full slab is
   * freed then the slab will show up again on the partial lists.
   * We track full slabs for debugging purposes though because otherwise we
   * cannot scan all objects.
   *
   * Slabs are freed when they become empty. Teardown and setup is
   * minimal so we rely on the page allocator's per cpu caches for
   * fast frees and allocs.
   *
   * Overloading of page flags that are otherwise used for LRU management.
   *
   * PageActive 		The slab is frozen and exempt from list processing.
   * 			This means that the slab is dedicated to a purpose
   * 			such as satisfying allocations for a specific
   * 			processor. Objects may be freed in the slab while
   * 			it is frozen but slab_free will then skip the usual
   * 			list operations. It is up to the processor holding
   * 			the slab to integrate the slab into the slab lists
   * 			when the slab is no longer needed.
   *
   * 			One use of this flag is to mark slabs that are
   * 			used for allocations. Then such a slab becomes a cpu
   * 			slab. The cpu slab may be equipped with an additional
   * 			freelist that allows lockless access to
   * 			free objects in addition to the regular freelist
   * 			that requires the slab lock.
   *
   * PageError		Slab requires special handling due to debug
   * 			options set. This moves	slab handling out of
   * 			the fast path and disables lockless freelists.
   */
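  
  /*
   * Editor's illustrative sketch (not part of the original file): a list
   * manipulation that respects the lock order documented above. Only the
   * node's list_lock is needed to move a slab between the per node lists;
   * slab_lock()/cmpxchg_double are used separately to update the
   * freelist/counters word of the page:
   *
   *	spin_lock_irqsave(&n->list_lock, flags);	2. node->list_lock
   *	list_move(&page->lru, &n->partial);		list membership change
   *	spin_unlock_irqrestore(&n->list_lock, flags);
   */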
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DEBUG_FREE)
  
  static inline int kmem_cache_debug(struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
  #else
  	return 0;
  #endif
  }

  /*
   * Issues still to be resolved:
   *
   * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
   *
   * - Variable sizing of the per node arrays
   */
  
  /* Enable to test recovery from slab corruption on boot */
  #undef SLUB_RESILIENCY_TEST
  /* Enable to log cmpxchg failures */
  #undef SLUB_DEBUG_CMPXCHG
  /*
   * Minimum number of partial slabs. These will be left on the partial
   * lists even if they are empty. kmem_cache_shrink may reclaim them.
   */
  #define MIN_PARTIAL 5

  /*
   * Maximum number of desirable partial slabs.
   * The existence of more partial slabs makes kmem_cache_shrink
   * sort the partial list by the number of objects in use.
   */
  #define MAX_PARTIAL 10
  #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  				SLAB_POISON | SLAB_STORE_USER)

  /*
   * Debugging flags that require metadata to be stored in the slab.  These get
   * disabled when slub_debug=O is used and a cache's min order increases with
   * metadata.
   */
  #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  
  /*
   * Set of flags that will prevent slab merging
   */
  #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB)
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  		SLAB_CACHE_DMA | SLAB_NOTRACK)

  #define OO_SHIFT	16
  #define OO_MASK		((1 << OO_SHIFT) - 1)
  #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

  /* Internal SLUB flags */
  #define __OBJECT_POISON		0x80000000UL /* Poison object */
  #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
  
  static int kmem_size = sizeof(struct kmem_cache);
  
  #ifdef CONFIG_SMP
  static struct notifier_block slab_notifier;
  #endif
  
  static enum {
  	DOWN,		/* No slab functionality available */
  	PARTIAL,	/* Kmem_cache_node works */
  	UP,		/* Everything works but does not show up in sysfs */
  	SYSFS		/* Sysfs up */
  } slab_state = DOWN;
  
  /* A list of all slab caches on the system */
  static DECLARE_RWSEM(slub_lock);
  static LIST_HEAD(slab_caches);

  /*
   * Tracking user of a slab.
   */
  #define TRACK_ADDRS_COUNT 16
  struct track {
  	unsigned long addr;	/* Called from address */
  #ifdef CONFIG_STACKTRACE
  	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
  #endif
  	int cpu;		/* Was running on cpu */
  	int pid;		/* Pid context */
  	unsigned long when;	/* When did the operation occur */
  };
  
  enum track_item { TRACK_ALLOC, TRACK_FREE };
  #ifdef CONFIG_SYSFS
  static int sysfs_slab_add(struct kmem_cache *);
  static int sysfs_slab_alias(struct kmem_cache *, const char *);
  static void sysfs_slab_remove(struct kmem_cache *);

  #else
  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
  							{ return 0; }
  static inline void sysfs_slab_remove(struct kmem_cache *s)
  {
  	kfree(s->name);
  	kfree(s);
  }

  #endif
  static inline void stat(const struct kmem_cache *s, enum stat_item si)
  {
  #ifdef CONFIG_SLUB_STATS
  	__this_cpu_inc(s->cpu_slab->stat[si]);
  #endif
  }
  /********************************************************************
   * 			Core slab cache functions
   *******************************************************************/
  
  int slab_is_available(void)
  {
  	return slab_state >= UP;
  }
  
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  /* Verify that a pointer has an address that is valid within a slab page */
  static inline int check_valid_pointer(struct kmem_cache *s,
  				struct page *page, const void *object)
  {
  	void *base;
  	if (!object)
  		return 1;
  	base = page_address(page);
  	if (object < base || object >= base + page->objects * s->size ||
  		(object - base) % s->size) {
  		return 0;
  	}
  
  	return 1;
  }
  static inline void *get_freepointer(struct kmem_cache *s, void *object)
  {
  	return *(void **)(object + s->offset);
  }
  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
  {
  	void *p;
  
  #ifdef CONFIG_DEBUG_PAGEALLOC
  	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
  #else
  	p = get_freepointer(s, object);
  #endif
  	return p;
  }
  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  {
  	*(void **)(object + s->offset) = fp;
  }
  
  /* Loop over all objects in a slab */
  #define for_each_object(__p, __s, __addr, __objects) \
  	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
  			__p += (__s)->size)
  /* Determine object index from a given position */
  static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  {
  	return (p - addr) / s->size;
  }
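  
  /*
   * Editor's worked example (assumed values, not from the original file):
   * for a cache with s->size == 64, object number 3 of a slab starting at
   * addr lives at addr + 3 * 64, so slab_index(addr + 192, s, addr) == 3.
   * A typical walk over every object in a slab then looks like
   *
   *	void *p;
   *	for_each_object(p, s, addr, page->objects)
   *		do_something(s, p);
   *
   * where do_something() stands in for any per object operation.
   */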
  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->objsize;
  
  #endif
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  }
  static inline int order_objects(int order, unsigned long size, int reserved)
  {
  	return ((PAGE_SIZE << order) - reserved) / size;
  }
  static inline struct kmem_cache_order_objects oo_make(int order,
  		unsigned long size, int reserved)
  {
  	struct kmem_cache_order_objects x = {
  		(order << OO_SHIFT) + order_objects(order, size, reserved)
  	};
  
  	return x;
  }
  
  static inline int oo_order(struct kmem_cache_order_objects x)
  {
  	return x.x >> OO_SHIFT;
  }
  
  static inline int oo_objects(struct kmem_cache_order_objects x)
  {
  	return x.x & OO_MASK;
  }
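  
  /*
   * Editor's worked example (assumed numbers): with PAGE_SIZE == 4096,
   * order == 1, size == 256 and reserved == 0, order_objects() yields
   * (4096 << 1) / 256 == 32 objects and oo_make() packs that as
   * x.x == (1 << OO_SHIFT) + 32 == 0x10020; oo_order() and oo_objects()
   * simply unpack the two halves again.
   */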
  /*
   * Per slab locking using the pagelock
   */
  static __always_inline void slab_lock(struct page *page)
  {
  	bit_spin_lock(PG_locked, &page->flags);
  }
  
  static __always_inline void slab_unlock(struct page *page)
  {
  	__bit_spin_unlock(PG_locked, &page->flags);
  }
  /* Interrupts must be disabled (for the fallback code to work right) */
  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  	VM_BUG_ON(!irqs_disabled());
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			return 1;
  		}
  		slab_unlock(page);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
  		void *freelist_old, unsigned long counters_old,
  		void *freelist_new, unsigned long counters_new,
  		const char *n)
  {
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (s->flags & __CMPXCHG_DOUBLE) {
  		if (cmpxchg_double(&page->freelist,
  			freelist_old, counters_old,
  			freelist_new, counters_new))
  		return 1;
  	} else
  #endif
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		slab_lock(page);
  		if (page->freelist == freelist_old && page->counters == counters_old) {
  			page->freelist = freelist_new;
  			page->counters = counters_new;
  			slab_unlock(page);
  			local_irq_restore(flags);
  			return 1;
  		}
  		slab_unlock(page);
  		local_irq_restore(flags);
  	}
  
  	cpu_relax();
  	stat(s, CMPXCHG_DOUBLE_FAIL);
  
  #ifdef SLUB_DEBUG_CMPXCHG
  	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
  #endif
  
  	return 0;
  }
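  
  /*
   * Editor's illustrative sketch (hypothetical caller, not from the original
   * file): callers of cmpxchg_double_slab() snapshot freelist/counters,
   * compute new values and retry until both words are updated atomically:
   *
   *	do {
   *		old_freelist = page->freelist;
   *		old_counters = page->counters;
   *		... build new_freelist / new_counters from the old values ...
   *	} while (!cmpxchg_double_slab(s, page,
   *			old_freelist, old_counters,
   *			new_freelist, new_counters,
   *			"illustration"));
   */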
  #ifdef CONFIG_SLUB_DEBUG
  /*
   * Determine a map of objects in use on a page.
   *
   * The node's list_lock must be held to guarantee that the page does
   * not vanish from under us.
   */
  static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
  {
  	void *p;
  	void *addr = page_address(page);
  
  	for (p = page->freelist; p; p = get_freepointer(s, p))
  		set_bit(slab_index(p, s, addr), map);
  }
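  
  /*
   * Editor's illustrative sketch (hypothetical caller): get_map() is meant to
   * be paired with a zeroed bitmap sized for page->objects; objects whose bit
   * stays clear are the ones currently in use, e.g.
   *
   *	void *p;
   *	void *addr = page_address(page);
   *	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
   *					sizeof(unsigned long), GFP_ATOMIC);
   *
   *	get_map(s, page, map);
   *	for_each_object(p, s, addr, page->objects)
   *		if (!test_bit(slab_index(p, s, addr), map))
   *			check_object(s, page, p, SLUB_RED_ACTIVE);
   */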
  /*
   * Debug settings:
   */
  #ifdef CONFIG_SLUB_DEBUG_ON
  static int slub_debug = DEBUG_DEFAULT_FLAGS;
  #else
  static int slub_debug;
  #endif
  
  static char *slub_debug_slabs;
  static int disable_higher_order_debug;

  /*
   * Object debugging
   */
  static void print_section(char *text, u8 *addr, unsigned int length)
  {
  	int i, offset;
  	int newline = 1;
  	char ascii[17];
  
  	ascii[16] = 0;
  
  	for (i = 0; i < length; i++) {
  		if (newline) {
  			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
  			newline = 0;
  		}
  		printk(KERN_CONT " %02x", addr[i]);
  		offset = i % 16;
  		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
  		if (offset == 15) {
  			printk(KERN_CONT " %s\n", ascii);
  			newline = 1;
  		}
  	}
  	if (!newline) {
  		i %= 16;
  		while (i < 16) {
  			printk(KERN_CONT "   ");
  			ascii[i] = ' ';
  			i++;
  		}
  		printk(KERN_CONT " %s\n", ascii);
  	}
  }
  static struct track *get_track(struct kmem_cache *s, void *object,
  	enum track_item alloc)
  {
  	struct track *p;
  
  	if (s->offset)
  		p = object + s->offset + sizeof(void *);
  	else
  		p = object + s->inuse;
  
  	return p + alloc;
  }
  
  static void set_track(struct kmem_cache *s, void *object,
  			enum track_item alloc, unsigned long addr)
  {
  	struct track *p = get_track(s, object, alloc);

  	if (addr) {
  #ifdef CONFIG_STACKTRACE
  		struct stack_trace trace;
  		int i;
  
  		trace.nr_entries = 0;
  		trace.max_entries = TRACK_ADDRS_COUNT;
  		trace.entries = p->addrs;
  		trace.skip = 3;
  		save_stack_trace(&trace);
  
  		/* See rant in lockdep.c */
  		if (trace.nr_entries != 0 &&
  		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
  			trace.nr_entries--;
  
  		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
  			p->addrs[i] = 0;
  #endif
  		p->addr = addr;
  		p->cpu = smp_processor_id();
  		p->pid = current->pid;
  		p->when = jiffies;
  	} else
  		memset(p, 0, sizeof(struct track));
  }
  static void init_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	set_track(s, object, TRACK_FREE, 0UL);
  	set_track(s, object, TRACK_ALLOC, 0UL);
  }
  
  static void print_track(const char *s, struct track *t)
  {
  	if (!t->addr)
  		return;
  
  	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
  		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
  #ifdef CONFIG_STACKTRACE
  	{
  		int i;
  		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
  			if (t->addrs[i])
  			printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
  			else
  				break;
  	}
  #endif
  }
  
  static void print_tracking(struct kmem_cache *s, void *object)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  
  	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  	print_track("Freed", get_track(s, object, TRACK_FREE));
  }
  
  static void print_page_info(struct page *page)
  {
  	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
  		page, page->objects, page->inuse, page->freelist, page->flags);
  
  }
  
  static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "========================================"
  			"=====================================\n");
  	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
  	printk(KERN_ERR "----------------------------------------"
  			"-------------------------------------\n\n");
  }
  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
  }
  
  static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned int off;	/* Offset of last byte */
  	u8 *addr = page_address(page);
  
  	print_tracking(s, p);
  
  	print_page_info(page);
  
  	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
  			p, p - addr, get_freepointer(s, p));
  
  	if (p > addr + 16)
  		print_section("Bytes b4", p - 16, 16);
  	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
  
  	if (s->flags & SLAB_RED_ZONE)
  		print_section("Redzone", p + s->objsize,
  			s->inuse - s->objsize);
  	if (s->offset)
  		off = s->offset + sizeof(void *);
  	else
  		off = s->inuse;
  	if (s->flags & SLAB_STORE_USER)
  		off += 2 * sizeof(struct track);
  
  	if (off != s->size)
  		/* Beginning of the filler is the free pointer */
  		print_section("Padding", p + off, s->size - off);
  
  	dump_stack();
  }
  
  static void object_err(struct kmem_cache *s, struct page *page,
  			u8 *object, char *reason)
  {
  	slab_bug(s, "%s", reason);
  	print_trailer(s, page, object);
  }
  static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
  {
  	va_list args;
  	char buf[100];
  	va_start(args, fmt);
  	vsnprintf(buf, sizeof(buf), fmt, args);
  	va_end(args);
  	slab_bug(s, "%s", buf);
  	print_page_info(page);
  	dump_stack();
  }
  static void init_object(struct kmem_cache *s, void *object, u8 val)
  {
  	u8 *p = object;
  
  	if (s->flags & __OBJECT_POISON) {
  		memset(p, POISON_FREE, s->objsize - 1);
  		p[s->objsize - 1] = POISON_END;
  	}
  
  	if (s->flags & SLAB_RED_ZONE)
  		memset(p + s->objsize, val, s->inuse - s->objsize);
  }
  static u8 *check_bytes8(u8 *start, u8 value, unsigned int bytes)
  {
  	while (bytes) {
  		if (*start != value)
  			return start;
  		start++;
  		bytes--;
  	}
  	return NULL;
  }
  static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
  {
  	u64 value64;
  	unsigned int words, prefix;
  
  	if (bytes <= 16)
  		return check_bytes8(start, value, bytes);
  
  	value64 = value | value << 8 | value << 16 | value << 24;
  	value64 = (value64 & 0xffffffff) | value64 << 32;
  	prefix = 8 - ((unsigned long)start) % 8;
  
  	if (prefix) {
  		u8 *r = check_bytes8(start, value, prefix);
  		if (r)
  			return r;
  		start += prefix;
  		bytes -= prefix;
  	}
  
  	words = bytes / 8;
  
  	while (words) {
  		if (*(u64 *)start != value64)
  			return check_bytes8(start, value, 8);
  		start += 8;
  		words--;
  	}
  
  	return check_bytes8(start, value, bytes % 8);
  }
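  
  /*
   * Editor's worked example (assumed addresses): scanning bytes == 30 from a
   * start address with start % 8 == 5 gives prefix == 3 byte-wise checks,
   * then words == 27 / 8 == 3 full u64 comparisons covering 24 bytes, and
   * finally 27 % 8 == 3 trailing bytes checked via check_bytes8().
   */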
  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  						void *from, void *to)
  {
  	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
  	memset(from, data, to - from);
  }
  
  static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  			u8 *object, char *what,
  			u8 *start, unsigned int value, unsigned int bytes)
  {
  	u8 *fault;
  	u8 *end;
  
  	fault = check_bytes(start, value, bytes);
  	if (!fault)
  		return 1;
  
  	end = start + bytes;
  	while (end > fault && end[-1] == value)
  		end--;
  
  	slab_bug(s, "%s overwritten", what);
  	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
  					fault, end - 1, fault[0], value);
  	print_trailer(s, page, object);
  
  	restore_bytes(s, what, value, fault, end);
  	return 0;
  }
  /*
   * Object layout:
   *
   * object address
   * 	Bytes of the object to be managed.
   * 	If the freepointer may overlay the object then the free
   * 	pointer is the first word of the object.
   *
   * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
   * 	0xa5 (POISON_END)
   *
   * object + s->objsize
   * 	Padding to reach word boundary. This is also used for Redzoning.
   * 	Padding is extended by another word if Redzoning is enabled and
   * 	objsize == inuse.
   *
   * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
   * 	0xcc (RED_ACTIVE) for objects in use.
   *
   * object + s->inuse
   * 	Meta data starts here.
   *
   * 	A. Free pointer (if we cannot overwrite object on free)
   * 	B. Tracking data for SLAB_STORE_USER
   * 	C. Padding to reach required alignment boundary or at minimum
   * 		one word if debugging is on to be able to detect writes
   * 		before the word boundary.
   *
   *	Padding is done using 0x5a (POISON_INUSE)
   *
   * object + s->size
   * 	Nothing is used beyond s->size.
   *
   * If slabcaches are merged then the objsize and inuse boundaries are mostly
   * ignored. And therefore no slab options that rely on these boundaries
   * may be used with merged slabcaches.
   */
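  
  /*
   * Editor's worked example (assumed geometry, not taken from the original
   * file): with SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER, objsize == 24,
   * no ctor, on 64 bit, an object is laid out roughly as
   *
   *	 0..23	object, poisoned with 0x6b and a trailing 0xa5 while free
   *	24..31	red zone: 0xbb (inactive) or 0xcc (active)
   *	32..39	free pointer (s->offset == 32, relocated because of poisoning)
   *	40..	two struct track records, then padding filled with 0x5a
   *
   * The exact offsets depend on the cache's flags and alignment; this sketch
   * only makes the description above concrete.
   */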
  static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  {
  	unsigned long off = s->inuse;	/* The end of info */
  
  	if (s->offset)
  		/* Freepointer is placed after the object. */
  		off += sizeof(void *);
  
  	if (s->flags & SLAB_STORE_USER)
  		/* We also have user information there */
  		off += 2 * sizeof(struct track);
  
  	if (s->size == off)
  		return 1;
  	return check_bytes_and_report(s, page, p, "Object padding",
  				p + off, POISON_INUSE, s->size - off);
  }
  /* Check the pad bytes at the end of a slab page */
  static int slab_pad_check(struct kmem_cache *s, struct page *page)
  {
  	u8 *start;
  	u8 *fault;
  	u8 *end;
  	int length;
  	int remainder;
  
  	if (!(s->flags & SLAB_POISON))
  		return 1;
  	start = page_address(page);
  	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
  	end = start + length;
  	remainder = length % s->size;
  	if (!remainder)
  		return 1;
  	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
  	if (!fault)
  		return 1;
  	while (end > fault && end[-1] == POISON_INUSE)
  		end--;
  
  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
  	print_section("Padding", end - remainder, remainder);

  	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
  	return 0;
  }
  
  static int check_object(struct kmem_cache *s, struct page *page,
  					void *object, u8 val)
  {
  	u8 *p = object;
  	u8 *endobject = object + s->objsize;
  
  	if (s->flags & SLAB_RED_ZONE) {
  		if (!check_bytes_and_report(s, page, object, "Redzone",
  			endobject, val, s->inuse - s->objsize))
  			return 0;
  	} else {
  		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
  			check_bytes_and_report(s, page, p, "Alignment padding",
  				endobject, POISON_INUSE, s->inuse - s->objsize);
  		}
  	}
  
  	if (s->flags & SLAB_POISON) {
  		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
  			(!check_bytes_and_report(s, page, p, "Poison", p,
  					POISON_FREE, s->objsize - 1) ||
  			 !check_bytes_and_report(s, page, p, "Poison",
  				p + s->objsize - 1, POISON_END, 1)))
  			return 0;
  		/*
  		 * check_pad_bytes cleans up on its own.
  		 */
  		check_pad_bytes(s, page, p);
  	}
  	if (!s->offset && val == SLUB_RED_ACTIVE)
  		/*
  		 * Object and freepointer overlap. Cannot check
  		 * freepointer while object is allocated.
  		 */
  		return 1;
  
  	/* Check free pointer validity */
  	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  		object_err(s, page, p, "Freepointer corrupt");
  		/*
  		 * No choice but to zap it and thus lose the remainder
  		 * of the free objects in this slab. May cause
  		 * another error because the object count is now wrong.
  		 */
  		set_freepointer(s, p, NULL);
  		return 0;
  	}
  	return 1;
  }
  
  static int check_slab(struct kmem_cache *s, struct page *page)
  {
  	int maxobj;
  	VM_BUG_ON(!irqs_disabled());
  
  	if (!PageSlab(page)) {
  		slab_err(s, page, "Not a valid slab page");
  		return 0;
  	}

  	maxobj = order_objects(compound_order(page), s->size, s->reserved);
  	if (page->objects > maxobj) {
  		slab_err(s, page, "objects %u > max %u",
  			page->objects, maxobj);
  		return 0;
  	}
  	if (page->inuse > page->objects) {
  		slab_err(s, page, "inuse %u > max %u",
  			page->inuse, page->objects);
  		return 0;
  	}
  	/* Slab_pad_check fixes things up after itself */
  	slab_pad_check(s, page);
  	return 1;
  }
  
  /*
   * Determine if a certain object on a page is on the freelist. Must hold the
   * slab lock to guarantee that the chains are in a consistent state.
   */
  static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  {
  	int nr = 0;
  	void *fp;
  	void *object = NULL;
  	unsigned long max_objects;

  	fp = page->freelist;
  	while (fp && nr <= page->objects) {
  		if (fp == search)
  			return 1;
  		if (!check_valid_pointer(s, page, fp)) {
  			if (object) {
  				object_err(s, page, object,
  					"Freechain corrupt");
  				set_freepointer(s, object, NULL);
  				break;
  			} else {
  				slab_err(s, page, "Freepointer corrupt");
  				page->freelist = NULL;
  				page->inuse = page->objects;
  				slab_fix(s, "Freelist cleared");
  				return 0;
  			}
  			break;
  		}
  		object = fp;
  		fp = get_freepointer(s, object);
  		nr++;
  	}
  	max_objects = order_objects(compound_order(page), s->size, s->reserved);
  	if (max_objects > MAX_OBJS_PER_PAGE)
  		max_objects = MAX_OBJS_PER_PAGE;
  
  	if (page->objects != max_objects) {
  		slab_err(s, page, "Wrong number of objects. Found %d but "
  			"should be %d", page->objects, max_objects);
  		page->objects = max_objects;
  		slab_fix(s, "Number of objects adjusted.");
  	}
  	if (page->inuse != page->objects - nr) {
  		slab_err(s, page, "Wrong object count. Counter is %d but "
  			"counted were %d", page->inuse, page->objects - nr);
  		page->inuse = page->objects - nr;
  		slab_fix(s, "Object count adjusted.");
  	}
  	return search == NULL;
  }
  static void trace(struct kmem_cache *s, struct page *page, void *object,
  								int alloc)
  {
  	if (s->flags & SLAB_TRACE) {
  		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  			s->name,
  			alloc ? "alloc" : "free",
  			object, page->inuse,
  			page->freelist);
  
  		if (!alloc)
  			print_section("Object", (void *)object, s->objsize);
  
  		dump_stack();
  	}
  }
  /*
   * Hooks for other subsystems that check memory allocations. In a typical
   * production configuration these hooks all should produce no code at all.
   */
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  	lockdep_trace_alloc(flags);
  	might_sleep_if(flags & __GFP_WAIT);
  
  	return should_failslab(s->objsize, flags, s->flags);
  }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
  {
  	flags &= gfp_allowed_mask;
  	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
  	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
  }
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x)
  {
  	kmemleak_free_recursive(x, s->flags);

  	/*
  	 * Trouble is that we may no longer disable interrupts in the fast path.
  	 * So in order to make the debug calls that expect irqs to be
  	 * disabled we need to disable interrupts temporarily.
  	 */
  #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
  	{
  		unsigned long flags;
  
  		local_irq_save(flags);
  		kmemcheck_slab_free(s, x, s->objsize);
  		debug_check_no_locks_freed(x, s->objsize);
  		local_irq_restore(flags);
  	}
  #endif
  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
  		debug_check_no_obj_freed(x, s->objsize);
  }
  
  /*
   * Tracking of fully allocated slabs for debugging purposes.
   *
   * list_lock must be held.
   */
  static void add_full(struct kmem_cache *s,
  	struct kmem_cache_node *n, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_add(&page->lru, &n->full);
  }
  /*
   * list_lock must be held.
   */
  static void remove_full(struct kmem_cache *s, struct page *page)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return;
  	list_del(&page->lru);
  }
  /* Tracking of the number of slabs for debugging purposes */
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  {
  	return atomic_long_read(&n->nr_slabs);
  }
  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	/*
  	 * May be called early in order to allocate a slab for the
  	 * kmem_cache_node structure. Solve the chicken-egg
  	 * dilemma by deferring the increment of the count during
  	 * bootstrap (see early_kmem_cache_node_alloc).
  	 */
  	if (n) {
  		atomic_long_inc(&n->nr_slabs);
  		atomic_long_add(objects, &n->total_objects);
  	}
  }
  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
  {
  	struct kmem_cache_node *n = get_node(s, node);
  
  	atomic_long_dec(&n->nr_slabs);
  	atomic_long_sub(objects, &n->total_objects);
  }
  
  /* Object debug checks for alloc/free paths */
  static void setup_object_debug(struct kmem_cache *s, struct page *page,
  								void *object)
  {
  	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  		return;
  	init_object(s, object, SLUB_RED_INACTIVE);
  	init_tracking(s, object);
  }
  static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
  					void *object, unsigned long addr)
  {
  	if (!check_slab(s, page))
  		goto bad;
  	if (!check_valid_pointer(s, page, object)) {
  		object_err(s, page, object, "Freelist Pointer check fails");
  		goto bad;
  	}
  	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
  		goto bad;

  	/* Success. Perform special debug activities for allocs */
  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_ALLOC, addr);
  	trace(s, page, object, 1);
  	init_object(s, object, SLUB_RED_ACTIVE);
  	return 1;

  bad:
  	if (PageSlab(page)) {
  		/*
  		 * If this is a slab page then lets do the best we can
  		 * to avoid issues in the future. Marking all objects
  		 * as used avoids touching the remaining objects.
  		 */
  		slab_fix(s, "Marking all objects used");
  		page->inuse = page->objects;
  		page->freelist = NULL;
  	}
  	return 0;
  }
  static noinline int free_debug_processing(struct kmem_cache *s,
  		 struct page *page, void *object, unsigned long addr)
  {
  	unsigned long flags;
  	int rc = 0;
  
  	local_irq_save(flags);
  	slab_lock(page);
  	if (!check_slab(s, page))
  		goto fail;
  
  	if (!check_valid_pointer(s, page, object)) {
  		slab_err(s, page, "Invalid object pointer 0x%p", object);
  		goto fail;
  	}
  
  	if (on_freelist(s, page, object)) {
  		object_err(s, page, object, "Object already free");
  		goto fail;
  	}
  	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
  		goto out;
  
  	if (unlikely(s != page->slab)) {
  		if (!PageSlab(page)) {
  			slab_err(s, page, "Attempt to free object(0x%p) "
  				"outside of slab", object);
  		} else if (!page->slab) {
  			printk(KERN_ERR
  				"SLUB <none>: no slab for object 0x%p.\n",
  						object);
  			dump_stack();
  		} else
  			object_err(s, page, object,
  					"page slab pointer corrupt.");
  		goto fail;
  	}

  	if (s->flags & SLAB_STORE_USER)
  		set_track(s, object, TRACK_FREE, addr);
  	trace(s, page, object, 0);
f7cb19336   Christoph Lameter   SLUB: Pass active...
1144
  	init_object(s, object, SLUB_RED_INACTIVE);
5c2e4bbbd   Christoph Lameter   slub: Disable int...
1145
1146
  	rc = 1;
  out:
881db7fb0   Christoph Lameter   slub: Invert lock...
1147
  	slab_unlock(page);
5c2e4bbbd   Christoph Lameter   slub: Disable int...
1148
1149
  	local_irq_restore(flags);
  	return rc;
3ec097421   Christoph Lameter   SLUB: Simplify de...
1150

81819f0fc   Christoph Lameter   SLUB core
1151
  fail:
249226847   Christoph Lameter   SLUB: change erro...
1152
  	slab_fix(s, "Object at 0x%p not freed", object);
5c2e4bbbd   Christoph Lameter   slub: Disable int...
1153
  	goto out;
81819f0fc   Christoph Lameter   SLUB core
1154
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1155
1156
  static int __init setup_slub_debug(char *str)
  {
f0630fff5   Christoph Lameter   SLUB: support slu...
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
  	slub_debug = DEBUG_DEFAULT_FLAGS;
  	if (*str++ != '=' || !*str)
  		/*
  		 * No options specified. Switch on full debugging.
  		 */
  		goto out;
  
  	if (*str == ',')
  		/*
  		 * No options but restriction on slabs. This means full
  		 * debugging for slabs matching a pattern.
  		 */
  		goto check_slabs;
fa5ec8a1f   David Rientjes   slub: add option ...
1170
1171
1172
1173
1174
1175
1176
1177
  	if (tolower(*str) == 'o') {
  		/*
  		 * Avoid enabling debugging on a cache if its minimum order
  		 * would increase as a result.
  		 */
  		disable_higher_order_debug = 1;
  		goto out;
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
  	slub_debug = 0;
  	if (*str == '-')
  		/*
  		 * Switch off all debugging measures.
  		 */
  		goto out;
  
  	/*
  	 * Determine which debug features should be switched on
  	 */
064287807   Pekka Enberg   SLUB: Fix coding ...
1188
  	for (; *str && *str != ','; str++) {
f0630fff5   Christoph Lameter   SLUB: support slu...
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
  		switch (tolower(*str)) {
  		case 'f':
  			slub_debug |= SLAB_DEBUG_FREE;
  			break;
  		case 'z':
  			slub_debug |= SLAB_RED_ZONE;
  			break;
  		case 'p':
  			slub_debug |= SLAB_POISON;
  			break;
  		case 'u':
  			slub_debug |= SLAB_STORE_USER;
  			break;
  		case 't':
  			slub_debug |= SLAB_TRACE;
  			break;
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
1205
1206
1207
  		case 'a':
  			slub_debug |= SLAB_FAILSLAB;
  			break;
f0630fff5   Christoph Lameter   SLUB: support slu...
1208
1209
  		default:
  			printk(KERN_ERR "slub_debug option '%c' "
064287807   Pekka Enberg   SLUB: Fix coding ...
1210
1211
  				"unknown. skipped
  ", *str);
f0630fff5   Christoph Lameter   SLUB: support slu...
1212
  		}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1213
  	}
f0630fff5   Christoph Lameter   SLUB: support slu...
1214
  check_slabs:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1215
1216
  	if (*str == ',')
  		slub_debug_slabs = str + 1;
f0630fff5   Christoph Lameter   SLUB: support slu...
1217
  out:
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1218
1219
1220
1221
  	return 1;
  }
  
  __setup("slub_debug", setup_slub_debug);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1222
1223
  static unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1224
  	void (*ctor)(void *))
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1225
1226
  {
  	/*
e153362a5   Christoph Lameter   slub: Remove objs...
1227
  	 * Enable debugging if selected on the kernel commandline.
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1228
  	 */
e153362a5   Christoph Lameter   slub: Remove objs...
1229
  	if (slub_debug && (!slub_debug_slabs ||
3de472138   David Rientjes   slub: use size an...
1230
1231
  		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
  		flags |= slub_debug;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1232
1233
  
  	return flags;
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1234
1235
  }
  #else
3ec097421   Christoph Lameter   SLUB: Simplify de...
1236
1237
  static inline void setup_object_debug(struct kmem_cache *s,
  			struct page *page, void *object) {}
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1238

3ec097421   Christoph Lameter   SLUB: Simplify de...
1239
  static inline int alloc_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1240
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1241

3ec097421   Christoph Lameter   SLUB: Simplify de...
1242
  static inline int free_debug_processing(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1243
  	struct page *page, void *object, unsigned long addr) { return 0; }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1244

41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1245
1246
1247
  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  			{ return 1; }
  static inline int check_object(struct kmem_cache *s, struct page *page,
f7cb19336   Christoph Lameter   SLUB: Pass active...
1248
  			void *object, u8 val) { return 1; }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1249
1250
  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
  					struct page *page) {}
2cfb7455d   Christoph Lameter   slub: Rework allo...
1251
  static inline void remove_full(struct kmem_cache *s, struct page *page) {}
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1252
1253
  static inline unsigned long kmem_cache_flags(unsigned long objsize,
  	unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1254
  	void (*ctor)(void *))
ba0268a8b   Christoph Lameter   SLUB: accurately ...
1255
1256
1257
  {
  	return flags;
  }
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
1258
  #define slub_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1259

fdaa45e95   Ingo Molnar   slub: Fix build e...
1260
  #define disable_higher_order_debug 0
0f389ec63   Christoph Lameter   slub: No need for...
1261
1262
  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
  							{ return 0; }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1263
1264
  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
  							{ return 0; }
205ab99dd   Christoph Lameter   slub: Update stat...
1265
1266
1267
1268
  static inline void inc_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
  static inline void dec_slabs_node(struct kmem_cache *s, int node,
  							int objects) {}
7d550c56a   Christoph Lameter   slub: Add dummy f...
1269
1270
1271
1272
1273
1274
1275
1276
  
  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
  							{ return 0; }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  		void *object) {}
  
  static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
1277
  #endif /* CONFIG_SLUB_DEBUG */
205ab99dd   Christoph Lameter   slub: Update stat...
1278

81819f0fc   Christoph Lameter   SLUB core
1279
1280
1281
  /*
   * Slab allocation and freeing
   */
65c3376aa   Christoph Lameter   slub: Fallback to...
1282
1283
1284
1285
  static inline struct page *alloc_slab_page(gfp_t flags, int node,
  					struct kmem_cache_order_objects oo)
  {
  	int order = oo_order(oo);
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1286
  	flags |= __GFP_NOTRACK;
2154a3363   Christoph Lameter   slub: Use a const...
1287
  	if (node == NUMA_NO_NODE)
65c3376aa   Christoph Lameter   slub: Fallback to...
1288
1289
  		return alloc_pages(flags, order);
  	else
6b65aaf30   Minchan Kim   slub: Use alloc_p...
1290
  		return alloc_pages_exact_node(node, flags, order);
65c3376aa   Christoph Lameter   slub: Fallback to...
1291
  }
81819f0fc   Christoph Lameter   SLUB core
1292
1293
  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
1294
  	struct page *page;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1295
  	struct kmem_cache_order_objects oo = s->oo;
ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1296
  	gfp_t alloc_gfp;
81819f0fc   Christoph Lameter   SLUB core
1297

7e0528dad   Christoph Lameter   slub: Push irq di...
1298
1299
1300
1301
  	flags &= gfp_allowed_mask;
  
  	if (flags & __GFP_WAIT)
  		local_irq_enable();
b7a49f0d4   Christoph Lameter   slub: Determine g...
1302
  	flags |= s->allocflags;
e12ba74d8   Mel Gorman   Group short-lived...
1303

ba52270d1   Pekka Enberg   SLUB: Don't pass ...
1304
1305
1306
1307
1308
1309
1310
  	/*
  	 * Let the initial higher-order allocation fail under memory pressure
  	 * so we fall back to the minimum order allocation.
  	 */
  	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
  
  	page = alloc_slab_page(alloc_gfp, node, oo);
65c3376aa   Christoph Lameter   slub: Fallback to...
1311
1312
1313
1314
1315
1316
1317
  	if (unlikely(!page)) {
  		oo = s->min;
  		/*
  		 * Allocation may have failed due to fragmentation.
  		 * Try a lower order alloc if possible
  		 */
  		page = alloc_slab_page(flags, node, oo);
81819f0fc   Christoph Lameter   SLUB core
1318

7e0528dad   Christoph Lameter   slub: Push irq di...
1319
1320
  		if (page)
  			stat(s, ORDER_FALLBACK);
65c3376aa   Christoph Lameter   slub: Fallback to...
1321
  	}
5a896d9e7   Vegard Nossum   slub: add hooks f...
1322

7e0528dad   Christoph Lameter   slub: Push irq di...
1323
1324
1325
1326
1327
  	if (flags & __GFP_WAIT)
  		local_irq_disable();
  
  	if (!page)
  		return NULL;
5a896d9e7   Vegard Nossum   slub: add hooks f...
1328
  	if (kmemcheck_enabled
5086c389c   Amerigo Wang   SLUB: Fix some co...
1329
  		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
  		int pages = 1 << oo_order(oo);
  
  		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
  
  		/*
  		 * Objects from caches that have a constructor don't get
  		 * cleared when they're allocated, so we need to do it here.
  		 */
  		if (s->ctor)
  			kmemcheck_mark_uninitialized_pages(page, pages);
  		else
  			kmemcheck_mark_unallocated_pages(page, pages);
5a896d9e7   Vegard Nossum   slub: add hooks f...
1342
  	}
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1343
  	page->objects = oo_objects(oo);
81819f0fc   Christoph Lameter   SLUB core
1344
1345
1346
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
65c3376aa   Christoph Lameter   slub: Fallback to...
1347
  		1 << oo_order(oo));
81819f0fc   Christoph Lameter   SLUB core
1348
1349
1350
1351
1352
1353
1354
  
  	return page;
  }
  
  static void setup_object(struct kmem_cache *s, struct page *page,
  				void *object)
  {
3ec097421   Christoph Lameter   SLUB: Simplify de...
1355
  	setup_object_debug(s, page, object);
4f1049345   Christoph Lameter   slab allocators: ...
1356
  	if (unlikely(s->ctor))
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
1357
  		s->ctor(object);
81819f0fc   Christoph Lameter   SLUB core
1358
1359
1360
1361
1362
  }
  
  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
1363
  	void *start;
81819f0fc   Christoph Lameter   SLUB core
1364
1365
  	void *last;
  	void *p;
6cb062296   Christoph Lameter   Categorize GFP flags
1366
  	BUG_ON(flags & GFP_SLAB_BUG_MASK);
81819f0fc   Christoph Lameter   SLUB core
1367

6cb062296   Christoph Lameter   Categorize GFP flags
1368
1369
  	page = allocate_slab(s,
  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
81819f0fc   Christoph Lameter   SLUB core
1370
1371
  	if (!page)
  		goto out;
205ab99dd   Christoph Lameter   slub: Update stat...
1372
  	inc_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1373
1374
  	page->slab = s;
  	page->flags |= 1 << PG_slab;
81819f0fc   Christoph Lameter   SLUB core
1375
1376
  
  	start = page_address(page);
81819f0fc   Christoph Lameter   SLUB core
1377
1378
  
  	if (unlikely(s->flags & SLAB_POISON))
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1379
  		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
81819f0fc   Christoph Lameter   SLUB core
1380
1381
  
  	last = start;
224a88be4   Christoph Lameter   slub: for_each_ob...
1382
  	for_each_object(p, s, start, page->objects) {
81819f0fc   Christoph Lameter   SLUB core
1383
1384
1385
1386
1387
  		setup_object(s, page, last);
  		set_freepointer(s, last, p);
  		last = p;
  	}
  	setup_object(s, page, last);
a973e9dd1   Christoph Lameter   Revert "unique en...
1388
  	set_freepointer(s, last, NULL);
81819f0fc   Christoph Lameter   SLUB core
1389
1390
1391
  
  	page->freelist = start;
  	page->inuse = 0;
8cb0a5068   Christoph Lameter   slub: Move page->...
1392
  	page->frozen = 1;
81819f0fc   Christoph Lameter   SLUB core
1393
  out:
81819f0fc   Christoph Lameter   SLUB core
1394
1395
1396
1397
1398
  	return page;
  }
  
  static void __free_slab(struct kmem_cache *s, struct page *page)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1399
1400
  	int order = compound_order(page);
  	int pages = 1 << order;
81819f0fc   Christoph Lameter   SLUB core
1401

af537b0a6   Christoph Lameter   slub: Use kmem_ca...
1402
  	if (kmem_cache_debug(s)) {
81819f0fc   Christoph Lameter   SLUB core
1403
1404
1405
  		void *p;
  
  		slab_pad_check(s, page);
224a88be4   Christoph Lameter   slub: for_each_ob...
1406
1407
  		for_each_object(p, s, page_address(page),
  						page->objects)
f7cb19336   Christoph Lameter   SLUB: Pass active...
1408
  			check_object(s, page, p, SLUB_RED_INACTIVE);
81819f0fc   Christoph Lameter   SLUB core
1409
  	}
b1eeab676   Vegard Nossum   kmemcheck: add ho...
1410
  	kmemcheck_free_shadow(page, compound_order(page));
5a896d9e7   Vegard Nossum   slub: add hooks f...
1411

81819f0fc   Christoph Lameter   SLUB core
1412
1413
1414
  	mod_zone_page_state(page_zone(page),
  		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
064287807   Pekka Enberg   SLUB: Fix coding ...
1415
  		-pages);
81819f0fc   Christoph Lameter   SLUB core
1416

49bd5221c   Christoph Lameter   slub: Move map/fl...
1417
1418
  	__ClearPageSlab(page);
  	reset_page_mapcount(page);
1eb5ac646   Nick Piggin   mm: SLUB fix recl...
1419
1420
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += pages;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
1421
  	__free_pages(page, order);
81819f0fc   Christoph Lameter   SLUB core
1422
  }
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1423
1424
  #define need_reserve_slab_rcu						\
  	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
81819f0fc   Christoph Lameter   SLUB core
1425
1426
1427
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct page *page;
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1428
1429
1430
1431
  	if (need_reserve_slab_rcu)
  		page = virt_to_head_page(h);
  	else
  		page = container_of((struct list_head *)h, struct page, lru);
81819f0fc   Christoph Lameter   SLUB core
1432
1433
1434
1435
1436
1437
  	__free_slab(page->slab, page);
  }
  
  static void free_slab(struct kmem_cache *s, struct page *page)
  {
  	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
da9a638c6   Lai Jiangshan   slub,rcu: don't a...
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
  		struct rcu_head *head;
  
  		if (need_reserve_slab_rcu) {
  			int order = compound_order(page);
  			int offset = (PAGE_SIZE << order) - s->reserved;
  
  			VM_BUG_ON(s->reserved != sizeof(*head));
  			head = page_address(page) + offset;
  		} else {
  			/*
  			 * RCU free overloads the RCU head over the LRU
  			 */
  			head = (void *)&page->lru;
  		}
81819f0fc   Christoph Lameter   SLUB core
1452
1453
1454
1455
1456
1457
1458
1459
  
  		call_rcu(head, rcu_free_slab);
  	} else
  		__free_slab(s, page);
  }
  
  static void discard_slab(struct kmem_cache *s, struct page *page)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
1460
  	dec_slabs_node(s, page_to_nid(page), page->objects);
81819f0fc   Christoph Lameter   SLUB core
1461
1462
1463
1464
  	free_slab(s, page);
  }
  
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1465
1466
1467
   * Management of partially allocated slabs.
   *
   * list_lock must be held.
81819f0fc   Christoph Lameter   SLUB core
1468
   */
5cc6eee8a   Christoph Lameter   slub: explicit li...
1469
  static inline void add_partial(struct kmem_cache_node *n,
7c2e132c5   Christoph Lameter   Add parameter to ...
1470
  				struct page *page, int tail)
81819f0fc   Christoph Lameter   SLUB core
1471
  {
e95eed571   Christoph Lameter   SLUB: Add MIN_PAR...
1472
  	n->nr_partial++;
7c2e132c5   Christoph Lameter   Add parameter to ...
1473
1474
1475
1476
  	if (tail)
  		list_add_tail(&page->lru, &n->partial);
  	else
  		list_add(&page->lru, &n->partial);
81819f0fc   Christoph Lameter   SLUB core
1477
  }
5cc6eee8a   Christoph Lameter   slub: explicit li...
1478
1479
1480
1481
  /*
   * list_lock must be held.
   */
  static inline void remove_partial(struct kmem_cache_node *n,
62e346a83   Christoph Lameter   slub: extract com...
1482
1483
1484
1485
1486
  					struct page *page)
  {
  	list_del(&page->lru);
  	n->nr_partial--;
  }
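  /*
   * Both helpers above rely on the caller holding n->list_lock.  A minimal
   * sketch of the expected call pattern (compare get_partial_node() below,
   * or __slab_free() which uses the _irqsave variant):
   *
   *	spin_lock(&n->list_lock);
   *	add_partial(n, page, tail);	... or remove_partial(n, page)
   *	spin_unlock(&n->list_lock);
   */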
81819f0fc   Christoph Lameter   SLUB core
1487
  /*
5cc6eee8a   Christoph Lameter   slub: explicit li...
1488
1489
   * Lock slab, remove from the partial list and put the object into the
   * per cpu freelist.
81819f0fc   Christoph Lameter   SLUB core
1490
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
1491
   * Must hold list_lock.
81819f0fc   Christoph Lameter   SLUB core
1492
   */
881db7fb0   Christoph Lameter   slub: Invert lock...
1493
  static inline int acquire_slab(struct kmem_cache *s,
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
1494
  		struct kmem_cache_node *n, struct page *page)
81819f0fc   Christoph Lameter   SLUB core
1495
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1496
1497
1498
  	void *freelist;
  	unsigned long counters;
  	struct page new;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
  	/*
  	 * Zap the freelist and set the frozen bit.
  	 * The old freelist is the list of objects for the
  	 * per cpu allocation list.
  	 */
  	do {
  		freelist = page->freelist;
  		counters = page->counters;
  		new.counters = counters;
  		new.inuse = page->objects;
  
  		VM_BUG_ON(new.frozen);
  		new.frozen = 1;
1d07171c5   Christoph Lameter   slub: disable int...
1512
  	} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
  			freelist, counters,
  			NULL, new.counters,
  			"lock and freeze"));
  
  	remove_partial(n, page);
  
  	if (freelist) {
  		/* Populate the per cpu freelist */
  		this_cpu_write(s->cpu_slab->freelist, freelist);
  		this_cpu_write(s->cpu_slab->page, page);
  		this_cpu_write(s->cpu_slab->node, page_to_nid(page));
81819f0fc   Christoph Lameter   SLUB core
1524
  		return 1;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1525
1526
1527
1528
1529
1530
1531
1532
1533
  	} else {
  		/*
  		 * Slab page came from the wrong list. No object to allocate
  		 * from. Put it onto the correct list and continue partial
  		 * scan.
  		 */
  		printk(KERN_ERR "SLUB: %s : Page without available objects on"
  			" partial list
  ", s->name);
2cfb7455d   Christoph Lameter   slub: Rework allo...
1534
  		return 0;
81819f0fc   Christoph Lameter   SLUB core
1535
  	}
81819f0fc   Christoph Lameter   SLUB core
1536
1537
1538
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1539
   * Try to allocate a partial slab from a specific node.
81819f0fc   Christoph Lameter   SLUB core
1540
   */
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
1541
1542
  static struct page *get_partial_node(struct kmem_cache *s,
  					struct kmem_cache_node *n)
81819f0fc   Christoph Lameter   SLUB core
1543
1544
1545
1546
1547
1548
  {
  	struct page *page;
  
  	/*
  	 * Racy check. If we mistakenly see no partial slabs then we
  	 * just allocate an empty slab. If we mistakenly try to get a
672bba3a4   Christoph Lameter   SLUB: update comm...
1549
1550
  	 * partial slab and there is none available then get_partial_node()
  	 * will return NULL.
81819f0fc   Christoph Lameter   SLUB core
1551
1552
1553
1554
1555
1556
  	 */
  	if (!n || !n->nr_partial)
  		return NULL;
  
  	spin_lock(&n->list_lock);
  	list_for_each_entry(page, &n->partial, lru)
881db7fb0   Christoph Lameter   slub: Invert lock...
1557
  		if (acquire_slab(s, n, page))
81819f0fc   Christoph Lameter   SLUB core
1558
1559
1560
1561
1562
1563
1564
1565
  			goto out;
  	page = NULL;
  out:
  	spin_unlock(&n->list_lock);
  	return page;
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
1566
   * Get a page from somewhere. Search in increasing NUMA distances.
81819f0fc   Christoph Lameter   SLUB core
1567
1568
1569
1570
1571
   */
  static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
  {
  #ifdef CONFIG_NUMA
  	struct zonelist *zonelist;
dd1a239f6   Mel Gorman   mm: have zonelist...
1572
  	struct zoneref *z;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1573
1574
  	struct zone *zone;
  	enum zone_type high_zoneidx = gfp_zone(flags);
81819f0fc   Christoph Lameter   SLUB core
1575
1576
1577
  	struct page *page;
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
1578
1579
1580
1581
  	 * The defrag ratio allows a configuration of the tradeoffs between
  	 * inter node defragmentation and node local allocations. A lower
  	 * defrag_ratio increases the tendency to do local allocations
  	 * instead of attempting to obtain partial slabs from other nodes.
81819f0fc   Christoph Lameter   SLUB core
1582
  	 *
672bba3a4   Christoph Lameter   SLUB: update comm...
1583
1584
1585
1586
  	 * If the defrag_ratio is set to 0 then kmalloc() always
  	 * returns node local objects. If the ratio is higher then kmalloc()
  	 * may return off node objects because partial slabs are obtained
  	 * from other nodes and filled up.
81819f0fc   Christoph Lameter   SLUB core
1587
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
1588
  	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
672bba3a4   Christoph Lameter   SLUB: update comm...
1589
1590
1591
1592
1593
  	 * defrag_ratio = 1000) then every (well almost) allocation will
  	 * first attempt to defrag slab caches on other nodes. This means
  	 * scanning over all nodes to look for partial slabs which may be
  	 * expensive if we do it every time we are trying to find a slab
  	 * with available objects.
81819f0fc   Christoph Lameter   SLUB core
1594
  	 */
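  	/*
  	 * Rough example, treating get_cycles() % 1024 as roughly uniform:
  	 * with /sys/kernel/slab/xx/defrag_ratio set to 100 (internal ratio
  	 * 1000, see above) the early return below is taken only when
  	 * get_cycles() % 1024 > 1000, i.e. for about 2% of the allocations,
  	 * so nearly every one of them scans remote nodes.  With defrag_ratio
  	 * set to 50 (assuming the same scaling, internal ratio 500) roughly
  	 * half of the allocations skip the remote scan entirely.
  	 */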
9824601ea   Christoph Lameter   SLUB: rename defr...
1595
1596
  	if (!s->remote_node_defrag_ratio ||
  			get_cycles() % 1024 > s->remote_node_defrag_ratio)
81819f0fc   Christoph Lameter   SLUB core
1597
  		return NULL;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1598
  	get_mems_allowed();
0e88460da   Mel Gorman   mm: introduce nod...
1599
  	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
54a6eb5c4   Mel Gorman   mm: use two zonel...
1600
  	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
81819f0fc   Christoph Lameter   SLUB core
1601
  		struct kmem_cache_node *n;
54a6eb5c4   Mel Gorman   mm: use two zonel...
1602
  		n = get_node(s, zone_to_nid(zone));
81819f0fc   Christoph Lameter   SLUB core
1603

54a6eb5c4   Mel Gorman   mm: use two zonel...
1604
  		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
3b89d7d88   David Rientjes   slub: move min_pa...
1605
  				n->nr_partial > s->min_partial) {
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
1606
  			page = get_partial_node(s, n);
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1607
1608
  			if (page) {
  				put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1609
  				return page;
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1610
  			}
81819f0fc   Christoph Lameter   SLUB core
1611
1612
  		}
  	}
c0ff7453b   Miao Xie   cpuset,mm: fix no...
1613
  	put_mems_allowed();
81819f0fc   Christoph Lameter   SLUB core
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
  #endif
  	return NULL;
  }
  
  /*
   * Get a partial page, lock it and return it.
   */
  static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  {
  	struct page *page;
2154a3363   Christoph Lameter   slub: Use a const...
1624
  	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
81819f0fc   Christoph Lameter   SLUB core
1625

61728d1ef   Christoph Lameter   slub: Pass kmem_c...
1626
  	page = get_partial_node(s, get_node(s, searchnode));
33de04ec4   Christoph Lameter   slub: Use NUMA_NO...
1627
  	if (page || node != NUMA_NO_NODE)
81819f0fc   Christoph Lameter   SLUB core
1628
1629
1630
1631
  		return page;
  
  	return get_any_partial(s, flags);
  }
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
  #ifdef CONFIG_PREEMPT
  /*
   * Calculate the next globally unique transaction for disambiguation
   * during cmpxchg. The transactions start with the cpu number and are then
   * incremented by CONFIG_NR_CPUS.
   */
  #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
  #else
  /*
   * No preemption supported therefore also no need to check for
   * different cpus.
   */
  #define TID_STEP 1
  #endif
  
  static inline unsigned long next_tid(unsigned long tid)
  {
  	return tid + TID_STEP;
  }
  
  static inline unsigned int tid_to_cpu(unsigned long tid)
  {
  	return tid % TID_STEP;
  }
  
  static inline unsigned long tid_to_event(unsigned long tid)
  {
  	return tid / TID_STEP;
  }
  
  static inline unsigned int init_tid(int cpu)
  {
  	return cpu;
  }
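  /*
   * Worked example, assuming CONFIG_PREEMPT and CONFIG_NR_CPUS == 4 (so
   * TID_STEP == 4): cpu 2 starts at init_tid(2) == 2 and its tid advances
   * 2 -> 6 -> 10 -> 14 through next_tid().  For tid == 14 we get
   * tid_to_cpu(14) == 14 % 4 == 2 and tid_to_event(14) == 14 / 4 == 3, so
   * a mismatching tid tells note_cmpxchg_failure() whether we migrated to
   * another cpu or merely raced with other operations on the same cpu.
   */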
  
  static inline void note_cmpxchg_failure(const char *n,
  		const struct kmem_cache *s, unsigned long tid)
  {
  #ifdef SLUB_DEBUG_CMPXCHG
  	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
  
  	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
  
  #ifdef CONFIG_PREEMPT
  	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
  		printk("due to cpu change %d -> %d
  ",
  			tid_to_cpu(tid), tid_to_cpu(actual_tid));
  	else
  #endif
  	if (tid_to_event(tid) != tid_to_event(actual_tid))
  		printk("due to cpu running other code. Event %ld->%ld
  ",
  			tid_to_event(tid), tid_to_event(actual_tid));
  	else
  		printk("for unknown reason: actual=%lx was=%lx target=%lx
  ",
  			actual_tid, tid, next_tid(tid));
  #endif
4fdccdfbb   Christoph Lameter   slub: Add statist...
1691
  	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1692
  }
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1693
1694
  void init_kmem_cache_cpus(struct kmem_cache *s)
  {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1695
1696
1697
1698
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1699
  }
81819f0fc   Christoph Lameter   SLUB core
1700
1701
1702
2cfb7455d   Christoph Lameter   slub: Rework allo...
1703
1704
1705
1706
  
  /*
   * Remove the cpu slab
   */
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1707
  static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1708
  {
2cfb7455d   Christoph Lameter   slub: Rework allo...
1709
  	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1710
  	struct page *page = c->page;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  	int lock = 0;
  	enum slab_modes l = M_NONE, m = M_NONE;
  	void *freelist;
  	void *nextfree;
  	int tail = 0;
  	struct page new;
  	struct page old;
  
  	if (page->freelist) {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1721
  		stat(s, DEACTIVATE_REMOTE_FREES);
2cfb7455d   Christoph Lameter   slub: Rework allo...
1722
1723
1724
1725
1726
1727
1728
  		tail = 1;
  	}
  
  	c->tid = next_tid(c->tid);
  	c->page = NULL;
  	freelist = c->freelist;
  	c->freelist = NULL;
894b8788d   Christoph Lameter   slub: support con...
1729
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
  	 * Stage one: Free all available per cpu objects back
  	 * to the page freelist while it is still frozen. Leave the
  	 * last one.
  	 *
  	 * There is no need to take the list->lock because the page
  	 * is still frozen.
  	 */
  	while (freelist && (nextfree = get_freepointer(s, freelist))) {
  		void *prior;
  		unsigned long counters;
  
  		do {
  			prior = page->freelist;
  			counters = page->counters;
  			set_freepointer(s, freelist, prior);
  			new.counters = counters;
  			new.inuse--;
  			VM_BUG_ON(!new.frozen);
1d07171c5   Christoph Lameter   slub: disable int...
1748
  		} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1749
1750
1751
1752
1753
1754
  			prior, counters,
  			freelist, new.counters,
  			"drain percpu freelist"));
  
  		freelist = nextfree;
  	}
894b8788d   Christoph Lameter   slub: support con...
1755
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
  	 * Stage two: Ensure that the page is unfrozen while the
  	 * list presence reflects the actual number of objects
  	 * during unfreeze.
  	 *
  	 * We set up the list membership and then perform a cmpxchg
  	 * with the count. If there is a mismatch then the page
  	 * is not unfrozen but the page is on the wrong list.
  	 *
  	 * Then we restart the process which may have to remove
  	 * the page from the list that we just put it on again
  	 * because the number of objects in the slab may have
  	 * changed.
894b8788d   Christoph Lameter   slub: support con...
1768
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
1769
  redo:
894b8788d   Christoph Lameter   slub: support con...
1770

2cfb7455d   Christoph Lameter   slub: Rework allo...
1771
1772
1773
  	old.freelist = page->freelist;
  	old.counters = page->counters;
  	VM_BUG_ON(!old.frozen);
7c2e132c5   Christoph Lameter   Add parameter to ...
1774

2cfb7455d   Christoph Lameter   slub: Rework allo...
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
  	/* Determine target state of the slab */
  	new.counters = old.counters;
  	if (freelist) {
  		new.inuse--;
  		set_freepointer(s, freelist, old.freelist);
  		new.freelist = freelist;
  	} else
  		new.freelist = old.freelist;
  
  	new.frozen = 0;
81107188f   Christoph Lameter   slub: Fix partial...
1785
  	if (!new.inuse && n->nr_partial > s->min_partial)
2cfb7455d   Christoph Lameter   slub: Rework allo...
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
  		m = M_FREE;
  	else if (new.freelist) {
  		m = M_PARTIAL;
  		if (!lock) {
  			lock = 1;
  			/*
  			 * Taking the spinlock removes the possibility
  			 * that acquire_slab() will see a slab page that
  			 * is frozen
  			 */
  			spin_lock(&n->list_lock);
  		}
  	} else {
  		m = M_FULL;
  		if (kmem_cache_debug(s) && !lock) {
  			lock = 1;
  			/*
  			 * This also ensures that the scanning of full
  			 * slabs from diagnostic functions will not see
  			 * any frozen slabs.
  			 */
  			spin_lock(&n->list_lock);
  		}
  	}
  
  	if (l != m) {
  
  		if (l == M_PARTIAL)
  
  			remove_partial(n, page);
  
  		else if (l == M_FULL)
894b8788d   Christoph Lameter   slub: support con...
1818

2cfb7455d   Christoph Lameter   slub: Rework allo...
1819
1820
1821
1822
1823
1824
1825
1826
  			remove_full(s, page);
  
  		if (m == M_PARTIAL) {
  
  			add_partial(n, page, tail);
  			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
  
  		} else if (m == M_FULL) {
894b8788d   Christoph Lameter   slub: support con...
1827

2cfb7455d   Christoph Lameter   slub: Rework allo...
1828
1829
1830
1831
1832
1833
1834
  			stat(s, DEACTIVATE_FULL);
  			add_full(s, n, page);
  
  		}
  	}
  
  	l = m;
1d07171c5   Christoph Lameter   slub: disable int...
1835
  	if (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
1836
1837
1838
1839
  				old.freelist, old.counters,
  				new.freelist, new.counters,
  				"unfreezing slab"))
  		goto redo;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1840
1841
1842
1843
1844
1845
1846
  	if (lock)
  		spin_unlock(&n->list_lock);
  
  	if (m == M_FREE) {
  		stat(s, DEACTIVATE_EMPTY);
  		discard_slab(s, page);
  		stat(s, FREE_SLAB);
894b8788d   Christoph Lameter   slub: support con...
1847
  	}
81819f0fc   Christoph Lameter   SLUB core
1848
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1849
  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1850
  {
84e554e68   Christoph Lameter   SLUB: Make slub s...
1851
  	stat(s, CPUSLAB_FLUSH);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1852
  	deactivate_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1853
1854
1855
1856
  }
  
  /*
   * Flush cpu slab.
6446faa2f   Christoph Lameter   slub: Fix up comm...
1857
   *
81819f0fc   Christoph Lameter   SLUB core
1858
1859
   * Called from IPI handler with interrupts disabled.
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
1860
  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0fc   Christoph Lameter   SLUB core
1861
  {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
1862
  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0fc   Christoph Lameter   SLUB core
1863

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1864
1865
  	if (likely(c && c->page))
  		flush_slab(s, c);
81819f0fc   Christoph Lameter   SLUB core
1866
1867
1868
1869
1870
  }
  
  static void flush_cpu_slab(void *d)
  {
  	struct kmem_cache *s = d;
81819f0fc   Christoph Lameter   SLUB core
1871

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1872
  	__flush_cpu_slab(s, smp_processor_id());
81819f0fc   Christoph Lameter   SLUB core
1873
1874
1875
1876
  }
  
  static void flush_all(struct kmem_cache *s)
  {
15c8b6c1a   Jens Axboe   on_each_cpu(): ki...
1877
  	on_each_cpu(flush_cpu_slab, s, 1);
81819f0fc   Christoph Lameter   SLUB core
1878
1879
1880
  }
  
  /*
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1881
1882
1883
1884
1885
1886
   * Check if the objects in a per cpu structure fit numa
   * locality expectations.
   */
  static inline int node_match(struct kmem_cache_cpu *c, int node)
  {
  #ifdef CONFIG_NUMA
2154a3363   Christoph Lameter   slub: Use a const...
1887
  	if (node != NUMA_NO_NODE && c->node != node)
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1888
1889
1890
1891
  		return 0;
  #endif
  	return 1;
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
  static int count_free(struct page *page)
  {
  	return page->objects - page->inuse;
  }
  
  static unsigned long count_partial(struct kmem_cache_node *n,
  					int (*get_count)(struct page *))
  {
  	unsigned long flags;
  	unsigned long x = 0;
  	struct page *page;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  	list_for_each_entry(page, &n->partial, lru)
  		x += get_count(page);
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return x;
  }
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1910
1911
1912
1913
1914
1915
1916
1917
  static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	return atomic_long_read(&n->total_objects);
  #else
  	return 0;
  #endif
  }
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
  static noinline void
  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
  {
  	int node;
  
  	printk(KERN_WARNING
  		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)
  ",
  		nid, gfpflags);
  	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
  		"default order: %d, min order: %d
  ", s->name, s->objsize,
  		s->size, oo_order(s->oo), oo_order(s->min));
fa5ec8a1f   David Rientjes   slub: add option ...
1931
1932
1933
1934
  	if (oo_order(s->min) > get_order(s->objsize))
  		printk(KERN_WARNING "  %s debugging increased min order, use "
  		       "slub_debug=O to disable.
  ", s->name);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1935
1936
1937
1938
1939
1940
1941
1942
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long nr_slabs;
  		unsigned long nr_objs;
  		unsigned long nr_free;
  
  		if (!n)
  			continue;
26c02cf05   Alexander Beregalov   SLUB: fix build w...
1943
1944
1945
  		nr_free  = count_partial(n, count_free);
  		nr_slabs = node_nr_slabs(n);
  		nr_objs  = node_nr_objs(n);
781b2ba6e   Pekka Enberg   SLUB: Out-of-memo...
1946
1947
1948
1949
1950
1951
1952
  
  		printk(KERN_WARNING
  			"  node %d: slabs: %ld, objs: %ld, free: %ld
  ",
  			node, nr_slabs, nr_objs, nr_free);
  	}
  }
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
1953
  /*
894b8788d   Christoph Lameter   slub: support con...
1954
1955
1956
1957
   * Slow path. The lockless freelist is empty or we need to perform
   * debugging duties.
   *
   * Interrupts are disabled.
81819f0fc   Christoph Lameter   SLUB core
1958
   *
894b8788d   Christoph Lameter   slub: support con...
1959
1960
1961
   * Processing is still very fast if new objects have been freed to the
   * regular freelist. In that case we simply take over the regular freelist
   * as the lockless freelist and zap the regular freelist.
81819f0fc   Christoph Lameter   SLUB core
1962
   *
894b8788d   Christoph Lameter   slub: support con...
1963
1964
1965
   * If that is not working then we fall back to the partial lists. We take the
   * first element of the freelist as the object to allocate now and move the
   * rest of the freelist to the lockless freelist.
81819f0fc   Christoph Lameter   SLUB core
1966
   *
894b8788d   Christoph Lameter   slub: support con...
1967
   * And if we were unable to get a new slab from the partial slab lists then
6446faa2f   Christoph Lameter   slub: Fix up comm...
1968
1969
   * we need to allocate a new slab. This is the slowest path since it involves
   * a call to the page allocator and the setup of a new slab.
81819f0fc   Christoph Lameter   SLUB core
1970
   */
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
1971
1972
  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  			  unsigned long addr, struct kmem_cache_cpu *c)
81819f0fc   Christoph Lameter   SLUB core
1973
  {
81819f0fc   Christoph Lameter   SLUB core
1974
  	void **object;
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
1975
  	struct page *page;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1976
  	unsigned long flags;
2cfb7455d   Christoph Lameter   slub: Rework allo...
1977
1978
  	struct page new;
  	unsigned long counters;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
  
  	local_irq_save(flags);
  #ifdef CONFIG_PREEMPT
  	/*
  	 * We may have been preempted and rescheduled on a different
  	 * cpu before disabling interrupts. Need to reload cpu area
  	 * pointer.
  	 */
  	c = this_cpu_ptr(s->cpu_slab);
  #endif
81819f0fc   Christoph Lameter   SLUB core
1989

e72e9c23e   Linus Torvalds   Revert "SLUB: rem...
1990
1991
  	/* We handle __GFP_ZERO in the caller */
  	gfpflags &= ~__GFP_ZERO;
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
1992
1993
  	page = c->page;
  	if (!page)
81819f0fc   Christoph Lameter   SLUB core
1994
  		goto new_slab;
fc59c0530   Christoph Lameter   slub: Get rid of ...
1995
  	if (unlikely(!node_match(c, node))) {
e36a2652d   Christoph Lameter   slub: Add statist...
1996
  		stat(s, ALLOC_NODE_MISMATCH);
fc59c0530   Christoph Lameter   slub: Get rid of ...
1997
1998
1999
  		deactivate_slab(s, c);
  		goto new_slab;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2000

2cfb7455d   Christoph Lameter   slub: Rework allo...
2001
2002
2003
2004
2005
2006
  	stat(s, ALLOC_SLOWPATH);
  
  	do {
  		object = page->freelist;
  		counters = page->counters;
  		new.counters = counters;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2007
  		VM_BUG_ON(!new.frozen);
03e404af2   Christoph Lameter   slub: fast releas...
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
  		/*
  		 * If there is no object left then we use this loop to
  		 * deactivate the slab which is simple since no objects
  		 * are left in the slab and therefore we do not need to
  		 * put the page back onto the partial list.
  		 *
  		 * If there are objects left then we retrieve them
  		 * and use them to refill the per cpu queue.
  		 */
  
  		new.inuse = page->objects;
  		new.frozen = object != NULL;
1d07171c5   Christoph Lameter   slub: disable int...
2020
  	} while (!__cmpxchg_double_slab(s, page,
2cfb7455d   Christoph Lameter   slub: Rework allo...
2021
2022
2023
  			object, counters,
  			NULL, new.counters,
  			"__slab_alloc"));
6446faa2f   Christoph Lameter   slub: Fix up comm...
2024

03e404af2   Christoph Lameter   slub: fast releas...
2025
2026
2027
  	if (unlikely(!object)) {
  		c->page = NULL;
  		stat(s, DEACTIVATE_BYPASS);
fc59c0530   Christoph Lameter   slub: Get rid of ...
2028
  		goto new_slab;
03e404af2   Christoph Lameter   slub: fast releas...
2029
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
2030

84e554e68   Christoph Lameter   SLUB: Make slub s...
2031
  	stat(s, ALLOC_REFILL);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2032

894b8788d   Christoph Lameter   slub: support con...
2033
  load_freelist:
4eade540f   Christoph Lameter   slub: Not necessa...
2034
  	VM_BUG_ON(!page->frozen);
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2035
  	c->freelist = get_freepointer(s, object);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2036
2037
  	c->tid = next_tid(c->tid);
  	local_irq_restore(flags);
81819f0fc   Christoph Lameter   SLUB core
2038
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2039
  new_slab:
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2040
2041
  	page = get_partial(s, gfpflags, node);
  	if (page) {
84e554e68   Christoph Lameter   SLUB: Make slub s...
2042
  		stat(s, ALLOC_FROM_PARTIAL);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2043
2044
2045
2046
  		object = c->freelist;
  
  		if (kmem_cache_debug(s))
  			goto debug;
894b8788d   Christoph Lameter   slub: support con...
2047
  		goto load_freelist;
81819f0fc   Christoph Lameter   SLUB core
2048
  	}
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2049
  	page = new_slab(s, gfpflags, node);
b811c202a   Christoph Lameter   SLUB: simplify IR...
2050

01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2051
  	if (page) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2052
  		c = __this_cpu_ptr(s->cpu_slab);
05aa34503   Christoph Lameter   SLUB: Fix memory ...
2053
  		if (c->page)
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2054
  			flush_slab(s, c);
01ad8a7bc   Christoph Lameter   slub: Eliminate r...
2055

2cfb7455d   Christoph Lameter   slub: Rework allo...
2056
2057
2058
2059
2060
2061
2062
2063
2064
  		/*
  		 * No other reference to the page yet so we can
  		 * muck around with it freely without cmpxchg
  		 */
  		object = page->freelist;
  		page->freelist = NULL;
  		page->inuse = page->objects;
  
  		stat(s, ALLOC_SLAB);
bd07d87fd   David Rientjes   slub: avoid label...
2065
2066
  		c->node = page_to_nid(page);
  		c->page = page;
9e577e8b4   Christoph Lameter   slub: When alloca...
2067
2068
2069
  
  		if (kmem_cache_debug(s))
  			goto debug;
4b6f07504   Christoph Lameter   SLUB: Define func...
2070
  		goto load_freelist;
81819f0fc   Christoph Lameter   SLUB core
2071
  	}
95f859893   Pekka Enberg   SLUB: Don't print...
2072
2073
  	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
  		slab_out_of_memory(s, gfpflags, node);
2fd66c517   Christoph Lameter   slub: Add missing...
2074
  	local_irq_restore(flags);
71c7a06ff   Christoph Lameter   slub: Fallback to...
2075
  	return NULL;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2076

81819f0fc   Christoph Lameter   SLUB core
2077
  debug:
2cfb7455d   Christoph Lameter   slub: Rework allo...
2078
2079
  	if (!object || !alloc_debug_processing(s, page, object, addr))
  		goto new_slab;
894b8788d   Christoph Lameter   slub: support con...
2080

2cfb7455d   Christoph Lameter   slub: Rework allo...
2081
  	c->freelist = get_freepointer(s, object);
442b06bce   Christoph Lameter   slub: Remove node...
2082
2083
  	deactivate_slab(s, c);
  	c->page = NULL;
15b7c5142   Pekka Enberg   SLUB: Optimize sl...
2084
  	c->node = NUMA_NO_NODE;
a71ae47a2   Christoph Lameter   slub: Fix double ...
2085
2086
  	local_irq_restore(flags);
  	return object;
894b8788d   Christoph Lameter   slub: support con...
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
  }
  
  /*
   * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
   * have the fastpath folded into their functions. So no function call
   * overhead for requests that can be satisfied on the fastpath.
   *
   * The fastpath works by first checking if the lockless freelist can be used.
   * If not then __slab_alloc is called for slow processing.
   *
   * Otherwise we can simply pick the next object from the lockless free list.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2099
  static __always_inline void *slab_alloc(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2100
  		gfp_t gfpflags, int node, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2101
  {
894b8788d   Christoph Lameter   slub: support con...
2102
  	void **object;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2103
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2104
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2105

c016b0bde   Christoph Lameter   slub: Extract hoo...
2106
  	if (slab_pre_alloc_hook(s, gfpflags))
773ff60e8   Akinobu Mita   SLUB: failslab su...
2107
  		return NULL;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2108

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2109
  redo:
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2110
2111
2112
2113
2114
2115
2116
  
  	/*
  	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
  	 * enabled. We may switch back and forth between cpus while
  	 * reading from one cpu area. That does not matter as long
  	 * as we end up on the original cpu again when doing the cmpxchg.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2117
  	c = __this_cpu_ptr(s->cpu_slab);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2118

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2119
2120
2121
2122
2123
2124
2125
2126
  	/*
  	 * The transaction ids are globally unique per cpu and per operation on
  	 * a per cpu queue. Thus we can guarantee that the cmpxchg_double
  	 * occurs on the right processor and that there was no operation on the
  	 * linked list in between.
  	 */
  	tid = c->tid;
  	barrier();
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2127

9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2128
  	object = c->freelist;
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2129
  	if (unlikely(!object || !node_match(c, node)))
894b8788d   Christoph Lameter   slub: support con...
2130

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2131
  		object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788d   Christoph Lameter   slub: support con...
2132
2133
  
  	else {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2134
  		/*
25985edce   Lucas De Marchi   Fix common misspe...
2135
  		 * The cmpxchg will only match if there was no additional
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
  		 * operation and if we are on the right processor.
  		 *
  		 * The cmpxchg does the following atomically (without lock semantics!)
  		 * 1. Relocate first pointer to the current per cpu area.
  		 * 2. Verify that tid and freelist have not been changed
  		 * 3. If they were not changed replace tid and freelist
  		 *
  		 * Since this is without lock semantics the protection is only against
  		 * code executing on this cpu *not* from access by other cpus.
  		 */
30106b8ce   Thomas Gleixner   slub: Fix the loc...
2146
  		if (unlikely(!irqsafe_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2147
2148
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				object, tid,
1393d9a18   Christoph Lameter   slub: Make CONFIG...
2149
  				get_freepointer_safe(s, object), next_tid(tid)))) {
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2150
2151
2152
2153
  
  			note_cmpxchg_failure("slab_alloc", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2154
  		stat(s, ALLOC_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2155
  	}
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2156

74e2134ff   Pekka Enberg   SLUB: Fix __GFP_Z...
2157
  	if (unlikely(gfpflags & __GFP_ZERO) && object)
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2158
  		memset(object, 0, s->objsize);
d07dbea46   Christoph Lameter   Slab allocators: ...
2159

c016b0bde   Christoph Lameter   slub: Extract hoo...
2160
  	slab_post_alloc_hook(s, gfpflags, object);
5a896d9e7   Vegard Nossum   slub: add hooks f...
2161

894b8788d   Christoph Lameter   slub: support con...
2162
  	return object;
81819f0fc   Christoph Lameter   SLUB core
2163
2164
2165
2166
  }
  
  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  {
2154a3363   Christoph Lameter   slub: Use a const...
2167
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2168

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2169
  	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2170
2171
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2172
2173
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
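  /*
   * Minimal usage sketch for the exported allocation API above; the cache
   * name and struct my_obj are made up for illustration:
   *
   *	struct kmem_cache *my_cache = kmem_cache_create("my_cache",
   *					sizeof(struct my_obj), 0,
   *					SLAB_HWCACHE_ALIGN, NULL);
   *	struct my_obj *p = kmem_cache_alloc(my_cache, GFP_KERNEL);
   *	...
   *	kmem_cache_free(my_cache, p);
   *	kmem_cache_destroy(my_cache);
   */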
0f24f1287   Li Zefan   tracing, slab: De...
2174
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2175
2176
2177
2178
2179
2180
2181
2182
2183
  void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
  {
  	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_alloc_trace);
  
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2184
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2185
2186
2187
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2188
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2189
  EXPORT_SYMBOL(kmalloc_order_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2190
  #endif
81819f0fc   Christoph Lameter   SLUB core
2191
2192
2193
  #ifdef CONFIG_NUMA
  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2194
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2195
2196
  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
  				    s->objsize, s->size, gfpflags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2197
2198
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
2199
2200
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0fc   Christoph Lameter   SLUB core
2201

0f24f1287   Li Zefan   tracing, slab: De...
2202
  #ifdef CONFIG_TRACING
4a92379bd   Richard Kennedy   slub tracing: mov...
2203
  void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2204
  				    gfp_t gfpflags,
4a92379bd   Richard Kennedy   slub tracing: mov...
2205
  				    int node, size_t size)
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2206
  {
4a92379bd   Richard Kennedy   slub tracing: mov...
2207
2208
2209
2210
2211
  	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
  
  	trace_kmalloc_node(_RET_IP_, ret,
  			   size, s->size, gfpflags, node);
  	return ret;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2212
  }
4a92379bd   Richard Kennedy   slub tracing: mov...
2213
  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2214
  #endif
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
2215
  #endif
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2216

81819f0fc   Christoph Lameter   SLUB core
2217
  /*
894b8788d   Christoph Lameter   slub: support con...
2218
2219
   * Slow path handling. This may still be called frequently since objects
   * have a longer lifetime than the cpu slabs in most processing loads.
81819f0fc   Christoph Lameter   SLUB core
2220
   *
894b8788d   Christoph Lameter   slub: support con...
2221
2222
2223
   * So we still attempt to reduce cache line usage. Just take the slab
   * lock and free the item. If there is no additional partial page
   * handling required then we can return immediately.
81819f0fc   Christoph Lameter   SLUB core
2224
   */
894b8788d   Christoph Lameter   slub: support con...
2225
  static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2226
  			void *x, unsigned long addr)
81819f0fc   Christoph Lameter   SLUB core
2227
2228
2229
  {
  	void *prior;
  	void **object = (void *)x;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2230
2231
2232
2233
2234
  	int was_frozen;
  	int inuse;
  	struct page new;
  	unsigned long counters;
  	struct kmem_cache_node *n = NULL;
61728d1ef   Christoph Lameter   slub: Pass kmem_c...
2235
  	unsigned long uninitialized_var(flags);
81819f0fc   Christoph Lameter   SLUB core
2236

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2237
  	stat(s, FREE_SLOWPATH);
81819f0fc   Christoph Lameter   SLUB core
2238

8dc16c6c0   Christoph Lameter   slub: Move debug ...
2239
  	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
80f08c191   Christoph Lameter   slub: Avoid disab...
2240
  		return;
6446faa2f   Christoph Lameter   slub: Fix up comm...
2241

2cfb7455d   Christoph Lameter   slub: Rework allo...
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
  	do {
  		prior = page->freelist;
  		counters = page->counters;
  		set_freepointer(s, object, prior);
  		new.counters = counters;
  		was_frozen = new.frozen;
  		new.inuse--;
  		if ((!new.inuse || !prior) && !was_frozen && !n) {
                          n = get_node(s, page_to_nid(page));
  			/*
  			 * Speculatively acquire the list_lock.
  			 * If the cmpxchg does not succeed then we may
  			 * drop the list_lock without any processing.
  			 *
  			 * Otherwise the list_lock will synchronize with
  			 * other processors updating the list of slabs.
  			 */
80f08c191   Christoph Lameter   slub: Avoid disab...
2259
                          spin_lock_irqsave(&n->list_lock, flags);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2260
2261
  		}
  		inuse = new.inuse;
81819f0fc   Christoph Lameter   SLUB core
2262

2cfb7455d   Christoph Lameter   slub: Rework allo...
2263
2264
2265
2266
  	} while (!cmpxchg_double_slab(s, page,
  		prior, counters,
  		object, new.counters,
  		"__slab_free"));
81819f0fc   Christoph Lameter   SLUB core
2267

2cfb7455d   Christoph Lameter   slub: Rework allo...
2268
2269
2270
2271
2272
2273
2274
  	if (likely(!n)) {
                  /*
  		 * The list lock was not taken therefore no list
  		 * activity can be necessary.
  		 */
                  if (was_frozen)
                          stat(s, FREE_FROZEN);
80f08c191   Christoph Lameter   slub: Avoid disab...
2275
                  return;
2cfb7455d   Christoph Lameter   slub: Rework allo...
2276
          }
81819f0fc   Christoph Lameter   SLUB core
2277
2278
  
  	/*
2cfb7455d   Christoph Lameter   slub: Rework allo...
2279
2280
  	 * was_frozen may have been set after we acquired the list_lock in
  	 * an earlier loop. So we need to check it here again.
81819f0fc   Christoph Lameter   SLUB core
2281
  	 */
2cfb7455d   Christoph Lameter   slub: Rework allo...
2282
2283
2284
2285
2286
  	if (was_frozen)
  		stat(s, FREE_FROZEN);
  	else {
  		if (unlikely(!inuse && n->nr_partial > s->min_partial))
                          goto slab_empty;
81819f0fc   Christoph Lameter   SLUB core
2287

2cfb7455d   Christoph Lameter   slub: Rework allo...
2288
2289
2290
2291
2292
2293
  		/*
  		 * Objects left in the slab. If it was not on the partial list before
  		 * then add it.
  		 */
  		if (unlikely(!prior)) {
  			remove_full(s, page);
130655ef0   Shaohua Li   slub: add slab wi...
2294
  			add_partial(n, page, 1);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2295
2296
  			stat(s, FREE_ADD_PARTIAL);
  		}
8ff12cfc0   Christoph Lameter   SLUB: Support for...
2297
  	}
80f08c191   Christoph Lameter   slub: Avoid disab...
2298
  	spin_unlock_irqrestore(&n->list_lock, flags);
81819f0fc   Christoph Lameter   SLUB core
2299
2300
2301
  	return;
  
  slab_empty:
a973e9dd1   Christoph Lameter   Revert "unique en...
2302
  	if (prior) {
81819f0fc   Christoph Lameter   SLUB core
2303
  		/*
6fbabb20f   Christoph Lameter   slub: Fix full li...
2304
  		 * Slab on the partial list.
81819f0fc   Christoph Lameter   SLUB core
2305
  		 */
5cc6eee8a   Christoph Lameter   slub: explicit li...
2306
  		remove_partial(n, page);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2307
  		stat(s, FREE_REMOVE_PARTIAL);
6fbabb20f   Christoph Lameter   slub: Fix full li...
2308
2309
2310
  	} else
  		/* Slab must be on the full list */
  		remove_full(s, page);
2cfb7455d   Christoph Lameter   slub: Rework allo...
2311

80f08c191   Christoph Lameter   slub: Avoid disab...
2312
  	spin_unlock_irqrestore(&n->list_lock, flags);
84e554e68   Christoph Lameter   SLUB: Make slub s...
2313
  	stat(s, FREE_SLAB);
81819f0fc   Christoph Lameter   SLUB core
2314
  	discard_slab(s, page);
81819f0fc   Christoph Lameter   SLUB core
2315
  }
894b8788d   Christoph Lameter   slub: support con...
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
  /*
   * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
   * can perform fastpath freeing without additional function calls.
   *
   * The fastpath is only possible if we are freeing to the current cpu slab
   * of this processor. This is typically the case if we have just allocated
   * the item before.
   *
   * If fastpath is not possible then fall back to __slab_free where we deal
   * with all sorts of special processing.
   */
064287807   Pekka Enberg   SLUB: Fix coding ...
2327
  static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2328
  			struct page *page, void *x, unsigned long addr)
894b8788d   Christoph Lameter   slub: support con...
2329
2330
  {
  	void **object = (void *)x;
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2331
  	struct kmem_cache_cpu *c;
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2332
  	unsigned long tid;
1f84260c8   Christoph Lameter   SLUB: Alternate f...
2333

c016b0bde   Christoph Lameter   slub: Extract hoo...
2334
  	slab_free_hook(s, x);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2335
  redo:
a24c5a0ea   Christoph Lameter   slub: Dont define...
2336

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2337
2338
2339
2340
2341
2342
  	/*
  	 * Determine the current cpu's per cpu slab.
  	 * The cpu may change afterward. However, that does not matter since
  	 * data is retrieved via this pointer. If we are on the same cpu
  	 * during the cmpxchg then the free will succeed.
  	 */
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2343
  	c = __this_cpu_ptr(s->cpu_slab);
c016b0bde   Christoph Lameter   slub: Extract hoo...
2344

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2345
2346
  	tid = c->tid;
  	barrier();
c016b0bde   Christoph Lameter   slub: Extract hoo...
2347

442b06bce   Christoph Lameter   slub: Remove node...
2348
  	if (likely(page == c->page)) {
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2349
  		set_freepointer(s, object, c->freelist);
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2350

30106b8ce   Thomas Gleixner   slub: Fix the loc...
2351
  		if (unlikely(!irqsafe_cpu_cmpxchg_double(
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2352
2353
2354
2355
2356
2357
2358
  				s->cpu_slab->freelist, s->cpu_slab->tid,
  				c->freelist, tid,
  				object, next_tid(tid)))) {
  
  			note_cmpxchg_failure("slab_free", s, tid);
  			goto redo;
  		}
84e554e68   Christoph Lameter   SLUB: Make slub s...
2359
  		stat(s, FREE_FASTPATH);
894b8788d   Christoph Lameter   slub: support con...
2360
  	} else
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2361
  		__slab_free(s, page, x, addr);
894b8788d   Christoph Lameter   slub: support con...
2362

894b8788d   Christoph Lameter   slub: support con...
2363
  }
81819f0fc   Christoph Lameter   SLUB core
2364
2365
  void kmem_cache_free(struct kmem_cache *s, void *x)
  {
77c5e2d01   Christoph Lameter   slub: fix object ...
2366
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
2367

b49af68ff   Christoph Lameter   Add virt_to_head_...
2368
  	page = virt_to_head_page(x);
81819f0fc   Christoph Lameter   SLUB core
2369

ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
2370
  	slab_free(s, page, x, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
2371

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
2372
  	trace_kmem_cache_free(_RET_IP_, x);
81819f0fc   Christoph Lameter   SLUB core
2373
2374
  }
  EXPORT_SYMBOL(kmem_cache_free);
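
  /*
   * Editor's usage sketch (not part of the original slub.c): how a typical
   * client drives the allocation/free paths above. The cache name, struct
   * foo and the foo_* functions are illustrative only and assume
   * <linux/slab.h>; the block is kept under #if 0 so it is not compiled.
   */
  #if 0
  struct foo {
  	int a;
  	int b;
  };
  
  static struct kmem_cache *foo_cache;
  
  static int __init foo_cache_init(void)
  {
  	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
  					0, SLAB_HWCACHE_ALIGN, NULL);
  	return foo_cache ? 0 : -ENOMEM;
  }
  
  static void foo_cache_use(void)
  {
  	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
  
  	if (f)
  		/* Freeing to the current cpu slab hits the fastpath above. */
  		kmem_cache_free(foo_cache, f);
  }
  #endif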
81819f0fc   Christoph Lameter   SLUB core
2375
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2376
2377
2378
2379
   * Object placement in a slab is made very easy because we always start at
   * offset 0. If we tune the size of the object to the alignment then we can
   * get the required alignment by putting one properly sized object after
   * another.
81819f0fc   Christoph Lameter   SLUB core
2380
2381
2382
2383
   *
   * Notice that the allocation order determines the sizes of the per cpu
   * caches. Each processor always has one slab available for allocations.
   * Increasing the allocation order reduces the number of times that slabs
672bba3a4   Christoph Lameter   SLUB: update comm...
2384
   * must be moved on and off the partial lists and is therefore a factor in
81819f0fc   Christoph Lameter   SLUB core
2385
   * locking overhead.
81819f0fc   Christoph Lameter   SLUB core
2386
2387
2388
2389
2390
2391
2392
2393
2394
   */
  
  /*
   * Minimum / Maximum order of slab pages. This influences locking overhead
   * and slab fragmentation. A higher order reduces the number of partial slabs
   * and increases the number of allocations possible without having to
   * take the list_lock.
   */
  static int slub_min_order;
114e9e89e   Christoph Lameter   slub: Drop DEFAUL...
2395
  static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2396
  static int slub_min_objects;
81819f0fc   Christoph Lameter   SLUB core
2397
2398
2399
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
672bba3a4   Christoph Lameter   SLUB: update comm...
2400
   * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0fc   Christoph Lameter   SLUB core
2401
2402
2403
2404
   */
  static int slub_nomerge;
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2405
2406
   * Calculate the order of allocation given a slab object size.
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2407
2408
2409
2410
   * The order of allocation has significant impact on performance and other
   * system components. Generally order 0 allocations should be preferred since
   * order 0 does not cause fragmentation in the page allocator. Larger objects
   * can be problematic to put into order 0 slabs because there may be too much
c124f5b54   Christoph Lameter   slub: pack object...
2411
   * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a4   Christoph Lameter   SLUB: update comm...
2412
2413
2414
2415
2416
2417
   * would be wasted.
   *
   * In order to reach satisfactory performance we must ensure that a minimum
   * number of objects is in one slab. Otherwise we may generate too much
   * activity on the partial lists which requires taking the list_lock. This is
   * less of a concern for large slabs though, which are rarely used.
81819f0fc   Christoph Lameter   SLUB core
2418
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2419
2420
2421
2422
   * slub_max_order specifies the order where we begin to stop considering the
   * number of objects in a slab as critical. If we reach slub_max_order then
   * we try to keep the page order as low as possible. So we accept more waste
   * of space in favor of a small page order.
81819f0fc   Christoph Lameter   SLUB core
2423
   *
672bba3a4   Christoph Lameter   SLUB: update comm...
2424
2425
2426
2427
   * Higher order allocations also allow the placement of more objects in a
   * slab and thereby reduce object handling overhead. If the user has
   * requested a higher minimum order then we start with that one instead of
   * the smallest order which will fit the object.
81819f0fc   Christoph Lameter   SLUB core
2428
   */
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2429
  static inline int slab_order(int size, int min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2430
  				int max_order, int fract_leftover, int reserved)
81819f0fc   Christoph Lameter   SLUB core
2431
2432
2433
  {
  	int order;
  	int rem;
6300ea750   Christoph Lameter   SLUB: ensure that...
2434
  	int min_order = slub_min_order;
81819f0fc   Christoph Lameter   SLUB core
2435

ab9a0f196   Lai Jiangshan   slub: automatical...
2436
  	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c061   Cyrill Gorcunov   SLUB: cleanup - d...
2437
  		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b264641   Christoph Lameter   slub: Store max n...
2438

6300ea750   Christoph Lameter   SLUB: ensure that...
2439
  	for (order = max(min_order,
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2440
2441
  				fls(min_objects * size - 1) - PAGE_SHIFT);
  			order <= max_order; order++) {
81819f0fc   Christoph Lameter   SLUB core
2442

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2443
  		unsigned long slab_size = PAGE_SIZE << order;
81819f0fc   Christoph Lameter   SLUB core
2444

ab9a0f196   Lai Jiangshan   slub: automatical...
2445
  		if (slab_size < min_objects * size + reserved)
81819f0fc   Christoph Lameter   SLUB core
2446
  			continue;
ab9a0f196   Lai Jiangshan   slub: automatical...
2447
  		rem = (slab_size - reserved) % size;
81819f0fc   Christoph Lameter   SLUB core
2448

5e6d444ea   Christoph Lameter   SLUB: rework slab...
2449
  		if (rem <= slab_size / fract_leftover)
81819f0fc   Christoph Lameter   SLUB core
2450
2451
2452
  			break;
  
  	}
672bba3a4   Christoph Lameter   SLUB: update comm...
2453

81819f0fc   Christoph Lameter   SLUB core
2454
2455
  	return order;
  }
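
  /*
   * Editor's worked example (not part of the original slub.c), assuming
   * PAGE_SIZE = 4096, slub_min_order = 0 and reserved = 0:
   *
   *	slab_order(700, 8, 3, 16, 0)
   *	starts at order = max(0, fls(8 * 700 - 1) - PAGE_SHIFT) = 13 - 12 = 1.
   *	At order 1: slab_size = 8192 >= 8 * 700, rem = 8192 % 700 = 492 and
   *	492 <= 8192 / 16 = 512, so the loop breaks and order 1 is returned.
   *
   * An order-1 slab therefore holds 11 such objects with 492 bytes (about
   * 6% of the slab) left unused, within the requested 1/16 limit.
   */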
ab9a0f196   Lai Jiangshan   slub: automatical...
2456
  static inline int calculate_order(int size, int reserved)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2457
2458
2459
2460
  {
  	int order;
  	int min_objects;
  	int fraction;
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2461
  	int max_objects;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
  
  	/*
  	 * Attempt to find best configuration for a slab. This
  	 * works by first attempting to generate a layout with
  	 * the best configuration and backing off gradually.
  	 *
  	 * First we reduce the acceptable waste in a slab. Then
  	 * we reduce the minimum objects required in a slab.
  	 */
  	min_objects = slub_min_objects;
9b2cd506e   Christoph Lameter   slub: Calculate m...
2472
2473
  	if (!min_objects)
  		min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f196   Lai Jiangshan   slub: automatical...
2474
  	max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1f   Zhang Yanmin   SLUB: Fix default...
2475
  	min_objects = min(min_objects, max_objects);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2476
  	while (min_objects > 1) {
c124f5b54   Christoph Lameter   slub: pack object...
2477
  		fraction = 16;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2478
2479
  		while (fraction >= 4) {
  			order = slab_order(size, min_objects,
ab9a0f196   Lai Jiangshan   slub: automatical...
2480
  					slub_max_order, fraction, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2481
2482
2483
2484
  			if (order <= slub_max_order)
  				return order;
  			fraction /= 2;
  		}
5086c389c   Amerigo Wang   SLUB: Fix some co...
2485
  		min_objects--;
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2486
2487
2488
2489
2490
2491
  	}
  
  	/*
  	 * We were unable to place multiple objects in a slab. Now
  	 * let's see if we can place a single object there.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2492
  	order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2493
2494
2495
2496
2497
2498
  	if (order <= slub_max_order)
  		return order;
  
  	/*
  	 * Doh, this slab cannot be placed using slub_max_order.
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2499
  	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf5909   David Rientjes   slub: enforce MAX...
2500
  	if (order < MAX_ORDER)
5e6d444ea   Christoph Lameter   SLUB: rework slab...
2501
2502
2503
  		return order;
  	return -ENOSYS;
  }
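
  /*
   * Editor's worked example of the back-off ladder above (not part of the
   * original slub.c), assuming PAGE_SIZE = 4096, slub_max_order = 3,
   * slub_min_objects = 0, reserved = 0 and nr_cpu_ids = 4:
   *
   *	calculate_order(5000, 0)
   *	min_objects = 4 * (fls(4) + 1) = 16, capped to
   *	max_objects = 32768 / 5000 = 6.
   *	fraction = 16: slab_order() tries order 3, but the 2768 bytes of
   *	waste exceed 32768 / 16 = 2048, so order 4 is returned and rejected.
   *	fraction = 8: 2768 <= 32768 / 8 = 4096, so order 3 is accepted.
   *
   * The cache therefore uses order-3 slabs holding 6 objects each.
   */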
81819f0fc   Christoph Lameter   SLUB core
2504
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2505
   * Figure out what the alignment of the objects will be.
81819f0fc   Christoph Lameter   SLUB core
2506
2507
2508
2509
2510
   */
  static unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
6446faa2f   Christoph Lameter   slub: Fix up comm...
2511
2512
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
81819f0fc   Christoph Lameter   SLUB core
2513
  	 *
6446faa2f   Christoph Lameter   slub: Fix up comm...
2514
2515
  	 * The hardware cache alignment cannot override the specified
  	 * alignment though. If that is greater, then use it.
81819f0fc   Christoph Lameter   SLUB core
2516
  	 */
b62103867   Nick Piggin   slub: Do not cros...
2517
2518
2519
2520
2521
2522
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
81819f0fc   Christoph Lameter   SLUB core
2523
2524
  
  	if (align < ARCH_SLAB_MINALIGN)
b62103867   Nick Piggin   slub: Do not cros...
2525
  		align = ARCH_SLAB_MINALIGN;
81819f0fc   Christoph Lameter   SLUB core
2526
2527
2528
  
  	return ALIGN(align, sizeof(void *));
  }
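
  /*
   * Editor's worked example (not part of the original slub.c), assuming a
   * 64 byte cache line, ARCH_SLAB_MINALIGN = 8 and sizeof(void *) = 8:
   *
   *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 20)
   *	ralign starts at 64, is halved to 32 (20 <= 32) and stops there
   *	(20 > 16), so 32 is returned: two such objects share a cache line
   *	without either of them straddling a line boundary.
   *
   *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100)
   *	100 > 64 / 2, so ralign stays at 64 and full cache line alignment
   *	is used.
   */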
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2529
2530
  static void
  init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2531
2532
  {
  	n->nr_partial = 0;
81819f0fc   Christoph Lameter   SLUB core
2533
2534
  	spin_lock_init(&n->list_lock);
  	INIT_LIST_HEAD(&n->partial);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2535
  #ifdef CONFIG_SLUB_DEBUG
0f389ec63   Christoph Lameter   slub: No need for...
2536
  	atomic_long_set(&n->nr_slabs, 0);
02b71b701   Salman Qazi   slub: fixed unini...
2537
  	atomic_long_set(&n->total_objects, 0);
643b11384   Christoph Lameter   slub: enable trac...
2538
  	INIT_LIST_HEAD(&n->full);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2539
  #endif
81819f0fc   Christoph Lameter   SLUB core
2540
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2541
  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2542
  {
6c182dc0d   Christoph Lameter   slub: Remove stat...
2543
2544
  	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
  			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2545

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2546
  	/*
d4d84fef6   Chris Metcalf   slub: always alig...
2547
2548
  	 * Must align to double word boundary for the double cmpxchg
  	 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2549
  	 */
d4d84fef6   Chris Metcalf   slub: always alig...
2550
2551
  	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
  				     2 * sizeof(void *));
8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2552
2553
2554
2555
2556
  
  	if (!s->cpu_slab)
  		return 0;
  
  	init_kmem_cache_cpus(s);
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2557

8a5ec0ba4   Christoph Lameter   Lockless (and pre...
2558
  	return 1;
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2559
  }
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2560

51df11428   Christoph Lameter   slub: Dynamically...
2561
  static struct kmem_cache *kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
2562
2563
2564
2565
2566
2567
  /*
   * No kmalloc_node yet so do it by hand. We know that this is the first
   * slab on the node for this slabcache. There are no concurrent accesses
   * possible.
   *
   * Note that this function only works on the kmalloc_node_cache
4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2568
2569
   * when allocating for the kmalloc_node_cache. This is used for bootstrapping
   * memory on a fresh node that has no slab structures yet.
81819f0fc   Christoph Lameter   SLUB core
2570
   */
55136592f   Christoph Lameter   slub: Remove dyna...
2571
  static void early_kmem_cache_node_alloc(int node)
81819f0fc   Christoph Lameter   SLUB core
2572
2573
2574
  {
  	struct page *page;
  	struct kmem_cache_node *n;
51df11428   Christoph Lameter   slub: Dynamically...
2575
  	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0fc   Christoph Lameter   SLUB core
2576

51df11428   Christoph Lameter   slub: Dynamically...
2577
  	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0fc   Christoph Lameter   SLUB core
2578
2579
  
  	BUG_ON(!page);
a2f92ee7e   Christoph Lameter   SLUB: do not fail...
2580
2581
2582
2583
2584
2585
2586
2587
  	if (page_to_nid(page) != node) {
  		printk(KERN_ERR "SLUB: Unable to allocate memory from "
  				"node %d
  ", node);
  		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
  				"in order to be able to continue
  ");
  	}
81819f0fc   Christoph Lameter   SLUB core
2588
2589
  	n = page->freelist;
  	BUG_ON(!n);
51df11428   Christoph Lameter   slub: Dynamically...
2590
  	page->freelist = get_freepointer(kmem_cache_node, n);
81819f0fc   Christoph Lameter   SLUB core
2591
  	page->inuse++;
8cb0a5068   Christoph Lameter   slub: Move page->...
2592
  	page->frozen = 0;
51df11428   Christoph Lameter   slub: Dynamically...
2593
  	kmem_cache_node->node[node] = n;
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2594
  #ifdef CONFIG_SLUB_DEBUG
f7cb19336   Christoph Lameter   SLUB: Pass active...
2595
  	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df11428   Christoph Lameter   slub: Dynamically...
2596
  	init_tracking(kmem_cache_node, n);
8ab1372fa   Christoph Lameter   SLUB: Fix CONFIG_...
2597
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
2598
2599
  	init_kmem_cache_node(n, kmem_cache_node);
  	inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2f   Christoph Lameter   slub: Fix up comm...
2600

7c2e132c5   Christoph Lameter   Add parameter to ...
2601
  	add_partial(n, page, 0);
81819f0fc   Christoph Lameter   SLUB core
2602
2603
2604
2605
2606
  }
  
  static void free_kmem_cache_nodes(struct kmem_cache *s)
  {
  	int node;
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2607
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2608
  		struct kmem_cache_node *n = s->node[node];
51df11428   Christoph Lameter   slub: Dynamically...
2609

73367bd8e   Alexander Duyck   slub: move kmem_c...
2610
  		if (n)
51df11428   Christoph Lameter   slub: Dynamically...
2611
  			kmem_cache_free(kmem_cache_node, n);
81819f0fc   Christoph Lameter   SLUB core
2612
2613
2614
  		s->node[node] = NULL;
  	}
  }
55136592f   Christoph Lameter   slub: Remove dyna...
2615
  static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2616
2617
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
2618

f64dc58c5   Christoph Lameter   Memoryless nodes:...
2619
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2620
  		struct kmem_cache_node *n;
73367bd8e   Alexander Duyck   slub: move kmem_c...
2621
  		if (slab_state == DOWN) {
55136592f   Christoph Lameter   slub: Remove dyna...
2622
  			early_kmem_cache_node_alloc(node);
73367bd8e   Alexander Duyck   slub: move kmem_c...
2623
2624
  			continue;
  		}
51df11428   Christoph Lameter   slub: Dynamically...
2625
  		n = kmem_cache_alloc_node(kmem_cache_node,
55136592f   Christoph Lameter   slub: Remove dyna...
2626
  						GFP_KERNEL, node);
81819f0fc   Christoph Lameter   SLUB core
2627

73367bd8e   Alexander Duyck   slub: move kmem_c...
2628
2629
2630
  		if (!n) {
  			free_kmem_cache_nodes(s);
  			return 0;
81819f0fc   Christoph Lameter   SLUB core
2631
  		}
73367bd8e   Alexander Duyck   slub: move kmem_c...
2632

81819f0fc   Christoph Lameter   SLUB core
2633
  		s->node[node] = n;
5595cffc8   Pekka Enberg   SLUB: dynamic per...
2634
  		init_kmem_cache_node(n, s);
81819f0fc   Christoph Lameter   SLUB core
2635
2636
2637
  	}
  	return 1;
  }
81819f0fc   Christoph Lameter   SLUB core
2638

c0bdb232b   David Rientjes   slub: rename calc...
2639
  static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d88   David Rientjes   slub: move min_pa...
2640
2641
2642
2643
2644
2645
2646
  {
  	if (min < MIN_PARTIAL)
  		min = MIN_PARTIAL;
  	else if (min > MAX_PARTIAL)
  		min = MAX_PARTIAL;
  	s->min_partial = min;
  }
81819f0fc   Christoph Lameter   SLUB core
2647
2648
2649
2650
  /*
   * calculate_sizes() determines the order and the distribution of data within
   * a slab object.
   */
06b285dc3   Christoph Lameter   slub: Make the or...
2651
  static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0fc   Christoph Lameter   SLUB core
2652
2653
2654
2655
  {
  	unsigned long flags = s->flags;
  	unsigned long size = s->objsize;
  	unsigned long align = s->align;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2656
  	int order;
81819f0fc   Christoph Lameter   SLUB core
2657
2658
  
  	/*
d8b42bf54   Christoph Lameter   slub: Rearrange #...
2659
2660
2661
2662
2663
2664
2665
2666
  	 * Round up object size to the next word boundary. We can only
  	 * place the free pointer at word boundaries and this determines
  	 * the possible location of the free pointer.
  	 */
  	size = ALIGN(size, sizeof(void *));
  
  #ifdef CONFIG_SLUB_DEBUG
  	/*
81819f0fc   Christoph Lameter   SLUB core
2667
2668
2669
2670
2671
  	 * Determine if we can poison the object itself. If the user of
  	 * the slab may touch the object after free or before allocation
  	 * then we should never poison the object itself.
  	 */
  	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f2   Christoph Lameter   Slab allocators: ...
2672
  			!s->ctor)
81819f0fc   Christoph Lameter   SLUB core
2673
2674
2675
  		s->flags |= __OBJECT_POISON;
  	else
  		s->flags &= ~__OBJECT_POISON;
81819f0fc   Christoph Lameter   SLUB core
2676
2677
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2678
  	 * If we are Redzoning then check if there is some space between the
81819f0fc   Christoph Lameter   SLUB core
2679
  	 * end of the object and the free pointer. If not then add an
672bba3a4   Christoph Lameter   SLUB: update comm...
2680
  	 * additional word to have some bytes to store Redzone information.
81819f0fc   Christoph Lameter   SLUB core
2681
2682
2683
  	 */
  	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2684
  #endif
81819f0fc   Christoph Lameter   SLUB core
2685
2686
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
2687
2688
  	 * With that we have determined the number of bytes in actual use
  	 * by the object. This is the potential offset to the free pointer.
81819f0fc   Christoph Lameter   SLUB core
2689
2690
2691
2692
  	 */
  	s->inuse = size;
  
  	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f2   Christoph Lameter   Slab allocators: ...
2693
  		s->ctor)) {
81819f0fc   Christoph Lameter   SLUB core
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
  		/*
  		 * Relocate free pointer after the object if it is not
  		 * permitted to overwrite the first word of the object on
  		 * kmem_cache_free.
  		 *
  		 * This is the case if we do RCU, have a constructor or
  		 * destructor or are poisoning the objects.
  		 */
  		s->offset = size;
  		size += sizeof(void *);
  	}
c12b3c625   Christoph Lameter   SLUB Debug: Fix o...
2705
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
2706
2707
2708
2709
2710
2711
  	if (flags & SLAB_STORE_USER)
  		/*
  		 * Need to store information about allocs and frees after
  		 * the object.
  		 */
  		size += 2 * sizeof(struct track);
be7b3fbce   Christoph Lameter   SLUB: after objec...
2712
  	if (flags & SLAB_RED_ZONE)
81819f0fc   Christoph Lameter   SLUB core
2713
2714
2715
2716
  		/*
  		 * Add some empty padding so that we can catch
  		 * overwrites from earlier objects rather than let
  		 * tracking information or the free pointer be
0211a9c85   Frederik Schwarzer   trivial: fix an -...
2717
  		 * corrupted if a user writes before the start
81819f0fc   Christoph Lameter   SLUB core
2718
2719
2720
  		 * of the object.
  		 */
  		size += sizeof(void *);
41ecc55b8   Christoph Lameter   SLUB: add CONFIG_...
2721
  #endif
672bba3a4   Christoph Lameter   SLUB: update comm...
2722

81819f0fc   Christoph Lameter   SLUB core
2723
2724
  	/*
  	 * Determine the alignment based on various parameters that the
65c02d4cf   Christoph Lameter   SLUB: add support...
2725
2726
  	 * user specified and the dynamic determination of cache line size
  	 * on bootup.
81819f0fc   Christoph Lameter   SLUB core
2727
2728
  	 */
  	align = calculate_alignment(flags, align, s->objsize);
dcb0ce1bd   Zhang, Yanmin   slub: change kmem...
2729
  	s->align = align;
81819f0fc   Christoph Lameter   SLUB core
2730
2731
2732
2733
2734
2735
2736
2737
  
  	/*
  	 * SLUB stores one object immediately after another beginning from
  	 * offset 0. In order to align the objects we have to simply size
  	 * each object to conform to the alignment.
  	 */
  	size = ALIGN(size, align);
  	s->size = size;
06b285dc3   Christoph Lameter   slub: Make the or...
2738
2739
2740
  	if (forced_order >= 0)
  		order = forced_order;
  	else
ab9a0f196   Lai Jiangshan   slub: automatical...
2741
  		order = calculate_order(size, s->reserved);
81819f0fc   Christoph Lameter   SLUB core
2742

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2743
  	if (order < 0)
81819f0fc   Christoph Lameter   SLUB core
2744
  		return 0;
b7a49f0d4   Christoph Lameter   slub: Determine g...
2745
  	s->allocflags = 0;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2746
  	if (order)
b7a49f0d4   Christoph Lameter   slub: Determine g...
2747
2748
2749
2750
2751
2752
2753
  		s->allocflags |= __GFP_COMP;
  
  	if (s->flags & SLAB_CACHE_DMA)
  		s->allocflags |= SLUB_DMA;
  
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		s->allocflags |= __GFP_RECLAIMABLE;
81819f0fc   Christoph Lameter   SLUB core
2754
2755
2756
  	/*
  	 * Determine the number of objects per slab
  	 */
ab9a0f196   Lai Jiangshan   slub: automatical...
2757
2758
  	s->oo = oo_make(order, size, s->reserved);
  	s->min = oo_make(get_order(size), size, s->reserved);
205ab99dd   Christoph Lameter   slub: Update stat...
2759
2760
  	if (oo_objects(s->oo) > oo_objects(s->max))
  		s->max = s->oo;
81819f0fc   Christoph Lameter   SLUB core
2761

834f3d119   Christoph Lameter   slub: Add kmem_ca...
2762
  	return !!oo_objects(s->oo);
81819f0fc   Christoph Lameter   SLUB core
2763
2764
  
  }
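
  /*
   * Editor's worked example of the layout computed above (not part of the
   * original slub.c), assuming a 64 bit kernel (sizeof(void *) = 8),
   * ARCH_SLAB_MINALIGN = 8, no ctor and a cache created with objsize = 96
   * and flags = SLAB_RED_ZONE | SLAB_POISON:
   *
   *	size = ALIGN(96, 8) = 96 and __OBJECT_POISON is set.
   *	SLAB_RED_ZONE with size == objsize adds one word: size = 104.
   *	s->inuse = 104.
   *	Poisoning forces the free pointer after the object:
   *	s->offset = 104, size = 112.
   *	SLAB_RED_ZONE adds one more word of padding: size = 120.
   *	align = 8, so s->size = ALIGN(120, 8) = 120.
   *
   * Each 96 byte object therefore occupies 120 bytes while these debug
   * flags are enabled; without them it would occupy 96 bytes with the
   * free pointer stored inside the (free) object itself.
   */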
55136592f   Christoph Lameter   slub: Remove dyna...
2765
  static int kmem_cache_open(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
2766
2767
  		const char *name, size_t size,
  		size_t align, unsigned long flags,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
2768
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
2769
2770
2771
2772
  {
  	memset(s, 0, kmem_size);
  	s->name = name;
  	s->ctor = ctor;
81819f0fc   Christoph Lameter   SLUB core
2773
  	s->objsize = size;
81819f0fc   Christoph Lameter   SLUB core
2774
  	s->align = align;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
2775
  	s->flags = kmem_cache_flags(size, flags, name, ctor);
ab9a0f196   Lai Jiangshan   slub: automatical...
2776
  	s->reserved = 0;
81819f0fc   Christoph Lameter   SLUB core
2777

da9a638c6   Lai Jiangshan   slub,rcu: don't a...
2778
2779
  	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
  		s->reserved = sizeof(struct rcu_head);
81819f0fc   Christoph Lameter   SLUB core
2780

06b285dc3   Christoph Lameter   slub: Make the or...
2781
  	if (!calculate_sizes(s, -1))
81819f0fc   Christoph Lameter   SLUB core
2782
  		goto error;
3de472138   David Rientjes   slub: use size an...
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
  	if (disable_higher_order_debug) {
  		/*
  		 * Disable debugging flags that store metadata if the min slab
  		 * order increased.
  		 */
  		if (get_order(s->size) > get_order(s->objsize)) {
  			s->flags &= ~DEBUG_METADATA_FLAGS;
  			s->offset = 0;
  			if (!calculate_sizes(s, -1))
  				goto error;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
2795

b789ef518   Christoph Lameter   slub: Add cmpxchg...
2796
2797
2798
2799
2800
  #ifdef CONFIG_CMPXCHG_DOUBLE
  	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
  		/* Enable fast mode */
  		s->flags |= __CMPXCHG_DOUBLE;
  #endif
3b89d7d88   David Rientjes   slub: move min_pa...
2801
2802
2803
2804
  	/*
  	 * The larger the object size is, the more pages we want on the partial
  	 * list to avoid pounding the page allocator excessively.
  	 */
c0bdb232b   David Rientjes   slub: rename calc...
2805
  	set_min_partial(s, ilog2(s->size));
81819f0fc   Christoph Lameter   SLUB core
2806
2807
  	s->refcount = 1;
  #ifdef CONFIG_NUMA
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
2808
  	s->remote_node_defrag_ratio = 1000;
81819f0fc   Christoph Lameter   SLUB core
2809
  #endif
55136592f   Christoph Lameter   slub: Remove dyna...
2810
  	if (!init_kmem_cache_nodes(s))
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
2811
  		goto error;
81819f0fc   Christoph Lameter   SLUB core
2812

55136592f   Christoph Lameter   slub: Remove dyna...
2813
  	if (alloc_kmem_cache_cpus(s))
81819f0fc   Christoph Lameter   SLUB core
2814
  		return 1;
ff12059ed   Christoph Lameter   SLUB: this_cpu: R...
2815

4c93c355d   Christoph Lameter   SLUB: Place kmem_...
2816
  	free_kmem_cache_nodes(s);
81819f0fc   Christoph Lameter   SLUB core
2817
2818
2819
2820
2821
  error:
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slab %s size=%lu realsize=%u "
  			"order=%u offset=%u flags=%lx
  ",
834f3d119   Christoph Lameter   slub: Add kmem_ca...
2822
  			s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0fc   Christoph Lameter   SLUB core
2823
2824
2825
  			s->offset, flags);
  	return 0;
  }
81819f0fc   Christoph Lameter   SLUB core
2826
2827
  
  /*
81819f0fc   Christoph Lameter   SLUB core
2828
2829
2830
2831
2832
2833
2834
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->objsize;
  }
  EXPORT_SYMBOL(kmem_cache_size);
33b12c381   Christoph Lameter   slub: Dump list o...
2835
2836
2837
2838
2839
2840
  static void list_slab_objects(struct kmem_cache *s, struct page *page,
  							const char *text)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	void *addr = page_address(page);
  	void *p;
a5dd5c117   Namhyung Kim   slub: Fix signedn...
2841
2842
  	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
  				     sizeof(long), GFP_ATOMIC);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2843
2844
  	if (!map)
  		return;
33b12c381   Christoph Lameter   slub: Dump list o...
2845
2846
  	slab_err(s, page, "%s", text);
  	slab_lock(page);
33b12c381   Christoph Lameter   slub: Dump list o...
2847

5f80b13ae   Christoph Lameter   slub: get_map() f...
2848
  	get_map(s, page, map);
33b12c381   Christoph Lameter   slub: Dump list o...
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
  	for_each_object(p, s, addr, page->objects) {
  
  		if (!test_bit(slab_index(p, s, addr), map)) {
  			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu
  ",
  							p, p - addr);
  			print_tracking(s, p);
  		}
  	}
  	slab_unlock(page);
bbd7d57bf   Eric Dumazet   slub: Potential s...
2859
  	kfree(map);
33b12c381   Christoph Lameter   slub: Dump list o...
2860
2861
  #endif
  }
81819f0fc   Christoph Lameter   SLUB core
2862
  /*
599870b17   Christoph Lameter   slub: free_list()...
2863
   * Attempt to free all partial slabs on a node.
81819f0fc   Christoph Lameter   SLUB core
2864
   */
599870b17   Christoph Lameter   slub: free_list()...
2865
  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0fc   Christoph Lameter   SLUB core
2866
  {
81819f0fc   Christoph Lameter   SLUB core
2867
2868
2869
2870
  	unsigned long flags;
  	struct page *page, *h;
  
  	spin_lock_irqsave(&n->list_lock, flags);
33b12c381   Christoph Lameter   slub: Dump list o...
2871
  	list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0fc   Christoph Lameter   SLUB core
2872
  		if (!page->inuse) {
5cc6eee8a   Christoph Lameter   slub: explicit li...
2873
  			remove_partial(n, page);
81819f0fc   Christoph Lameter   SLUB core
2874
  			discard_slab(s, page);
33b12c381   Christoph Lameter   slub: Dump list o...
2875
2876
2877
  		} else {
  			list_slab_objects(s, page,
  				"Objects remaining on kmem_cache_close()");
599870b17   Christoph Lameter   slub: free_list()...
2878
  		}
33b12c381   Christoph Lameter   slub: Dump list o...
2879
  	}
81819f0fc   Christoph Lameter   SLUB core
2880
  	spin_unlock_irqrestore(&n->list_lock, flags);
81819f0fc   Christoph Lameter   SLUB core
2881
2882
2883
  }
  
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
2884
   * Release all resources used by a slab cache.
81819f0fc   Christoph Lameter   SLUB core
2885
   */
0c7100132   Christoph Lameter   SLUB: add some mo...
2886
  static inline int kmem_cache_close(struct kmem_cache *s)
81819f0fc   Christoph Lameter   SLUB core
2887
2888
2889
2890
  {
  	int node;
  
  	flush_all(s);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
2891
  	free_percpu(s->cpu_slab);
81819f0fc   Christoph Lameter   SLUB core
2892
  	/* Attempt to free all objects */
f64dc58c5   Christoph Lameter   Memoryless nodes:...
2893
  	for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0fc   Christoph Lameter   SLUB core
2894
  		struct kmem_cache_node *n = get_node(s, node);
599870b17   Christoph Lameter   slub: free_list()...
2895
2896
  		free_partial(s, n);
  		if (n->nr_partial || slabs_node(s, node))
81819f0fc   Christoph Lameter   SLUB core
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
  			return 1;
  	}
  	free_kmem_cache_nodes(s);
  	return 0;
  }
  
  /*
   * Close a cache and release the kmem_cache structure
   * (must be used for caches created using kmem_cache_create)
   */
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	down_write(&slub_lock);
  	s->refcount--;
  	if (!s->refcount) {
  		list_del(&s->list);
d629d8195   Pekka Enberg   slub: improve kme...
2913
2914
2915
2916
2917
2918
  		if (kmem_cache_close(s)) {
  			printk(KERN_ERR "SLUB %s: %s called for cache that "
  				"still has objects.
  ", s->name, __func__);
  			dump_stack();
  		}
d76b1590e   Eric Dumazet   slub: Fix kmem_ca...
2919
2920
  		if (s->flags & SLAB_DESTROY_BY_RCU)
  			rcu_barrier();
81819f0fc   Christoph Lameter   SLUB core
2921
  		sysfs_slab_remove(s);
2bce64858   Christoph Lameter   slub: Allow remov...
2922
2923
  	}
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
2924
2925
2926
2927
2928
2929
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  
  /********************************************************************
   *		Kmalloc subsystem
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
2930
  struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
81819f0fc   Christoph Lameter   SLUB core
2931
  EXPORT_SYMBOL(kmalloc_caches);
51df11428   Christoph Lameter   slub: Dynamically...
2932
  static struct kmem_cache *kmem_cache;
55136592f   Christoph Lameter   slub: Remove dyna...
2933
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
2934
  static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
55136592f   Christoph Lameter   slub: Remove dyna...
2935
  #endif
81819f0fc   Christoph Lameter   SLUB core
2936
2937
  static int __init setup_slub_min_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2938
  	get_option(&str, &slub_min_order);
81819f0fc   Christoph Lameter   SLUB core
2939
2940
2941
2942
2943
2944
2945
2946
  
  	return 1;
  }
  
  __setup("slub_min_order=", setup_slub_min_order);
  
  static int __init setup_slub_max_order(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2947
  	get_option(&str, &slub_max_order);
818cf5909   David Rientjes   slub: enforce MAX...
2948
  	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0fc   Christoph Lameter   SLUB core
2949
2950
2951
2952
2953
2954
2955
2956
  
  	return 1;
  }
  
  __setup("slub_max_order=", setup_slub_max_order);
  
  static int __init setup_slub_min_objects(char *str)
  {
064287807   Pekka Enberg   SLUB: Fix coding ...
2957
  	get_option(&str, &slub_min_objects);
81819f0fc   Christoph Lameter   SLUB core
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
  
  	return 1;
  }
  
  __setup("slub_min_objects=", setup_slub_min_objects);
  
  static int __init setup_slub_nomerge(char *str)
  {
  	slub_nomerge = 1;
  	return 1;
  }
  
  __setup("slub_nomerge", setup_slub_nomerge);
51df11428   Christoph Lameter   slub: Dynamically...
2971
2972
  static struct kmem_cache *__init create_kmalloc_cache(const char *name,
  						int size, unsigned int flags)
81819f0fc   Christoph Lameter   SLUB core
2973
  {
51df11428   Christoph Lameter   slub: Dynamically...
2974
2975
2976
  	struct kmem_cache *s;
  
  	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
83b519e8b   Pekka Enberg   slab: setup alloc...
2977
2978
2979
2980
  	/*
  	 * This function is called with IRQs disabled during early-boot on
  	 * a single CPU, so there's no need to take slub_lock here.
  	 */
55136592f   Christoph Lameter   slub: Remove dyna...
2981
  	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
319d1e240   Christoph Lameter   slub: Drop fallba...
2982
  								flags, NULL))
81819f0fc   Christoph Lameter   SLUB core
2983
2984
2985
  		goto panic;
  
  	list_add(&s->list, &slab_caches);
51df11428   Christoph Lameter   slub: Dynamically...
2986
  	return s;
81819f0fc   Christoph Lameter   SLUB core
2987
2988
2989
2990
  
  panic:
  	panic("Creation of kmalloc slab %s size=%d failed.
  ", name, size);
51df11428   Christoph Lameter   slub: Dynamically...
2991
  	return NULL;
81819f0fc   Christoph Lameter   SLUB core
2992
  }
f1b263393   Christoph Lameter   SLUB: faster more...
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
  /*
   * Conversion table for small slab sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3025
3026
3027
3028
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
81819f0fc   Christoph Lameter   SLUB core
3029
3030
  static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  {
f1b263393   Christoph Lameter   SLUB: faster more...
3031
  	int index;
81819f0fc   Christoph Lameter   SLUB core
3032

f1b263393   Christoph Lameter   SLUB: faster more...
3033
3034
3035
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
81819f0fc   Christoph Lameter   SLUB core
3036

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3037
  		index = size_index[size_index_elem(size)];
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3038
  	} else
f1b263393   Christoph Lameter   SLUB: faster more...
3039
  		index = fls(size - 1);
81819f0fc   Christoph Lameter   SLUB core
3040
3041
  
  #ifdef CONFIG_ZONE_DMA
f1b263393   Christoph Lameter   SLUB: faster more...
3042
  	if (unlikely((flags & SLUB_DMA)))
51df11428   Christoph Lameter   slub: Dynamically...
3043
  		return kmalloc_dma_caches[index];
f1b263393   Christoph Lameter   SLUB: faster more...
3044

81819f0fc   Christoph Lameter   SLUB core
3045
  #endif
51df11428   Christoph Lameter   slub: Dynamically...
3046
  	return kmalloc_caches[index];
81819f0fc   Christoph Lameter   SLUB core
3047
3048
3049
3050
  }
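
  /*
   * Editor's worked example (not part of the original slub.c), assuming the
   * default 8 byte minimum alignment so that the 96 and 192 byte caches
   * exist:
   *
   *	kmalloc(72, ...):  size_index[(72 - 1) / 8]  = size_index[8]  = 1,
   *			   i.e. the 96 byte cache.
   *	kmalloc(100, ...): size_index[(100 - 1) / 8] = size_index[12] = 7,
   *			   i.e. the 128 byte cache.
   *	kmalloc(200, ...): 200 > 192, so index = fls(199) = 8,
   *			   i.e. the 256 byte cache.
   */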
  
  void *__kmalloc(size_t size, gfp_t flags)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3051
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3052
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3053

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3054
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3055
  		return kmalloc_large(size, flags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3056
3057
3058
3059
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3060
  		return s;
2154a3363   Christoph Lameter   slub: Use a const...
3061
  	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3062

ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3063
  	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3064
3065
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3066
3067
  }
  EXPORT_SYMBOL(__kmalloc);
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3068
  #ifdef CONFIG_NUMA
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3069
3070
  static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
  {
b1eeab676   Vegard Nossum   kmemcheck: add ho...
3071
  	struct page *page;
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3072
  	void *ptr = NULL;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3073

b1eeab676   Vegard Nossum   kmemcheck: add ho...
3074
3075
  	flags |= __GFP_COMP | __GFP_NOTRACK;
  	page = alloc_pages_node(node, flags, get_order(size));
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3076
  	if (page)
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3077
3078
3079
3080
  		ptr = page_address(page);
  
  	kmemleak_alloc(ptr, size, 1, flags);
  	return ptr;
f619cfe1b   Christoph Lameter   slub: Add kmalloc...
3081
  }
81819f0fc   Christoph Lameter   SLUB core
3082
3083
  void *__kmalloc_node(size_t size, gfp_t flags, int node)
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3084
  	struct kmem_cache *s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3085
  	void *ret;
81819f0fc   Christoph Lameter   SLUB core
3086

057685cf5   Ingo Molnar   Merge branch 'for...
3087
  	if (unlikely(size > SLUB_MAX_SIZE)) {
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3088
  		ret = kmalloc_large_node(size, flags, node);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3089
3090
3091
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3092
3093
3094
  
  		return ret;
  	}
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3095
3096
3097
3098
  
  	s = get_slab(size, flags);
  
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3099
  		return s;
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3100
  	ret = slab_alloc(s, flags, node, _RET_IP_);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3101
  	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4e   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3102
3103
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3104
3105
3106
3107
3108
3109
  }
  EXPORT_SYMBOL(__kmalloc_node);
  #endif
  
  size_t ksize(const void *object)
  {
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3110
  	struct page *page;
81819f0fc   Christoph Lameter   SLUB core
3111

ef8b4520b   Christoph Lameter   Slab allocators: ...
3112
  	if (unlikely(object == ZERO_SIZE_PTR))
272c1d21d   Christoph Lameter   SLUB: return ZERO...
3113
  		return 0;
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3114
  	page = virt_to_head_page(object);
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3115

76994412f   Pekka Enberg   slub: ksize() abu...
3116
3117
  	if (unlikely(!PageSlab(page))) {
  		WARN_ON(!PageCompound(page));
294a80a8e   Vegard Nossum   SLUB's ksize() fa...
3118
  		return PAGE_SIZE << compound_order(page);
76994412f   Pekka Enberg   slub: ksize() abu...
3119
  	}
81819f0fc   Christoph Lameter   SLUB core
3120

b3d41885d   Eric Dumazet   slub: fix kmemche...
3121
  	return slab_ksize(page->slab);
81819f0fc   Christoph Lameter   SLUB core
3122
  }
b1aabecd5   Kirill A. Shutemov   mm: Export symbol...
3123
  EXPORT_SYMBOL(ksize);
81819f0fc   Christoph Lameter   SLUB core
3124

d18a90dd8   Ben Greear   slub: Add method ...
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
  #ifdef CONFIG_SLUB_DEBUG
  bool verify_mem_not_deleted(const void *x)
  {
  	struct page *page;
  	void *object = (void *)x;
  	unsigned long flags;
  	bool rv;
  
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
  		return false;
  
  	local_irq_save(flags);
  
  	page = virt_to_head_page(x);
  	if (unlikely(!PageSlab(page))) {
  		/* maybe it was from stack? */
  		rv = true;
  		goto out_unlock;
  	}
  
  	slab_lock(page);
  	if (on_freelist(page->slab, page, object)) {
  		object_err(page->slab, page, object, "Object is on free-list");
  		rv = false;
  	} else {
  		rv = true;
  	}
  	slab_unlock(page);
  
  out_unlock:
  	local_irq_restore(flags);
  	return rv;
  }
  EXPORT_SYMBOL(verify_mem_not_deleted);
  #endif
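
  /*
   * Editor's usage sketch (not part of the original slub.c): a caller that
   * suspects a use-after-free can assert on an object it believes is still
   * allocated. Only meaningful when CONFIG_SLUB_DEBUG is enabled.
   *
   *	WARN_ON(!verify_mem_not_deleted(obj));
   */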
81819f0fc   Christoph Lameter   SLUB core
3160
3161
  void kfree(const void *x)
  {
81819f0fc   Christoph Lameter   SLUB core
3162
  	struct page *page;
5bb983b0c   Christoph Lameter   SLUB: Deal with a...
3163
  	void *object = (void *)x;
81819f0fc   Christoph Lameter   SLUB core
3164

2121db74b   Pekka Enberg   kmemtrace: trace ...
3165
  	trace_kfree(_RET_IP_, x);
2408c5503   Satyam Sharma   {slub, slob}: use...
3166
  	if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0fc   Christoph Lameter   SLUB core
3167
  		return;
b49af68ff   Christoph Lameter   Add virt_to_head_...
3168
  	page = virt_to_head_page(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3169
  	if (unlikely(!PageSlab(page))) {
0937502af   Christoph Lameter   slub: Add check f...
3170
  		BUG_ON(!PageCompound(page));
e4f7c0b44   Catalin Marinas   kmemleak: Trace t...
3171
  		kmemleak_free(x);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3172
3173
3174
  		put_page(page);
  		return;
  	}
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3175
  	slab_free(page->slab, page, object, _RET_IP_);
81819f0fc   Christoph Lameter   SLUB core
3176
3177
  }
  EXPORT_SYMBOL(kfree);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3178
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3179
3180
3181
3182
3183
3184
3185
3186
   * kmem_cache_shrink removes empty slabs from the partial lists and sorts
   * the remaining slabs by the number of items in use. The slabs with the
   * most items in use come first. New allocations will then fill those up
   * and thus they can be removed from the partial lists.
   *
   * The slabs with the least items are placed last. This results in them
   * being allocated from last increasing the chance that the last objects
   * are freed in them.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3187
3188
3189
3190
3191
3192
3193
3194
   */
  int kmem_cache_shrink(struct kmem_cache *s)
  {
  	int node;
  	int i;
  	struct kmem_cache_node *n;
  	struct page *page;
  	struct page *t;
205ab99dd   Christoph Lameter   slub: Update stat...
3195
  	int objects = oo_objects(s->max);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3196
  	struct list_head *slabs_by_inuse =
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3197
  		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3198
3199
3200
3201
3202
3203
  	unsigned long flags;
  
  	if (!slabs_by_inuse)
  		return -ENOMEM;
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3204
  	for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3205
3206
3207
3208
  		n = get_node(s, node);
  
  		if (!n->nr_partial)
  			continue;
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3209
  		for (i = 0; i < objects; i++)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3210
3211
3212
3213
3214
  			INIT_LIST_HEAD(slabs_by_inuse + i);
  
  		spin_lock_irqsave(&n->list_lock, flags);
  
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3215
  		 * Build lists indexed by the items in use in each slab.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3216
  		 *
672bba3a4   Christoph Lameter   SLUB: update comm...
3217
3218
  		 * Note that concurrent frees may occur while we hold the
  		 * list_lock. page->inuse here is the upper limit.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3219
3220
  		 */
  		list_for_each_entry_safe(page, t, &n->partial, lru) {
881db7fb0   Christoph Lameter   slub: Invert lock...
3221
  			if (!page->inuse) {
5cc6eee8a   Christoph Lameter   slub: explicit li...
3222
  				remove_partial(n, page);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3223
3224
  				discard_slab(s, page);
  			} else {
fcda3d89b   Christoph Lameter   SLUB: Remove chec...
3225
3226
  				list_move(&page->lru,
  				slabs_by_inuse + page->inuse);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3227
3228
  			}
  		}
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3229
  		/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3230
3231
  		 * Rebuild the partial list with the slabs filled up most
  		 * first and the least used slabs at the end.
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3232
  		 */
834f3d119   Christoph Lameter   slub: Add kmem_ca...
3233
  		for (i = objects - 1; i >= 0; i--)
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3234
  			list_splice(slabs_by_inuse + i, n->partial.prev);
2086d26a0   Christoph Lameter   SLUB: Free slabs ...
3235
3236
3237
3238
3239
3240
3241
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	kfree(slabs_by_inuse);
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
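
  /*
   * Editor's example (not part of the original slub.c): with 8 objects per
   * slab and a partial list holding slabs with 0, 3 and 7 objects in use,
   * kmem_cache_shrink() discards the empty slab and relinks the others as
   * [7, 3], so the nearly full slab is refilled first. The memory hotplug
   * offline callback below relies on exactly this to release as many pages
   * as possible before a node goes away.
   */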
92a5bbc11   Pekka Enberg   SLUB: Fix memory ...
3242
  #if defined(CONFIG_MEMORY_HOTPLUG)
b9049e234   Yasunori Goto   memory hotplug: m...
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
  static int slab_mem_going_offline_callback(void *arg)
  {
  	struct kmem_cache *s;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list)
  		kmem_cache_shrink(s);
  	up_read(&slub_lock);
  
  	return 0;
  }
  
  static void slab_mem_offline_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int offline_node;
  
  	offline_node = marg->status_change_nid;
  
  	/*
  	 * If the node still has available memory, we still need its
  	 * kmem_cache_node structure.
  	 */
  	if (offline_node < 0)
  		return;
  
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		n = get_node(s, offline_node);
  		if (n) {
  			/*
  			 * if n->nr_slabs > 0, slabs still exist on the node
  			 * that is going down. We were unable to free them,
c9404c9c3   Adam Buchbinder   Fix misspelling o...
3278
  			 * and the offline_pages() function shouldn't call this
b9049e234   Yasunori Goto   memory hotplug: m...
3279
3280
  			 * callback. So, we must fail.
  			 */
0f389ec63   Christoph Lameter   slub: No need for...
3281
  			BUG_ON(slabs_node(s, offline_node));
b9049e234   Yasunori Goto   memory hotplug: m...
3282
3283
  
  			s->node[offline_node] = NULL;
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3284
  			kmem_cache_free(kmem_cache_node, n);
b9049e234   Yasunori Goto   memory hotplug: m...
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
  		}
  	}
  	up_read(&slub_lock);
  }
  
  static int slab_mem_going_online_callback(void *arg)
  {
  	struct kmem_cache_node *n;
  	struct kmem_cache *s;
  	struct memory_notify *marg = arg;
  	int nid = marg->status_change_nid;
  	int ret = 0;
  
  	/*
  	 * If the node's memory is already available, then kmem_cache_node is
  	 * already created. Nothing to do.
  	 */
  	if (nid < 0)
  		return 0;
  
  	/*
0121c619d   Christoph Lameter   slub: Whitespace ...
3306
  	 * We are bringing a node online. No memory is available yet. We must
b9049e234   Yasunori Goto   memory hotplug: m...
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
  	 * allocate a kmem_cache_node structure in order to bring the node
  	 * online.
  	 */
  	down_read(&slub_lock);
  	list_for_each_entry(s, &slab_caches, list) {
  		/*
  		 * XXX: kmem_cache_alloc_node will fall back to other nodes
  		 *      since memory is not yet available from the node that
  		 *      is brought up.
  		 */
8de66a0c0   Christoph Lameter   slub: Fix up miss...
3317
  		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e234   Yasunori Goto   memory hotplug: m...
3318
3319
3320
3321
  		if (!n) {
  			ret = -ENOMEM;
  			goto out;
  		}
5595cffc8   Pekka Enberg   SLUB: dynamic per...
3322
  		init_kmem_cache_node(n, s);
b9049e234   Yasunori Goto   memory hotplug: m...
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
  		s->node[nid] = n;
  	}
  out:
  	up_read(&slub_lock);
  	return ret;
  }
  
  static int slab_memory_callback(struct notifier_block *self,
  				unsigned long action, void *arg)
  {
  	int ret = 0;
  
  	switch (action) {
  	case MEM_GOING_ONLINE:
  		ret = slab_mem_going_online_callback(arg);
  		break;
  	case MEM_GOING_OFFLINE:
  		ret = slab_mem_going_offline_callback(arg);
  		break;
  	case MEM_OFFLINE:
  	case MEM_CANCEL_ONLINE:
  		slab_mem_offline_callback(arg);
  		break;
  	case MEM_ONLINE:
  	case MEM_CANCEL_OFFLINE:
  		break;
  	}
dc19f9db3   KAMEZAWA Hiroyuki   memcg: memory hot...
3350
3351
3352
3353
  	if (ret)
  		ret = notifier_from_errno(ret);
  	else
  		ret = NOTIFY_OK;
b9049e234   Yasunori Goto   memory hotplug: m...
3354
3355
3356
3357
  	return ret;
  }
  
  #endif /* CONFIG_MEMORY_HOTPLUG */
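  
  /*
   * Illustrative sketch (not part of slub.c): other subsystems with
   * per-node state follow the same notifier pattern as the callbacks
   * above. "my_memory_callback", "my_prepare_node" and "my_release_node"
   * are made-up names.
   *
   *	static int my_memory_callback(struct notifier_block *self,
   *				unsigned long action, void *arg)
   *	{
   *		struct memory_notify *m = arg;
   *
   *		switch (action) {
   *		case MEM_GOING_ONLINE:
   *			return notifier_from_errno(
   *				my_prepare_node(m->status_change_nid));
   *		case MEM_OFFLINE:
   *		case MEM_CANCEL_ONLINE:
   *			my_release_node(m->status_change_nid);
   *			break;
   *		}
   *		return NOTIFY_OK;
   *	}
   *
   *	hotplug_memory_notifier(my_memory_callback, 0);
   */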
81819f0fc   Christoph Lameter   SLUB core
3358
3359
3360
  /********************************************************************
   *			Basic setup of slabs
   *******************************************************************/
51df11428   Christoph Lameter   slub: Dynamically...
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
  /*
   * Used for early kmem_cache structures that were allocated using
   * the page allocator
   */
  
  static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
  {
  	int node;
  
  	list_add(&s->list, &slab_caches);
  	s->refcount = -1;
  
  	for_each_node_state(node, N_NORMAL_MEMORY) {
  		struct kmem_cache_node *n = get_node(s, node);
  		struct page *p;
  
  		if (n) {
  			list_for_each_entry(p, &n->partial, lru)
  				p->slab = s;
607bf324a   Li Zefan   slub: Fix a typo ...
3380
  #ifdef CONFIG_SLUB_DEBUG
51df11428   Christoph Lameter   slub: Dynamically...
3381
3382
3383
3384
3385
3386
  			list_for_each_entry(p, &n->full, lru)
  				p->slab = s;
  #endif
  		}
  	}
  }
81819f0fc   Christoph Lameter   SLUB core
3387
3388
3389
  void __init kmem_cache_init(void)
  {
  	int i;
4b356be01   Christoph Lameter   SLUB: minimum ali...
3390
  	int caches = 0;
51df11428   Christoph Lameter   slub: Dynamically...
3391
3392
  	struct kmem_cache *temp_kmem_cache;
  	int order;
51df11428   Christoph Lameter   slub: Dynamically...
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
  	struct kmem_cache *temp_kmem_cache_node;
  	unsigned long kmalloc_size;
  
  	kmem_size = offsetof(struct kmem_cache, node) +
  				nr_node_ids * sizeof(struct kmem_cache_node *);
  
  	/* Allocate two kmem_caches from the page allocator */
  	kmalloc_size = ALIGN(kmem_size, cache_line_size());
  	order = get_order(2 * kmalloc_size);
  	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
81819f0fc   Christoph Lameter   SLUB core
3403
3404
  	/*
  	 * Must first have the slab cache available for the allocations of the
672bba3a4   Christoph Lameter   SLUB: update comm...
3405
  	 * struct kmem_cache_node's. There is special bootstrap code in
81819f0fc   Christoph Lameter   SLUB core
3406
3407
  	 * kmem_cache_open for slab_state == DOWN.
  	 */
51df11428   Christoph Lameter   slub: Dynamically...
3408
3409
3410
3411
3412
  	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
  
  	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
  		sizeof(struct kmem_cache_node),
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
b9049e234   Yasunori Goto   memory hotplug: m...
3413

0c40ba4fd   Nadia Derbey   ipc: define the s...
3414
  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0fc   Christoph Lameter   SLUB core
3415
3416
3417
  
  	/* Able to allocate the per node structures */
  	slab_state = PARTIAL;
51df11428   Christoph Lameter   slub: Dynamically...
3418
3419
3420
3421
3422
  	temp_kmem_cache = kmem_cache;
  	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
  		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
  	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
81819f0fc   Christoph Lameter   SLUB core
3423

51df11428   Christoph Lameter   slub: Dynamically...
3424
3425
3426
3427
3428
3429
  	/*
  	 * Allocate kmem_cache_node properly from the kmem_cache slab.
  	 * kmem_cache_node is separately allocated so no need to
  	 * update any list pointers.
  	 */
  	temp_kmem_cache_node = kmem_cache_node;
81819f0fc   Christoph Lameter   SLUB core
3430

51df11428   Christoph Lameter   slub: Dynamically...
3431
3432
3433
3434
3435
3436
  	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
  	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
  
  	kmem_cache_bootstrap_fixup(kmem_cache_node);
  
  	caches++;
51df11428   Christoph Lameter   slub: Dynamically...
3437
3438
3439
3440
3441
3442
  	kmem_cache_bootstrap_fixup(kmem_cache);
  	caches++;
  	/* Free temporary boot structure */
  	free_pages((unsigned long)temp_kmem_cache, order);
  
  	/* Now we can use the kmem_cache to allocate kmalloc slabs */
f1b263393   Christoph Lameter   SLUB: faster more...
3443
3444
3445
3446
  
  	/*
  	 * Patch up the size_index table if we have strange large alignment
  	 * requirements for the kmalloc array. This is only the case for
6446faa2f   Christoph Lameter   slub: Fix up comm...
3447
  	 * MIPS it seems. The standard arches will not generate any code here.
f1b263393   Christoph Lameter   SLUB: faster more...
3448
3449
3450
3451
3452
3453
3454
3455
3456
  	 *
  	 * Largest permitted alignment is 256 bytes due to the way we
  	 * handle the index determination for the smaller caches.
  	 *
  	 * Make sure that nothing crazy happens if someone starts tinkering
  	 * around with ARCH_KMALLOC_MINALIGN
  	 */
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3457
3458
3459
3460
3461
3462
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
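  
  	/*
  	 * Worked example (illustrative): with KMALLOC_MIN_SIZE == 64 the
  	 * loop above visits i = 8, 16, ..., 56. A later kmalloc(24) has
  	 * size_index_elem(24) == 2, and size_index[2] now holds
  	 * KMALLOC_SHIFT_LOW (ilog2(64) == 6), so the request is served
  	 * from the 64-byte cache instead of a nonexistent smaller one.
  	 */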
f1b263393   Christoph Lameter   SLUB: faster more...
3463

acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3464
3465
3466
3467
3468
3469
3470
3471
  	if (KMALLOC_MIN_SIZE == 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 bytes.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  	} else if (KMALLOC_MIN_SIZE == 128) {
41d54d3bf   Christoph Lameter   slub: Do not use ...
3472
3473
3474
3475
3476
3477
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
acdfcd04d   Aaro Koskinen   SLUB: fix ARCH_KM...
3478
  			size_index[size_index_elem(i)] = 8;
41d54d3bf   Christoph Lameter   slub: Do not use ...
3479
  	}
51df11428   Christoph Lameter   slub: Dynamically...
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
  	/* Caches that are not of the two-to-the-power-of size */
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
  		caches++;
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
  		caches++;
  	}
  
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
  		caches++;
  	}
81819f0fc   Christoph Lameter   SLUB core
3495
3496
3497
  	slab_state = UP;
  
  	/* Provide the correct kmalloc names now that the caches are up */
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3498
3499
3500
3501
3502
3503
3504
3505
3506
  	if (KMALLOC_MIN_SIZE <= 32) {
  		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[1]->name);
  	}
  
  	if (KMALLOC_MIN_SIZE <= 64) {
  		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
  		BUG_ON(!kmalloc_caches[2]->name);
  	}
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3507
3508
3509
3510
  	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
  		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
  
  		BUG_ON(!s);
51df11428   Christoph Lameter   slub: Dynamically...
3511
  		kmalloc_caches[i]->name = s;
d7278bd7d   Christoph Lameter   slub: Check kaspr...
3512
  	}
81819f0fc   Christoph Lameter   SLUB core
3513
3514
3515
  
  #ifdef CONFIG_SMP
  	register_cpu_notifier(&slab_notifier);
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
3516
  #endif
81819f0fc   Christoph Lameter   SLUB core
3517

55136592f   Christoph Lameter   slub: Remove dyna...
3518
  #ifdef CONFIG_ZONE_DMA
51df11428   Christoph Lameter   slub: Dynamically...
3519
3520
  	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
55136592f   Christoph Lameter   slub: Remove dyna...
3521

51df11428   Christoph Lameter   slub: Dynamically...
3522
  		if (s && s->size) {
55136592f   Christoph Lameter   slub: Remove dyna...
3523
3524
3525
3526
  			char *name = kasprintf(GFP_NOWAIT,
  				 "dma-kmalloc-%d", s->objsize);
  
  			BUG_ON(!name);
51df11428   Christoph Lameter   slub: Dynamically...
3527
3528
  			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
  				s->objsize, SLAB_CACHE_DMA);
55136592f   Christoph Lameter   slub: Remove dyna...
3529
3530
3531
  		}
  	}
  #endif
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3532
3533
  	printk(KERN_INFO
  		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be01   Christoph Lameter   SLUB: minimum ali...
3534
3535
3536
  		" CPUs=%d, Nodes=%d
  ",
  		caches, cache_line_size(),
81819f0fc   Christoph Lameter   SLUB core
3537
3538
3539
  		slub_min_order, slub_max_order, slub_min_objects,
  		nr_cpu_ids, nr_node_ids);
  }
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3540
3541
  void __init kmem_cache_init_late(void)
  {
7e85ee0c1   Pekka Enberg   slab,slub: don't ...
3542
  }
81819f0fc   Christoph Lameter   SLUB core
3543
3544
3545
3546
3547
3548
3549
  /*
   * Find a mergeable slab cache
   */
  static int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  		return 1;
c59def9f2   Christoph Lameter   Slab allocators: ...
3550
  	if (s->ctor)
81819f0fc   Christoph Lameter   SLUB core
3551
  		return 1;
8ffa68755   Christoph Lameter   SLUB: Fix NUMA / ...
3552
3553
3554
3555
3556
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
81819f0fc   Christoph Lameter   SLUB core
3557
3558
3559
3560
  	return 0;
  }
  
  static struct kmem_cache *find_mergeable(size_t size,
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3561
  		size_t align, unsigned long flags, const char *name,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3562
  		void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3563
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3564
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
3565
3566
3567
  
  	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  		return NULL;
c59def9f2   Christoph Lameter   Slab allocators: ...
3568
  	if (ctor)
81819f0fc   Christoph Lameter   SLUB core
3569
3570
3571
3572
3573
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3574
  	flags = kmem_cache_flags(size, flags, name, NULL);
81819f0fc   Christoph Lameter   SLUB core
3575

5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3576
  	list_for_each_entry(s, &slab_caches, list) {
81819f0fc   Christoph Lameter   SLUB core
3577
3578
3579
3580
3581
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3582
  		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0fc   Christoph Lameter   SLUB core
3583
3584
3585
3586
3587
  			continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
064287807   Pekka Enberg   SLUB: Fix coding ...
3588
  		if ((s->size & ~(align - 1)) != s->size)
81819f0fc   Christoph Lameter   SLUB core
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  
  		return s;
  	}
  	return NULL;
  }
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc50685   Alexey Dobriyan   SL*B: drop kmem c...
3600
  		size_t align, unsigned long flags, void (*ctor)(void *))
81819f0fc   Christoph Lameter   SLUB core
3601
3602
  {
  	struct kmem_cache *s;
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3603
  	char *n;
81819f0fc   Christoph Lameter   SLUB core
3604

fe1ff49d0   Benjamin Herrenschmidt   mm: kmem_cache_cr...
3605
3606
  	if (WARN_ON(!name))
  		return NULL;
81819f0fc   Christoph Lameter   SLUB core
3607
  	down_write(&slub_lock);
ba0268a8b   Christoph Lameter   SLUB: accurately ...
3608
  	s = find_mergeable(size, align, flags, name, ctor);
81819f0fc   Christoph Lameter   SLUB core
3609
3610
3611
3612
3613
3614
3615
3616
  	if (s) {
  		s->refcount++;
  		/*
  		 * Adjust the object sizes so that we clear
  		 * the complete object on kzalloc.
  		 */
  		s->objsize = max(s->objsize, (int)size);
  		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2f   Christoph Lameter   slub: Fix up comm...
3617

7b8f3b66d   David Rientjes   slub: avoid leaki...
3618
  		if (sysfs_slab_alias(s, name)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3619
  			s->refcount--;
81819f0fc   Christoph Lameter   SLUB core
3620
  			goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3621
  		}
2bce64858   Christoph Lameter   slub: Allow remov...
3622
  		up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3623
3624
  		return s;
  	}
6446faa2f   Christoph Lameter   slub: Fix up comm...
3625

84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3626
3627
3628
  	n = kstrdup(name, GFP_KERNEL);
  	if (!n)
  		goto err;
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3629
3630
  	s = kmalloc(kmem_size, GFP_KERNEL);
  	if (s) {
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3631
  		if (kmem_cache_open(s, n,
c59def9f2   Christoph Lameter   Slab allocators: ...
3632
  				size, align, flags, ctor)) {
81819f0fc   Christoph Lameter   SLUB core
3633
  			list_add(&s->list, &slab_caches);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3634
  			if (sysfs_slab_add(s)) {
7b8f3b66d   David Rientjes   slub: avoid leaki...
3635
  				list_del(&s->list);
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3636
  				kfree(n);
7b8f3b66d   David Rientjes   slub: avoid leaki...
3637
  				kfree(s);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3638
  				goto err;
7b8f3b66d   David Rientjes   slub: avoid leaki...
3639
  			}
2bce64858   Christoph Lameter   slub: Allow remov...
3640
  			up_write(&slub_lock);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3641
3642
  			return s;
  		}
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
3643
  		kfree(n);
a0e1d1be2   Christoph Lameter   SLUB: Move sysfs ...
3644
  		kfree(s);
81819f0fc   Christoph Lameter   SLUB core
3645
  	}
68cee4f11   Pavel Emelyanov   slub: Fix slub_lo...
3646
  err:
81819f0fc   Christoph Lameter   SLUB core
3647
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3648

81819f0fc   Christoph Lameter   SLUB core
3649
3650
3651
3652
3653
3654
3655
3656
  	if (flags & SLAB_PANIC)
  		panic("Cannot create slabcache %s
  ", name);
  	else
  		s = NULL;
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
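  
  /*
   * Illustrative usage sketch (not part of slub.c): the typical life cycle
   * of a cache created through this interface. "my_cache" and
   * "struct my_object" are made-up names.
   *
   *	static struct kmem_cache *my_cache;
   *
   *	my_cache = kmem_cache_create("my_object",
   *			sizeof(struct my_object), 0,
   *			SLAB_HWCACHE_ALIGN, NULL);
   *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
   *	kmem_cache_free(my_cache, obj);
   *	kmem_cache_destroy(my_cache);
   */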
81819f0fc   Christoph Lameter   SLUB core
3657
  #ifdef CONFIG_SMP
27390bc33   Christoph Lameter   SLUB: fix locking...
3658
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3659
3660
   * Use the cpu notifier to ensure that the cpu slabs are flushed when
   * necessary.
81819f0fc   Christoph Lameter   SLUB core
3661
3662
3663
3664
3665
   */
  static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  		unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3666
3667
  	struct kmem_cache *s;
  	unsigned long flags;
81819f0fc   Christoph Lameter   SLUB core
3668
3669
3670
  
  	switch (action) {
  	case CPU_UP_CANCELED:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3671
  	case CPU_UP_CANCELED_FROZEN:
81819f0fc   Christoph Lameter   SLUB core
3672
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
3673
  	case CPU_DEAD_FROZEN:
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
3674
3675
3676
3677
3678
3679
3680
  		down_read(&slub_lock);
  		list_for_each_entry(s, &slab_caches, list) {
  			local_irq_save(flags);
  			__flush_cpu_slab(s, cpu);
  			local_irq_restore(flags);
  		}
  		up_read(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
3681
3682
3683
3684
3685
3686
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
064287807   Pekka Enberg   SLUB: Fix coding ...
3687
  static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee6   Ingo Molnar   SLUB: fix checkpa...
3688
  	.notifier_call = slab_cpuup_callback
064287807   Pekka Enberg   SLUB: Fix coding ...
3689
  };
81819f0fc   Christoph Lameter   SLUB core
3690
3691
  
  #endif
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3692
  void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3693
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3694
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3695
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3696

ffadd4d0f   Christoph Lameter   SLUB: Introduce a...
3697
  	if (unlikely(size > SLUB_MAX_SIZE))
eada35efc   Pekka Enberg   slub: kmalloc pag...
3698
  		return kmalloc_large(size, gfpflags);
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3699
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3700

2408c5503   Satyam Sharma   {slub, slob}: use...
3701
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3702
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3703

2154a3363   Christoph Lameter   slub: Use a const...
3704
  	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3705

25985edce   Lucas De Marchi   Fix common misspe...
3706
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3707
  	trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3708
3709
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3710
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3711
  #ifdef CONFIG_NUMA
81819f0fc   Christoph Lameter   SLUB core
3712
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3713
  					int node, unsigned long caller)
81819f0fc   Christoph Lameter   SLUB core
3714
  {
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3715
  	struct kmem_cache *s;
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3716
  	void *ret;
aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3717

d3e14aa33   Xiaotian Feng   slub: __kmalloc_n...
3718
3719
3720
3721
3722
3723
3724
3725
3726
  	if (unlikely(size > SLUB_MAX_SIZE)) {
  		ret = kmalloc_large_node(size, gfpflags, node);
  
  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << get_order(size),
  				   gfpflags, node);
  
  		return ret;
  	}
eada35efc   Pekka Enberg   slub: kmalloc pag...
3727

aadb4bc4a   Christoph Lameter   SLUB: direct pass...
3728
  	s = get_slab(size, gfpflags);
81819f0fc   Christoph Lameter   SLUB core
3729

2408c5503   Satyam Sharma   {slub, slob}: use...
3730
  	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132   Christoph Lameter   Slab allocators: ...
3731
  		return s;
81819f0fc   Christoph Lameter   SLUB core
3732

94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3733
  	ret = slab_alloc(s, gfpflags, node, caller);
25985edce   Lucas De Marchi   Fix common misspe...
3734
  	/* Honor the call site pointer we received. */
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
3735
  	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d05   Eduard - Gabriel Munteanu   kmemtrace: SLUB h...
3736
3737
  
  	return ret;
81819f0fc   Christoph Lameter   SLUB core
3738
  }
5d1f57e4d   Namhyung Kim   slub: Move NUMA-r...
3739
  #endif
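  
  /*
   * Illustrative sketch (not part of slub.c): these entry points are not
   * normally called directly; wrapper macros pass the caller's return
   * address so that allocation tracking attributes the allocation to the
   * real call site, e.g.:
   *
   *	p = __kmalloc_track_caller(size, GFP_KERNEL, _RET_IP_);
   */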
81819f0fc   Christoph Lameter   SLUB core
3740

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3741
  #ifdef CONFIG_SYSFS
205ab99dd   Christoph Lameter   slub: Update stat...
3742
3743
3744
3745
3746
3747
3748
3749
3750
  static int count_inuse(struct page *page)
  {
  	return page->inuse;
  }
  
  static int count_total(struct page *page)
  {
  	return page->objects;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3751
  #endif
205ab99dd   Christoph Lameter   slub: Update stat...
3752

ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
3753
  #ifdef CONFIG_SLUB_DEBUG
434e245dd   Christoph Lameter   SLUB: Do not allo...
3754
3755
  static int validate_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3756
3757
  {
  	void *p;
a973e9dd1   Christoph Lameter   Revert "unique en...
3758
  	void *addr = page_address(page);
53e15af03   Christoph Lameter   slub: validation ...
3759
3760
3761
3762
3763
3764
  
  	if (!check_slab(s, page) ||
  			!on_freelist(s, page, NULL))
  		return 0;
  
  	/* Now we know that a valid freelist exists */
39b264641   Christoph Lameter   slub: Store max n...
3765
  	bitmap_zero(map, page->objects);
53e15af03   Christoph Lameter   slub: validation ...
3766

5f80b13ae   Christoph Lameter   slub: get_map() f...
3767
3768
3769
3770
3771
  	get_map(s, page, map);
  	for_each_object(p, s, addr, page->objects) {
  		if (test_bit(slab_index(p, s, addr), map))
  			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
  				return 0;
53e15af03   Christoph Lameter   slub: validation ...
3772
  	}
224a88be4   Christoph Lameter   slub: for_each_ob...
3773
  	for_each_object(p, s, addr, page->objects)
7656c72b5   Christoph Lameter   SLUB: add macros ...
3774
  		if (!test_bit(slab_index(p, s, addr), map))
37d57443d   Tero Roponen   slub: Fix a crash...
3775
  			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af03   Christoph Lameter   slub: validation ...
3776
3777
3778
  				return 0;
  	return 1;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3779
3780
  static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  						unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3781
  {
881db7fb0   Christoph Lameter   slub: Invert lock...
3782
3783
3784
  	slab_lock(page);
  	validate_slab(s, page, map);
  	slab_unlock(page);
53e15af03   Christoph Lameter   slub: validation ...
3785
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3786
3787
  static int validate_slab_node(struct kmem_cache *s,
  		struct kmem_cache_node *n, unsigned long *map)
53e15af03   Christoph Lameter   slub: validation ...
3788
3789
3790
3791
3792
3793
3794
3795
  {
  	unsigned long count = 0;
  	struct page *page;
  	unsigned long flags;
  
  	spin_lock_irqsave(&n->list_lock, flags);
  
  	list_for_each_entry(page, &n->partial, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3796
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
  		count++;
  	}
  	if (count != n->nr_partial)
  		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  			"counter=%ld
  ", s->name, count, n->nr_partial);
  
  	if (!(s->flags & SLAB_STORE_USER))
  		goto out;
  
  	list_for_each_entry(page, &n->full, lru) {
434e245dd   Christoph Lameter   SLUB: Do not allo...
3808
  		validate_slab_slab(s, page, map);
53e15af03   Christoph Lameter   slub: validation ...
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
  		count++;
  	}
  	if (count != atomic_long_read(&n->nr_slabs))
  		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  			"counter=%ld
  ", s->name, count,
  			atomic_long_read(&n->nr_slabs));
  
  out:
  	spin_unlock_irqrestore(&n->list_lock, flags);
  	return count;
  }
434e245dd   Christoph Lameter   SLUB: Do not allo...
3821
  static long validate_slab_cache(struct kmem_cache *s)
53e15af03   Christoph Lameter   slub: validation ...
3822
3823
3824
  {
  	int node;
  	unsigned long count = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
3825
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245dd   Christoph Lameter   SLUB: Do not allo...
3826
3827
3828
3829
  				sizeof(unsigned long), GFP_KERNEL);
  
  	if (!map)
  		return -ENOMEM;
53e15af03   Christoph Lameter   slub: validation ...
3830
3831
  
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3832
  	for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af03   Christoph Lameter   slub: validation ...
3833
  		struct kmem_cache_node *n = get_node(s, node);
434e245dd   Christoph Lameter   SLUB: Do not allo...
3834
  		count += validate_slab_node(s, n, map);
53e15af03   Christoph Lameter   slub: validation ...
3835
  	}
434e245dd   Christoph Lameter   SLUB: Do not allo...
3836
  	kfree(map);
53e15af03   Christoph Lameter   slub: validation ...
3837
3838
  	return count;
  }
88a420e4e   Christoph Lameter   slub: add ability...
3839
  /*
672bba3a4   Christoph Lameter   SLUB: update comm...
3840
   * Generate lists of code addresses where slabcache objects are allocated
88a420e4e   Christoph Lameter   slub: add ability...
3841
3842
3843
3844
3845
   * and freed.
   */
  
  struct location {
  	unsigned long count;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3846
  	unsigned long addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3847
3848
3849
3850
3851
  	long long sum_time;
  	long min_time;
  	long max_time;
  	long min_pid;
  	long max_pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3852
  	DECLARE_BITMAP(cpus, NR_CPUS);
45edfa580   Christoph Lameter   SLUB: include lif...
3853
  	nodemask_t nodes;
88a420e4e   Christoph Lameter   slub: add ability...
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
  };
  
  struct loc_track {
  	unsigned long max;
  	unsigned long count;
  	struct location *loc;
  };
  
  static void free_loc_track(struct loc_track *t)
  {
  	if (t->max)
  		free_pages((unsigned long)t->loc,
  			get_order(sizeof(struct location) * t->max));
  }
68dff6a9a   Christoph Lameter   SLUB slab validat...
3868
  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4e   Christoph Lameter   slub: add ability...
3869
3870
3871
  {
  	struct location *l;
  	int order;
88a420e4e   Christoph Lameter   slub: add ability...
3872
  	order = get_order(sizeof(struct location) * max);
68dff6a9a   Christoph Lameter   SLUB slab validat...
3873
  	l = (void *)__get_free_pages(flags, order);
88a420e4e   Christoph Lameter   slub: add ability...
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
  	if (!l)
  		return 0;
  
  	if (t->count) {
  		memcpy(l, t->loc, sizeof(struct location) * t->count);
  		free_loc_track(t);
  	}
  	t->max = max;
  	t->loc = l;
  	return 1;
  }
  
  static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa580   Christoph Lameter   SLUB: include lif...
3887
  				const struct track *track)
88a420e4e   Christoph Lameter   slub: add ability...
3888
3889
3890
  {
  	long start, end, pos;
  	struct location *l;
ce71e27c6   Eduard - Gabriel Munteanu   SLUB: Replace __b...
3891
  	unsigned long caddr;
45edfa580   Christoph Lameter   SLUB: include lif...
3892
  	unsigned long age = jiffies - track->when;
88a420e4e   Christoph Lameter   slub: add ability...
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
  
  	start = -1;
  	end = t->count;
  
  	for ( ; ; ) {
  		pos = start + (end - start + 1) / 2;
  
  		/*
  		 * There is nothing at "end". If we end up there
  		 * we need to add something before end.
  		 */
  		if (pos == end)
  			break;
  
  		caddr = t->loc[pos].addr;
45edfa580   Christoph Lameter   SLUB: include lif...
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
  		if (track->addr == caddr) {
  
  			l = &t->loc[pos];
  			l->count++;
  			if (track->when) {
  				l->sum_time += age;
  				if (age < l->min_time)
  					l->min_time = age;
  				if (age > l->max_time)
  					l->max_time = age;
  
  				if (track->pid < l->min_pid)
  					l->min_pid = track->pid;
  				if (track->pid > l->max_pid)
  					l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3923
3924
  				cpumask_set_cpu(track->cpu,
  						to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
3925
3926
  			}
  			node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
3927
3928
  			return 1;
  		}
45edfa580   Christoph Lameter   SLUB: include lif...
3929
  		if (track->addr < caddr)
88a420e4e   Christoph Lameter   slub: add ability...
3930
3931
3932
3933
3934
3935
  			end = pos;
  		else
  			start = pos;
  	}
  
  	/*
672bba3a4   Christoph Lameter   SLUB: update comm...
3936
  	 * Not found. Insert new tracking element.
88a420e4e   Christoph Lameter   slub: add ability...
3937
  	 */
68dff6a9a   Christoph Lameter   SLUB slab validat...
3938
  	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4e   Christoph Lameter   slub: add ability...
3939
3940
3941
3942
3943
3944
3945
3946
  		return 0;
  
  	l = t->loc + pos;
  	if (pos < t->count)
  		memmove(l + 1, l,
  			(t->count - pos) * sizeof(struct location));
  	t->count++;
  	l->count = 1;
45edfa580   Christoph Lameter   SLUB: include lif...
3947
3948
3949
3950
3951
3952
  	l->addr = track->addr;
  	l->sum_time = age;
  	l->min_time = age;
  	l->max_time = age;
  	l->min_pid = track->pid;
  	l->max_pid = track->pid;
174596a0b   Rusty Russell   cpumask: convert mm/
3953
3954
  	cpumask_clear(to_cpumask(l->cpus));
  	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
3955
3956
  	nodes_clear(l->nodes);
  	node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e   Christoph Lameter   slub: add ability...
3957
3958
3959
3960
  	return 1;
  }
  
  static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57bf   Eric Dumazet   slub: Potential s...
3961
  		struct page *page, enum track_item alloc,
a5dd5c117   Namhyung Kim   slub: Fix signedn...
3962
  		unsigned long *map)
88a420e4e   Christoph Lameter   slub: add ability...
3963
  {
a973e9dd1   Christoph Lameter   Revert "unique en...
3964
  	void *addr = page_address(page);
88a420e4e   Christoph Lameter   slub: add ability...
3965
  	void *p;
39b264641   Christoph Lameter   slub: Store max n...
3966
  	bitmap_zero(map, page->objects);
5f80b13ae   Christoph Lameter   slub: get_map() f...
3967
  	get_map(s, page, map);
88a420e4e   Christoph Lameter   slub: add ability...
3968

224a88be4   Christoph Lameter   slub: for_each_ob...
3969
  	for_each_object(p, s, addr, page->objects)
45edfa580   Christoph Lameter   SLUB: include lif...
3970
3971
  		if (!test_bit(slab_index(p, s, addr), map))
  			add_location(t, s, get_track(s, p, alloc));
88a420e4e   Christoph Lameter   slub: add ability...
3972
3973
3974
3975
3976
  }
  
  static int list_locations(struct kmem_cache *s, char *buf,
  					enum track_item alloc)
  {
e374d4835   Harvey Harrison   slub: fix shadowe...
3977
  	int len = 0;
88a420e4e   Christoph Lameter   slub: add ability...
3978
  	unsigned long i;
68dff6a9a   Christoph Lameter   SLUB slab validat...
3979
  	struct loc_track t = { 0, 0, NULL };
88a420e4e   Christoph Lameter   slub: add ability...
3980
  	int node;
bbd7d57bf   Eric Dumazet   slub: Potential s...
3981
3982
  	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
  				     sizeof(unsigned long), GFP_KERNEL);
88a420e4e   Christoph Lameter   slub: add ability...
3983

bbd7d57bf   Eric Dumazet   slub: Potential s...
3984
3985
3986
  	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
  				     GFP_TEMPORARY)) {
  		kfree(map);
68dff6a9a   Christoph Lameter   SLUB slab validat...
3987
3988
  		return sprintf(buf, "Out of memory
  ");
bbd7d57bf   Eric Dumazet   slub: Potential s...
3989
  	}
88a420e4e   Christoph Lameter   slub: add ability...
3990
3991
  	/* Push back cpu slabs */
  	flush_all(s);
f64dc58c5   Christoph Lameter   Memoryless nodes:...
3992
  	for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4e   Christoph Lameter   slub: add ability...
3993
3994
3995
  		struct kmem_cache_node *n = get_node(s, node);
  		unsigned long flags;
  		struct page *page;
9e86943b6   Christoph Lameter   SLUB: use atomic_...
3996
  		if (!atomic_long_read(&n->nr_slabs))
88a420e4e   Christoph Lameter   slub: add ability...
3997
3998
3999
4000
  			continue;
  
  		spin_lock_irqsave(&n->list_lock, flags);
  		list_for_each_entry(page, &n->partial, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4001
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4002
  		list_for_each_entry(page, &n->full, lru)
bbd7d57bf   Eric Dumazet   slub: Potential s...
4003
  			process_slab(&t, s, page, alloc, map);
88a420e4e   Christoph Lameter   slub: add ability...
4004
4005
4006
4007
  		spin_unlock_irqrestore(&n->list_lock, flags);
  	}
  
  	for (i = 0; i < t.count; i++) {
45edfa580   Christoph Lameter   SLUB: include lif...
4008
  		struct location *l = &t.loc[i];
88a420e4e   Christoph Lameter   slub: add ability...
4009

9c2462472   Hugh Dickins   KSYM_SYMBOL_LEN f...
4010
  		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4e   Christoph Lameter   slub: add ability...
4011
  			break;
e374d4835   Harvey Harrison   slub: fix shadowe...
4012
  		len += sprintf(buf + len, "%7ld ", l->count);
45edfa580   Christoph Lameter   SLUB: include lif...
4013
4014
  
  		if (l->addr)
62c70bce8   Joe Perches   mm: convert sprin...
4015
  			len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4e   Christoph Lameter   slub: add ability...
4016
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4017
  			len += sprintf(buf + len, "<not-available>");
45edfa580   Christoph Lameter   SLUB: include lif...
4018
4019
  
  		if (l->sum_time != l->min_time) {
e374d4835   Harvey Harrison   slub: fix shadowe...
4020
  			len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258e   Roman Zippel   remove div_long_l...
4021
4022
4023
  				l->min_time,
  				(long)div_u64(l->sum_time, l->count),
  				l->max_time);
45edfa580   Christoph Lameter   SLUB: include lif...
4024
  		} else
e374d4835   Harvey Harrison   slub: fix shadowe...
4025
  			len += sprintf(buf + len, " age=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4026
4027
4028
  				l->min_time);
  
  		if (l->min_pid != l->max_pid)
e374d4835   Harvey Harrison   slub: fix shadowe...
4029
  			len += sprintf(buf + len, " pid=%ld-%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4030
4031
  				l->min_pid, l->max_pid);
  		else
e374d4835   Harvey Harrison   slub: fix shadowe...
4032
  			len += sprintf(buf + len, " pid=%ld",
45edfa580   Christoph Lameter   SLUB: include lif...
4033
  				l->min_pid);
174596a0b   Rusty Russell   cpumask: convert mm/
4034
4035
  		if (num_online_cpus() > 1 &&
  				!cpumask_empty(to_cpumask(l->cpus)) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4036
4037
4038
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " cpus=");
  			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
174596a0b   Rusty Russell   cpumask: convert mm/
4039
  						 to_cpumask(l->cpus));
45edfa580   Christoph Lameter   SLUB: include lif...
4040
  		}
62bc62a87   Christoph Lameter   page allocator: u...
4041
  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d4835   Harvey Harrison   slub: fix shadowe...
4042
4043
4044
  				len < PAGE_SIZE - 60) {
  			len += sprintf(buf + len, " nodes=");
  			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa580   Christoph Lameter   SLUB: include lif...
4045
4046
  					l->nodes);
  		}
e374d4835   Harvey Harrison   slub: fix shadowe...
4047
4048
  		len += sprintf(buf + len, "
  ");
88a420e4e   Christoph Lameter   slub: add ability...
4049
4050
4051
  	}
  
  	free_loc_track(&t);
bbd7d57bf   Eric Dumazet   slub: Potential s...
4052
  	kfree(map);
88a420e4e   Christoph Lameter   slub: add ability...
4053
  	if (!t.count)
e374d4835   Harvey Harrison   slub: fix shadowe...
4054
4055
4056
  		len += sprintf(buf, "No data
  ");
  	return len;
88a420e4e   Christoph Lameter   slub: add ability...
4057
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4058
  #endif
88a420e4e   Christoph Lameter   slub: add ability...
4059

a5a84755c   Christoph Lameter   slub: Move functi...
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
  #ifdef SLUB_RESILIENCY_TEST
  static void resiliency_test(void)
  {
  	u8 *p;
  
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
  
  	printk(KERN_ERR "SLUB resiliency testing\n");
  	printk(KERN_ERR "-----------------------\n");
  	printk(KERN_ERR "A. Corruption after allocation\n");
  
  	p = kzalloc(16, GFP_KERNEL);
  	p[16] = 0x12;
  	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
  			" 0x12->0x%p\n\n", p + 16);
  
  	validate_slab_cache(kmalloc_caches[4]);
  
  	/* Hmmm... The next two are dangerous */
  	p = kzalloc(32, GFP_KERNEL);
  	p[32 + sizeof(void *)] = 0x34;
  	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
  			" 0x34 -> -0x%p\n", p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  
  	validate_slab_cache(kmalloc_caches[5]);
  	p = kzalloc(64, GFP_KERNEL);
  	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  	*p = 0x56;
  	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
  									p);
  	printk(KERN_ERR
  		"If allocated object is overwritten then not detectable\n\n");
  	validate_slab_cache(kmalloc_caches[6]);
  
  	printk(KERN_ERR "\nB. Corruption after free\n");
  	p = kzalloc(128, GFP_KERNEL);
  	kfree(p);
  	*p = 0x78;
  	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[7]);
  
  	p = kzalloc(256, GFP_KERNEL);
  	kfree(p);
  	p[50] = 0x9a;
  	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
  			p);
  	validate_slab_cache(kmalloc_caches[8]);
  
  	p = kzalloc(512, GFP_KERNEL);
  	kfree(p);
  	p[512] = 0xab;
  	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
  	validate_slab_cache(kmalloc_caches[9]);
  }
  #else
  #ifdef CONFIG_SYSFS
  static void resiliency_test(void) {};
  #endif
  #endif
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4145
  #ifdef CONFIG_SYSFS
81819f0fc   Christoph Lameter   SLUB core
4146
  enum slab_stat_type {
205ab99dd   Christoph Lameter   slub: Update stat...
4147
4148
4149
4150
4151
  	SL_ALL,			/* All slabs */
  	SL_PARTIAL,		/* Only partially allocated slabs */
  	SL_CPU,			/* Only slabs used for cpu caches */
  	SL_OBJECTS,		/* Determine allocated objects not slabs */
  	SL_TOTAL		/* Determine object capacity not slabs */
81819f0fc   Christoph Lameter   SLUB core
4152
  };
205ab99dd   Christoph Lameter   slub: Update stat...
4153
  #define SO_ALL		(1 << SL_ALL)
81819f0fc   Christoph Lameter   SLUB core
4154
4155
4156
  #define SO_PARTIAL	(1 << SL_PARTIAL)
  #define SO_CPU		(1 << SL_CPU)
  #define SO_OBJECTS	(1 << SL_OBJECTS)
205ab99dd   Christoph Lameter   slub: Update stat...
4157
  #define SO_TOTAL	(1 << SL_TOTAL)
81819f0fc   Christoph Lameter   SLUB core
4158

62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4159
4160
  static ssize_t show_slab_objects(struct kmem_cache *s,
  			    char *buf, unsigned long flags)
81819f0fc   Christoph Lameter   SLUB core
4161
4162
  {
  	unsigned long total = 0;
81819f0fc   Christoph Lameter   SLUB core
4163
4164
4165
4166
4167
4168
  	int node;
  	int x;
  	unsigned long *nodes;
  	unsigned long *per_cpu;
  
  	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4d   Cyrill Gorcunov   slub: fix possibl...
4169
4170
  	if (!nodes)
  		return -ENOMEM;
81819f0fc   Christoph Lameter   SLUB core
4171
  	per_cpu = nodes + nr_node_ids;
205ab99dd   Christoph Lameter   slub: Update stat...
4172
4173
  	if (flags & SO_CPU) {
  		int cpu;
81819f0fc   Christoph Lameter   SLUB core
4174

205ab99dd   Christoph Lameter   slub: Update stat...
4175
  		for_each_possible_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4176
  			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4177

205ab99dd   Christoph Lameter   slub: Update stat...
4178
4179
4180
4181
4182
4183
4184
4185
  			if (!c || c->node < 0)
  				continue;
  
  			if (c->page) {
  				if (flags & SO_TOTAL)
  					x = c->page->objects;
  				else if (flags & SO_OBJECTS)
  					x = c->page->inuse;
81819f0fc   Christoph Lameter   SLUB core
4186
4187
  				else
  					x = 1;
205ab99dd   Christoph Lameter   slub: Update stat...
4188

81819f0fc   Christoph Lameter   SLUB core
4189
  				total += x;
205ab99dd   Christoph Lameter   slub: Update stat...
4190
  				nodes[c->node] += x;
81819f0fc   Christoph Lameter   SLUB core
4191
  			}
205ab99dd   Christoph Lameter   slub: Update stat...
4192
  			per_cpu[c->node]++;
81819f0fc   Christoph Lameter   SLUB core
4193
4194
  		}
  	}
04d94879c   Christoph Lameter   slub: Avoid use o...
4195
  	lock_memory_hotplug();
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4196
  #ifdef CONFIG_SLUB_DEBUG
205ab99dd   Christoph Lameter   slub: Update stat...
4197
4198
4199
4200
4201
4202
4203
4204
4205
  	if (flags & SO_ALL) {
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
  
  			if (flags & SO_TOTAL)
  				x = atomic_long_read(&n->total_objects);
  			else if (flags & SO_OBJECTS)
  				x = atomic_long_read(&n->total_objects) -
  					count_partial(n, count_free);
81819f0fc   Christoph Lameter   SLUB core
4206

81819f0fc   Christoph Lameter   SLUB core
4207
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4208
  				x = atomic_long_read(&n->nr_slabs);
81819f0fc   Christoph Lameter   SLUB core
4209
4210
4211
  			total += x;
  			nodes[node] += x;
  		}
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4212
4213
4214
  	} else
  #endif
  	if (flags & SO_PARTIAL) {
205ab99dd   Christoph Lameter   slub: Update stat...
4215
4216
  		for_each_node_state(node, N_NORMAL_MEMORY) {
  			struct kmem_cache_node *n = get_node(s, node);
81819f0fc   Christoph Lameter   SLUB core
4217

205ab99dd   Christoph Lameter   slub: Update stat...
4218
4219
4220
4221
  			if (flags & SO_TOTAL)
  				x = count_partial(n, count_total);
  			else if (flags & SO_OBJECTS)
  				x = count_partial(n, count_inuse);
81819f0fc   Christoph Lameter   SLUB core
4222
  			else
205ab99dd   Christoph Lameter   slub: Update stat...
4223
  				x = n->nr_partial;
81819f0fc   Christoph Lameter   SLUB core
4224
4225
4226
4227
  			total += x;
  			nodes[node] += x;
  		}
  	}
81819f0fc   Christoph Lameter   SLUB core
4228
4229
  	x = sprintf(buf, "%lu", total);
  #ifdef CONFIG_NUMA
f64dc58c5   Christoph Lameter   Memoryless nodes:...
4230
  	for_each_node_state(node, N_NORMAL_MEMORY)
81819f0fc   Christoph Lameter   SLUB core
4231
4232
4233
4234
  		if (nodes[node])
  			x += sprintf(buf + x, " N%d=%lu",
  					node, nodes[node]);
  #endif
04d94879c   Christoph Lameter   slub: Avoid use o...
4235
  	unlock_memory_hotplug();
81819f0fc   Christoph Lameter   SLUB core
4236
4237
4238
4239
  	kfree(nodes);
  	return x + sprintf(buf + x, "
  ");
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4240
  #ifdef CONFIG_SLUB_DEBUG
81819f0fc   Christoph Lameter   SLUB core
4241
4242
4243
  static int any_slab_objects(struct kmem_cache *s)
  {
  	int node;
81819f0fc   Christoph Lameter   SLUB core
4244

dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4245
  	for_each_online_node(node) {
81819f0fc   Christoph Lameter   SLUB core
4246
  		struct kmem_cache_node *n = get_node(s, node);
dfb4f0960   Christoph Lameter   SLUB: Avoid page ...
4247
4248
  		if (!n)
  			continue;
4ea33e2dc   Benjamin Herrenschmidt   slub: fix atomic ...
4249
  		if (atomic_long_read(&n->total_objects))
81819f0fc   Christoph Lameter   SLUB core
4250
4251
4252
4253
  			return 1;
  	}
  	return 0;
  }
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4254
  #endif
81819f0fc   Christoph Lameter   SLUB core
4255
4256
  
  #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf6   Phil Carmody   treewide: fix pot...
4257
  #define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0fc   Christoph Lameter   SLUB core
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
  
  struct slab_attribute {
  	struct attribute attr;
  	ssize_t (*show)(struct kmem_cache *s, char *buf);
  	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  };
  
  #define SLAB_ATTR_RO(_name) \
  	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
  
  #define SLAB_ATTR(_name) \
  	static struct slab_attribute _name##_attr =  \
  	__ATTR(_name, 0644, _name##_show, _name##_store)
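  
  /*
   * Illustrative note: SLAB_ATTR_RO(slab_size) defines a read-only sysfs
   * attribute named "slab_size" backed by slab_size_show(), while
   * SLAB_ATTR(order) additionally wires up order_store() and makes the
   * file writable (mode 0644).
   */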
81819f0fc   Christoph Lameter   SLUB core
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
  static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->size);
  }
  SLAB_ATTR_RO(slab_size);
  
  static ssize_t align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->align);
  }
  SLAB_ATTR_RO(align);
  
  static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->objsize);
  }
  SLAB_ATTR_RO(object_size);
  
  static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4294
4295
  	return sprintf(buf, "%d
  ", oo_objects(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4296
4297
  }
  SLAB_ATTR_RO(objs_per_slab);
06b285dc3   Christoph Lameter   slub: Make the or...
4298
4299
4300
  static ssize_t order_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4301
4302
4303
4304
4305
4306
  	unsigned long order;
  	int err;
  
  	err = strict_strtoul(buf, 10, &order);
  	if (err)
  		return err;
06b285dc3   Christoph Lameter   slub: Make the or...
4307
4308
4309
4310
4311
4312
4313
  
  	if (order > slub_max_order || order < slub_min_order)
  		return -EINVAL;
  
  	calculate_sizes(s, order);
  	return length;
  }
81819f0fc   Christoph Lameter   SLUB core
4314
4315
  static ssize_t order_show(struct kmem_cache *s, char *buf)
  {
834f3d119   Christoph Lameter   slub: Add kmem_ca...
4316
4317
  	return sprintf(buf, "%d
  ", oo_order(s->oo));
81819f0fc   Christoph Lameter   SLUB core
4318
  }
06b285dc3   Christoph Lameter   slub: Make the or...
4319
  SLAB_ATTR(order);
81819f0fc   Christoph Lameter   SLUB core
4320

73d342b16   David Rientjes   slub: add min_par...
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
  static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%lu
  ", s->min_partial);
  }
  
  static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
  				 size_t length)
  {
  	unsigned long min;
  	int err;
  
  	err = strict_strtoul(buf, 10, &min);
  	if (err)
  		return err;
c0bdb232b   David Rientjes   slub: rename calc...
4336
  	set_min_partial(s, min);
73d342b16   David Rientjes   slub: add min_par...
4337
4338
4339
  	return length;
  }
  SLAB_ATTR(min_partial);
81819f0fc   Christoph Lameter   SLUB core
4340
4341
  static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  {
62c70bce8   Joe Perches   mm: convert sprin...
4342
4343
4344
4345
  	if (!s->ctor)
  		return 0;
  	return sprintf(buf, "%pS
  ", s->ctor);
81819f0fc   Christoph Lameter   SLUB core
4346
4347
  }
  SLAB_ATTR_RO(ctor);
81819f0fc   Christoph Lameter   SLUB core
4348
4349
4350
4351
4352
4353
  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->refcount - 1);
  }
  SLAB_ATTR_RO(aliases);
81819f0fc   Christoph Lameter   SLUB core
4354
4355
  static ssize_t partial_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
4356
  	return show_slab_objects(s, buf, SO_PARTIAL);
81819f0fc   Christoph Lameter   SLUB core
4357
4358
4359
4360
4361
  }
  SLAB_ATTR_RO(partial);
  
  static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  {
d9acf4b7b   Christoph Lameter   slub: rename slab...
4362
  	return show_slab_objects(s, buf, SO_CPU);
81819f0fc   Christoph Lameter   SLUB core
4363
4364
4365
4366
4367
  }
  SLAB_ATTR_RO(cpu_slabs);
  
  static ssize_t objects_show(struct kmem_cache *s, char *buf)
  {
205ab99dd   Christoph Lameter   slub: Update stat...
4368
  	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0fc   Christoph Lameter   SLUB core
4369
4370
  }
  SLAB_ATTR_RO(objects);
205ab99dd   Christoph Lameter   slub: Update stat...
4371
4372
4373
4374
4375
  static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
  }
  SLAB_ATTR_RO(objects_partial);
a5a84755c   Christoph Lameter   slub: Move functi...
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  }
  
  static ssize_t reclaim_account_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  	if (buf[0] == '1')
  		s->flags |= SLAB_RECLAIM_ACCOUNT;
  	return length;
  }
  SLAB_ATTR(reclaim_account);
  
  static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_HWCACHE_ALIGN));
  }
  SLAB_ATTR_RO(hwcache_align);
  
  #ifdef CONFIG_ZONE_DMA
  static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_CACHE_DMA));
  }
  SLAB_ATTR_RO(cache_dma);
  #endif
  
  static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DESTROY_BY_RCU));
  }
  SLAB_ATTR_RO(destroy_by_rcu);
ab9a0f196   Lai Jiangshan   slub: automatical...
4414
4415
4416
4417
4418
4419
  static ssize_t reserved_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", s->reserved);
  }
  SLAB_ATTR_RO(reserved);
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4420
  #ifdef CONFIG_SLUB_DEBUG
a5a84755c   Christoph Lameter   slub: Move functi...
4421
4422
4423
4424
4425
  static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL);
  }
  SLAB_ATTR_RO(slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
4426
4427
4428
4429
4430
  static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
  {
  	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
  }
  SLAB_ATTR_RO(total_objects);
81819f0fc   Christoph Lameter   SLUB core
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
  static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_DEBUG_FREE));
  }
  
  static ssize_t sanity_checks_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	s->flags &= ~SLAB_DEBUG_FREE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4441
4442
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4443
  		s->flags |= SLAB_DEBUG_FREE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4444
  	}
81819f0fc   Christoph Lameter   SLUB core
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
  	return length;
  }
  SLAB_ATTR(sanity_checks);
  
  static ssize_t trace_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_TRACE));
  }
  
  static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_TRACE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4459
4460
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4461
  		s->flags |= SLAB_TRACE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4462
  	}
81819f0fc   Christoph Lameter   SLUB core
4463
4464
4465
  	return length;
  }
  SLAB_ATTR(trace);
81819f0fc   Christoph Lameter   SLUB core
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
  static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_RED_ZONE));
  }
  
  static ssize_t red_zone_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_RED_ZONE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4479
4480
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4481
  		s->flags |= SLAB_RED_ZONE;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4482
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4483
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
  	return length;
  }
  SLAB_ATTR(red_zone);
  
  static ssize_t poison_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_POISON));
  }
  
  static ssize_t poison_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_POISON;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4501
4502
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4503
  		s->flags |= SLAB_POISON;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4504
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4505
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
  	return length;
  }
  SLAB_ATTR(poison);
  
  static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_STORE_USER));
  }
  
  static ssize_t store_user_store(struct kmem_cache *s,
  				const char *buf, size_t length)
  {
  	if (any_slab_objects(s))
  		return -EBUSY;
  
  	s->flags &= ~SLAB_STORE_USER;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4523
4524
  	if (buf[0] == '1') {
  		s->flags &= ~__CMPXCHG_DOUBLE;
81819f0fc   Christoph Lameter   SLUB core
4525
  		s->flags |= SLAB_STORE_USER;
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4526
  	}
06b285dc3   Christoph Lameter   slub: Make the or...
4527
  	calculate_sizes(s, -1);
81819f0fc   Christoph Lameter   SLUB core
4528
4529
4530
  	return length;
  }
  SLAB_ATTR(store_user);
53e15af03   Christoph Lameter   slub: validation ...
4531
4532
4533
4534
4535
4536
4537
4538
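  /*
   * Writing '1' to the validate attribute walks every slab of the cache and
   * checks object and freelist consistency; any problems found are reported
   * through the kernel log.
   */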
  static ssize_t validate_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t validate_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
434e245dd   Christoph Lameter   SLUB: Do not allo...
4539
4540
4541
4542
4543
4544
4545
4546
  	int ret = -EINVAL;
  
  	if (buf[0] == '1') {
  		ret = validate_slab_cache(s);
  		if (ret >= 0)
  			ret = length;
  	}
  	return ret;
53e15af03   Christoph Lameter   slub: validation ...
4547
4548
  }
  SLAB_ATTR(validate);
a5a84755c   Christoph Lameter   slub: Move functi...
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
  
  static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_ALLOC);
  }
  SLAB_ATTR_RO(alloc_calls);
  
  static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  {
  	if (!(s->flags & SLAB_STORE_USER))
  		return -ENOSYS;
  	return list_locations(s, buf, TRACK_FREE);
  }
  SLAB_ATTR_RO(free_calls);
  #endif /* CONFIG_SLUB_DEBUG */
  
  #ifdef CONFIG_FAILSLAB
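  /*
   * Per cache fault injection control: when the failslab cache filter is
   * enabled, only caches that have this flag set are subject to injected
   * allocation failures.
   */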
  static ssize_t failslab_show(struct kmem_cache *s, char *buf)
  {
  	return sprintf(buf, "%d
  ", !!(s->flags & SLAB_FAILSLAB));
  }
  
  static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
  							size_t length)
  {
  	s->flags &= ~SLAB_FAILSLAB;
  	if (buf[0] == '1')
  		s->flags |= SLAB_FAILSLAB;
  	return length;
  }
  SLAB_ATTR(failslab);
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4583
  #endif
53e15af03   Christoph Lameter   slub: validation ...
4584

2086d26a0   Christoph Lameter   SLUB: Free slabs ...
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
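  /*
   * Writing '1' to /sys/kernel/slab/<cache>/shrink discards empty slabs and
   * sorts the remaining partial slabs by the number of objects in use; any
   * other value is rejected with -EINVAL.
   */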
  static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  {
  	return 0;
  }
  
  static ssize_t shrink_store(struct kmem_cache *s,
  			const char *buf, size_t length)
  {
  	if (buf[0] == '1') {
  		int rc = kmem_cache_shrink(s);
  
  		if (rc)
  			return rc;
  	} else
  		return -EINVAL;
  	return length;
  }
  SLAB_ATTR(shrink);
81819f0fc   Christoph Lameter   SLUB core
4603
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4604
  static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0fc   Christoph Lameter   SLUB core
4605
  {
9824601ea   Christoph Lameter   SLUB: rename defr...
4606
4607
  	return sprintf(buf, "%d
  ", s->remote_node_defrag_ratio / 10);
81819f0fc   Christoph Lameter   SLUB core
4608
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4609
  static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0fc   Christoph Lameter   SLUB core
4610
4611
  				const char *buf, size_t length)
  {
0121c619d   Christoph Lameter   slub: Whitespace ...
4612
4613
4614
4615
4616
4617
  	unsigned long ratio;
  	int err;
  
  	err = strict_strtoul(buf, 10, &ratio);
  	if (err)
  		return err;
e2cb96b7e   Christoph Lameter   slub: Disable NUM...
4618
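  	/*
  	 * User space supplies a percentage but the value is stored scaled
  	 * by ten, since get_any_partial() compares it against a
  	 * pseudo-random value in the 0-1023 range when deciding whether to
  	 * take partial slabs from a remote node.
  	 */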
  	if (ratio <= 100)
0121c619d   Christoph Lameter   slub: Whitespace ...
4619
  		s->remote_node_defrag_ratio = ratio * 10;
81819f0fc   Christoph Lameter   SLUB core
4620

81819f0fc   Christoph Lameter   SLUB core
4621
4622
  	return length;
  }
9824601ea   Christoph Lameter   SLUB: rename defr...
4623
  SLAB_ATTR(remote_node_defrag_ratio);
81819f0fc   Christoph Lameter   SLUB core
4624
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4625
  #ifdef CONFIG_SLUB_STATS
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
  static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
  {
  	unsigned long sum  = 0;
  	int cpu;
  	int len;
  	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
  
  	if (!data)
  		return -ENOMEM;
  
  	for_each_online_cpu(cpu) {
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4637
  		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4638
4639
4640
4641
4642
4643
  
  		data[cpu] = x;
  		sum += x;
  	}
  
  	len = sprintf(buf, "%lu", sum);
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4644
  #ifdef CONFIG_SMP
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4645
4646
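  	/*
  	 * A sysfs read supplies a single page; stop emitting the per cpu
  	 * breakdown once fewer than 20 bytes remain so the trailing newline
  	 * always fits.
  	 */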
  	for_each_online_cpu(cpu) {
  		if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4647
  			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4648
  	}
50ef37b96   Christoph Lameter   slub: Fixes to pe...
4649
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4650
4651
4652
4653
  	kfree(data);
  	return len + sprintf(buf + len, "\n");
  }
78eb00cc5   David Rientjes   slub: allow stats...
4654
4655
4656
4657
4658
  static void clear_stat(struct kmem_cache *s, enum stat_item si)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
9dfc6e68b   Christoph Lameter   SLUB: Use this_cp...
4659
  		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc5   David Rientjes   slub: allow stats...
4660
  }
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4661
4662
4663
4664
4665
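  /*
   * Each statistics attribute reports the sum over all online cpus (with a
   * per cpu breakdown on SMP) and is cleared by writing '0' to it.
   */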
  #define STAT_ATTR(si, text) 					\
  static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
  {								\
  	return show_stat(s, buf, si);				\
  }								\
78eb00cc5   David Rientjes   slub: allow stats...
4666
4667
4668
4669
4670
4671
4672
4673
4674
  static ssize_t text##_store(struct kmem_cache *s,		\
  				const char *buf, size_t length)	\
  {								\
  	if (buf[0] != '0')					\
  		return -EINVAL;					\
  	clear_stat(s, si);					\
  	return length;						\
  }								\
  SLAB_ATTR(text);						\
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
  
  STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
  STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
  STAT_ATTR(FREE_FASTPATH, free_fastpath);
  STAT_ATTR(FREE_SLOWPATH, free_slowpath);
  STAT_ATTR(FREE_FROZEN, free_frozen);
  STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
  STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
  STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
  STAT_ATTR(ALLOC_SLAB, alloc_slab);
  STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652d   Christoph Lameter   slub: Add statist...
4686
  STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4687
4688
4689
4690
4691
4692
4693
  STAT_ATTR(FREE_SLAB, free_slab);
  STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
  STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
  STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
  STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
  STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
  STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af2   Christoph Lameter   slub: fast releas...
4694
  STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376aa   Christoph Lameter   slub: Fallback to...
4695
  STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4696
4697
  STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
  STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4698
  #endif
064287807   Pekka Enberg   SLUB: Fix coding ...
4699
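  /*
   * All attributes defined above are collected here; sysfs_slab_add() exposes
   * them as files in each cache's /sys/kernel/slab/<name>/ directory.
   */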
  static struct attribute *slab_attrs[] = {
81819f0fc   Christoph Lameter   SLUB core
4700
4701
4702
4703
  	&slab_size_attr.attr,
  	&object_size_attr.attr,
  	&objs_per_slab_attr.attr,
  	&order_attr.attr,
73d342b16   David Rientjes   slub: add min_par...
4704
  	&min_partial_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4705
  	&objects_attr.attr,
205ab99dd   Christoph Lameter   slub: Update stat...
4706
  	&objects_partial_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4707
4708
4709
  	&partial_attr.attr,
  	&cpu_slabs_attr.attr,
  	&ctor_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4710
4711
  	&aliases_attr.attr,
  	&align_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4712
4713
4714
  	&hwcache_align_attr.attr,
  	&reclaim_account_attr.attr,
  	&destroy_by_rcu_attr.attr,
a5a84755c   Christoph Lameter   slub: Move functi...
4715
  	&shrink_attr.attr,
ab9a0f196   Lai Jiangshan   slub: automatical...
4716
  	&reserved_attr.attr,
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4717
  #ifdef CONFIG_SLUB_DEBUG
a5a84755c   Christoph Lameter   slub: Move functi...
4718
4719
4720
4721
  	&total_objects_attr.attr,
  	&slabs_attr.attr,
  	&sanity_checks_attr.attr,
  	&trace_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4722
4723
4724
  	&red_zone_attr.attr,
  	&poison_attr.attr,
  	&store_user_attr.attr,
53e15af03   Christoph Lameter   slub: validation ...
4725
  	&validate_attr.attr,
88a420e4e   Christoph Lameter   slub: add ability...
4726
4727
  	&alloc_calls_attr.attr,
  	&free_calls_attr.attr,
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
4728
  #endif
81819f0fc   Christoph Lameter   SLUB core
4729
4730
4731
4732
  #ifdef CONFIG_ZONE_DMA
  	&cache_dma_attr.attr,
  #endif
  #ifdef CONFIG_NUMA
9824601ea   Christoph Lameter   SLUB: rename defr...
4733
  	&remote_node_defrag_ratio_attr.attr,
81819f0fc   Christoph Lameter   SLUB core
4734
  #endif
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
  #ifdef CONFIG_SLUB_STATS
  	&alloc_fastpath_attr.attr,
  	&alloc_slowpath_attr.attr,
  	&free_fastpath_attr.attr,
  	&free_slowpath_attr.attr,
  	&free_frozen_attr.attr,
  	&free_add_partial_attr.attr,
  	&free_remove_partial_attr.attr,
  	&alloc_from_partial_attr.attr,
  	&alloc_slab_attr.attr,
  	&alloc_refill_attr.attr,
e36a2652d   Christoph Lameter   slub: Add statist...
4746
  	&alloc_node_mismatch_attr.attr,
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4747
4748
4749
4750
4751
4752
4753
  	&free_slab_attr.attr,
  	&cpuslab_flush_attr.attr,
  	&deactivate_full_attr.attr,
  	&deactivate_empty_attr.attr,
  	&deactivate_to_head_attr.attr,
  	&deactivate_to_tail_attr.attr,
  	&deactivate_remote_frees_attr.attr,
03e404af2   Christoph Lameter   slub: fast releas...
4754
  	&deactivate_bypass_attr.attr,
65c3376aa   Christoph Lameter   slub: Fallback to...
4755
  	&order_fallback_attr.attr,
b789ef518   Christoph Lameter   slub: Add cmpxchg...
4756
4757
  	&cmpxchg_double_fail_attr.attr,
  	&cmpxchg_double_cpu_fail_attr.attr,
8ff12cfc0   Christoph Lameter   SLUB: Support for...
4758
  #endif
4c13dd3b4   Dmitry Monakhov   failslab: add abi...
4759
4760
4761
  #ifdef CONFIG_FAILSLAB
  	&failslab_attr.attr,
  #endif
81819f0fc   Christoph Lameter   SLUB core
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
  	NULL
  };
  
  static struct attribute_group slab_attr_group = {
  	.attrs = slab_attrs,
  };
  
  static ssize_t slab_attr_show(struct kobject *kobj,
  				struct attribute *attr,
  				char *buf)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->show)
  		return -EIO;
  
  	err = attribute->show(s, buf);
  
  	return err;
  }
  
  static ssize_t slab_attr_store(struct kobject *kobj,
  				struct attribute *attr,
  				const char *buf, size_t len)
  {
  	struct slab_attribute *attribute;
  	struct kmem_cache *s;
  	int err;
  
  	attribute = to_slab_attr(attr);
  	s = to_slab(kobj);
  
  	if (!attribute->store)
  		return -EIO;
  
  	err = attribute->store(s, buf, len);
  
  	return err;
  }
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4806
4807
4808
  static void kmem_cache_release(struct kobject *kobj)
  {
  	struct kmem_cache *s = to_slab(kobj);
84c1cf624   Pekka Enberg   SLUB: Fix merged ...
4809
  	kfree(s->name);
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4810
4811
  	kfree(s);
  }
52cf25d0a   Emese Revfy   Driver core: Cons...
4812
  static const struct sysfs_ops slab_sysfs_ops = {
81819f0fc   Christoph Lameter   SLUB core
4813
4814
4815
4816
4817
4818
  	.show = slab_attr_show,
  	.store = slab_attr_store,
  };
  
  static struct kobj_type slab_ktype = {
  	.sysfs_ops = &slab_sysfs_ops,
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4819
  	.release = kmem_cache_release
81819f0fc   Christoph Lameter   SLUB core
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
  };
  
  static int uevent_filter(struct kset *kset, struct kobject *kobj)
  {
  	struct kobj_type *ktype = get_ktype(kobj);
  
  	if (ktype == &slab_ktype)
  		return 1;
  	return 0;
  }
9cd43611c   Emese Revfy   kobject: Constify...
4830
  static const struct kset_uevent_ops slab_uevent_ops = {
81819f0fc   Christoph Lameter   SLUB core
4831
4832
  	.filter = uevent_filter,
  };
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4833
  static struct kset *slab_kset;
81819f0fc   Christoph Lameter   SLUB core
4834
4835
4836
4837
  
  #define ID_STR_LENGTH 64
  
  /* Create a unique string id for a slab cache:
6446faa2f   Christoph Lameter   slub: Fix up comm...
4838
4839
   *
   * Format	:[flags-]size
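   *
   * e.g. an ordinary mergeable 192 byte cache with no DMA, reclaim accounting
   * or debug flags ends up with the id ":t-0000192".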
81819f0fc   Christoph Lameter   SLUB core
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
   */
  static char *create_unique_id(struct kmem_cache *s)
  {
  	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  	char *p = name;
  
  	BUG_ON(!name);
  
  	*p++ = ':';
  	/*
  	 * First flags affecting slabcache operations. We will only
  	 * get here for aliasable slabs so we do not need to support
  	 * too many flags. The flags here must cover all flags that
  	 * are matched during merging to guarantee that the id is
  	 * unique.
  	 */
  	if (s->flags & SLAB_CACHE_DMA)
  		*p++ = 'd';
  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
  		*p++ = 'a';
  	if (s->flags & SLAB_DEBUG_FREE)
  		*p++ = 'F';
5a896d9e7   Vegard Nossum   slub: add hooks f...
4862
4863
  	if (!(s->flags & SLAB_NOTRACK))
  		*p++ = 't';
81819f0fc   Christoph Lameter   SLUB core
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
  	if (p != name + 1)
  		*p++ = '-';
  	p += sprintf(p, "%07d", s->size);
  	BUG_ON(p > name + ID_STR_LENGTH - 1);
  	return name;
  }
  
  static int sysfs_slab_add(struct kmem_cache *s)
  {
  	int err;
  	const char *name;
  	int unmergeable;
  
  	if (slab_state < SYSFS)
  		/* Defer until later */
  		return 0;
  
  	unmergeable = slab_unmergeable(s);
  	if (unmergeable) {
  		/*
  		 * Slabcache can never be merged so we can use the name proper.
  		 * This is typically the case for debug situations. In that
  		 * case we can catch duplicate names easily.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4888
  		sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0fc   Christoph Lameter   SLUB core
4889
4890
4891
4892
4893
4894
4895
4896
  		name = s->name;
  	} else {
  		/*
  		 * Create a unique name for the slab as a target
  		 * for the symlinks.
  		 */
  		name = create_unique_id(s);
  	}
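  	/*
  	 * In the mergeable case the sysfs directory is named after the
  	 * unique id (e.g. /sys/kernel/slab/:t-0000192) and the cache name,
  	 * as well as the names of caches merged into it later, are added as
  	 * symlinks pointing at it.
  	 */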
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4897
  	s->kobj.kset = slab_kset;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
4898
4899
4900
  	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
  	if (err) {
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4901
  		return err;
1eada11c8   Greg Kroah-Hartman   Kobject: convert ...
4902
  	}
81819f0fc   Christoph Lameter   SLUB core
4903
4904
  
  	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5788d8ad6   Xiaotian Feng   slub: release kob...
4905
4906
4907
  	if (err) {
  		kobject_del(&s->kobj);
  		kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4908
  		return err;
5788d8ad6   Xiaotian Feng   slub: release kob...
4909
  	}
81819f0fc   Christoph Lameter   SLUB core
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
  	kobject_uevent(&s->kobj, KOBJ_ADD);
  	if (!unmergeable) {
  		/* Setup first alias */
  		sysfs_slab_alias(s, s->name);
  		kfree(name);
  	}
  	return 0;
  }
  
  static void sysfs_slab_remove(struct kmem_cache *s)
  {
2bce64858   Christoph Lameter   slub: Allow remov...
4921
4922
4923
4924
4925
4926
  	if (slab_state < SYSFS)
  		/*
  		 * Sysfs has not been setup yet so no need to remove the
  		 * cache from sysfs.
  		 */
  		return;
81819f0fc   Christoph Lameter   SLUB core
4927
4928
  	kobject_uevent(&s->kobj, KOBJ_REMOVE);
  	kobject_del(&s->kobj);
151c602f7   Christoph Lameter   SLUB: Fix sysfs r...
4929
  	kobject_put(&s->kobj);
81819f0fc   Christoph Lameter   SLUB core
4930
4931
4932
4933
  }
  
  /*
   * Need to buffer aliases during bootup until sysfs becomes
9f6c708e5   Nick Andrew   slub: Fix incorre...
4934
   * available lest we lose that information.
81819f0fc   Christoph Lameter   SLUB core
4935
4936
4937
4938
4939
4940
   */
  struct saved_alias {
  	struct kmem_cache *s;
  	const char *name;
  	struct saved_alias *next;
  };
5af328a51   Adrian Bunk   mm/slub.c: make c...
4941
  static struct saved_alias *alias_list;
81819f0fc   Christoph Lameter   SLUB core
4942
4943
4944
4945
4946
4947
4948
4949
4950
  
  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  {
  	struct saved_alias *al;
  
  	if (slab_state == SYSFS) {
  		/*
  		 * If we have a leftover link then remove it.
  		 */
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4951
4952
  		sysfs_remove_link(&slab_kset->kobj, name);
  		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0fc   Christoph Lameter   SLUB core
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
  	}
  
  	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  	if (!al)
  		return -ENOMEM;
  
  	al->s = s;
  	al->name = name;
  	al->next = alias_list;
  	alias_list = al;
  	return 0;
  }
  
  static int __init slab_sysfs_init(void)
  {
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
4968
  	struct kmem_cache *s;
81819f0fc   Christoph Lameter   SLUB core
4969
  	int err;
2bce64858   Christoph Lameter   slub: Allow remov...
4970
  	down_write(&slub_lock);
0ff21e466   Greg Kroah-Hartman   kobject: convert ...
4971
  	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314d   Greg Kroah-Hartman   kset: convert slu...
4972
  	if (!slab_kset) {
2bce64858   Christoph Lameter   slub: Allow remov...
4973
  		up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
4974
4975
4976
4977
  		printk(KERN_ERR "Cannot register slab subsystem.
  ");
  		return -ENOSYS;
  	}
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4978
  	slab_state = SYSFS;
5b95a4acf   Christoph Lameter   SLUB: use list_fo...
4979
  	list_for_each_entry(s, &slab_caches, list) {
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4980
  		err = sysfs_slab_add(s);
5d540fb71   Christoph Lameter   slub: do not fail...
4981
4982
4983
4984
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
  						" to sysfs
  ", s->name);
26a7bd030   Christoph Lameter   SLUB: get rid of ...
4985
  	}
81819f0fc   Christoph Lameter   SLUB core
4986
4987
4988
4989
4990
4991
  
  	while (alias_list) {
  		struct saved_alias *al = alias_list;
  
  		alias_list = alias_list->next;
  		err = sysfs_slab_alias(al->s, al->name);
5d540fb71   Christoph Lameter   slub: do not fail...
4992
4993
4994
4995
  		if (err)
  			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
  					" %s to sysfs
  ", s->name);
81819f0fc   Christoph Lameter   SLUB core
4996
4997
  		kfree(al);
  	}
2bce64858   Christoph Lameter   slub: Allow remov...
4998
  	up_write(&slub_lock);
81819f0fc   Christoph Lameter   SLUB core
4999
5000
5001
5002
5003
  	resiliency_test();
  	return 0;
  }
  
  __initcall(slab_sysfs_init);
ab4d5ed5e   Christoph Lameter   slub: Enable sysf...
5004
  #endif /* CONFIG_SYSFS */
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5005
5006
5007
5008
  
  /*
   * The /proc/slabinfo ABI
   */
158a96242   Linus Torvalds   Unify /proc/slabi...
5009
  #ifdef CONFIG_SLABINFO
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
  static void print_slabinfo_header(struct seq_file *m)
  {
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  	seq_putc(m, '\n');
  }
  
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
  
  	down_read(&slub_lock);
  	if (!n)
  		print_slabinfo_header(m);
  
  	return seq_list_start(&slab_caches, *pos);
  }
  
  static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  
  static void s_stop(struct seq_file *m, void *p)
  {
  	up_read(&slub_lock);
  }
  
  static int s_show(struct seq_file *m, void *p)
  {
  	unsigned long nr_partials = 0;
  	unsigned long nr_slabs = 0;
  	unsigned long nr_inuse = 0;
205ab99dd   Christoph Lameter   slub: Update stat...
5048
5049
  	unsigned long nr_objs = 0;
  	unsigned long nr_free = 0;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
  	struct kmem_cache *s;
  	int node;
  
  	s = list_entry(p, struct kmem_cache, list);
  
  	for_each_online_node(node) {
  		struct kmem_cache_node *n = get_node(s, node);
  
  		if (!n)
  			continue;
  
  		nr_partials += n->nr_partial;
  		nr_slabs += atomic_long_read(&n->nr_slabs);
205ab99dd   Christoph Lameter   slub: Update stat...
5063
5064
  		nr_objs += atomic_long_read(&n->total_objects);
  		nr_free += count_partial(n, count_free);
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5065
  	}
205ab99dd   Christoph Lameter   slub: Update stat...
5066
  	nr_inuse = nr_objs - nr_free;
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5067
5068
  
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
834f3d119   Christoph Lameter   slub: Add kmem_ca...
5069
5070
  		   nr_objs, s->size, oo_objects(s->oo),
  		   (1 << oo_order(s->oo)));
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5071
5072
5073
5074
5075
5076
5077
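  	/*
  	 * SLUB has no equivalent of SLAB's per cache tunables or shared
  	 * array caches; these columns are emitted as zeros only to keep the
  	 * /proc/slabinfo ABI unchanged.
  	 */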
  	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
  		   0UL);
  	seq_putc(m, '\n');
  	return 0;
  }
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5078
  static const struct seq_operations slabinfo_op = {
57ed3eda9   Pekka J Enberg   slub: provide /pr...
5079
5080
5081
5082
5083
  	.start = s_start,
  	.next = s_next,
  	.stop = s_stop,
  	.show = s_show,
  };
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
cf5d11317   WANG Cong   SLUB: Drop write ...
5098
  	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
7b3c3a50a   Alexey Dobriyan   proc: move /proc/...
5099
5100
5101
  	return 0;
  }
  module_init(slab_proc_init);
158a96242   Linus Torvalds   Unify /proc/slabi...
5102
  #endif /* CONFIG_SLABINFO */